├── .gitattributes
├── Images
├── Preview.gif
├── logo.png
├── preview1.png
├── preview2.png
└── preview_GUI.png
├── README.md
└── SourceCode
├── FanAudioVisualizer.py
├── FanBlender.py
├── FanBlender_Example.py
├── FanBlender_GUI.py
├── FanWheels_PIL.py
├── FanWheels_ffmpeg.py
├── LICENSE.txt
├── LanguagePack.py
├── Old_Version_Source_Code_Before_V110.zip
├── QtImages.py
├── QtStyle.py
├── QtViewer.py
├── QtWheels.py
├── QtWindows.py
├── ReadMe.txt
├── Source.zip
├── _CheckEnvironment.py
├── _Installer.py
└── requirements.txt
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/Images/Preview.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FerryYoungFan/FanselineVisualizer/451288d62e09db6c2f06bfc14d02c62983722071/Images/Preview.gif
--------------------------------------------------------------------------------
/Images/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FerryYoungFan/FanselineVisualizer/451288d62e09db6c2f06bfc14d02c62983722071/Images/logo.png
--------------------------------------------------------------------------------
/Images/preview1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FerryYoungFan/FanselineVisualizer/451288d62e09db6c2f06bfc14d02c62983722071/Images/preview1.png
--------------------------------------------------------------------------------
/Images/preview2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FerryYoungFan/FanselineVisualizer/451288d62e09db6c2f06bfc14d02c62983722071/Images/preview2.png
--------------------------------------------------------------------------------
/Images/preview_GUI.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FerryYoungFan/FanselineVisualizer/451288d62e09db6c2f06bfc14d02c62983722071/Images/preview_GUI.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Fanseline Visualizer - 帆室邻音频可视化视频制作工具
2 |
3 | Convert audio files to visualized video
4 | 将音频文件转化为可视化视频
5 |
6 | Ver.1.1.6 发行版 Release for Windows, macOS, Ubuntu X64:
7 | https://github.com/FerryYoungFan/FanselineVisualizer/releases
8 |
9 | See How to use:
10 | 查看如何使用:
11 | https://www.youtube.com/watch?v=ziSsiIvTB_o
12 | https://www.bilibili.com/video/BV1AD4y1D7fd
13 |
14 | 
15 | 
16 | 
17 | 
18 |
19 | ---
20 | ## Support this Project - 支持此项目
21 |
22 | If you like this software, please star this repository on GitHub.
23 | ... or you can directly support me with money:
24 | https://afdian.net/@Fanseline
25 |
26 | 如果您喜欢本软件,请在GitHub给此项目打星。
27 | ……或者您也可以直接给咱打钱:
28 | https://afdian.net/@Fanseline
29 |
--------------------------------------------------------------------------------
/SourceCode/FanAudioVisualizer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from FanWheels_PIL import *
5 | import numpy as np
6 | from pydub import AudioSegment
7 |
8 |
class AudioAnalyzer:
    """Analyze an audio file into per-video-frame FFT spectra and beat pulses.

    Samples are peak-normalized to [-1, 1]. Per-frame spectra are computed
    lazily and cached in ``self.hist_stack``; per-frame foreground scale
    factors for the beat pulse live in ``self.beat_stack``.
    """

    def __init__(self, file_path, ffmpeg_path, fps=30, fq_low=20, fq_up=6000, bins=80, smooth=0, beat_detect=60,
                 low_range=10):
        AudioSegment.ffmpeg = ffmpeg_path  # tell pydub which ffmpeg binary to decode with
        sound = AudioSegment.from_file(file_path)
        # Bug fix: np.float was removed in NumPy 1.24 -- use the explicit 64-bit dtype.
        self.samples = np.asarray(sound.get_array_of_samples(), dtype=np.float64)
        self.fq_low = fq_low  # lower analysis frequency bound (Hz)
        self.fq_up = fq_up  # upper analysis frequency bound (Hz)
        self.bins = bins  # number of spectrum bars
        # Fraction of the lowest bins treated as "bass" for beat detection.
        self.low_range = np.clip(low_range / 100, 0.0, 1.0)
        self.smooth = smooth if smooth else 0  # temporal smoothing amount (in frames at 30 fps)
        if np.max(self.samples) != 0:
            self.samples = self.samples / np.max(self.samples)  # normalize peak amplitude to 1.0
        self.sample_rate = sound.frame_rate
        self.T = 1.0 / self.sample_rate  # sampling period (s)

        self.fps = fps
        self.totalFrames = self.getTotalFrames()
        self.hist_stack = [None] * self.totalFrames  # lazy cache of per-frame spectra

        self.beat_stack = np.ones(self.totalFrames)  # per-frame foreground scale (1.0 = no pulse)
        self.beat_calc_stack = [None] * self.totalFrames  # lazy cache of per-frame bass energy
        self.beat_detect = beat_detect  # sensitivity 0..100 (0 disables beat detection)
        self.beat_thres = (100 - beat_detect) / 100

    def fftAnalyzer(self, start_p, stop_p, fq_low=20, fq_up=6000, bins=80):
        """FFT the sample window [start_p, stop_p) and fold it into `bins` bands.

        Bands use square-root-spaced edges (more resolution at low frequencies)
        and are weighted by the psychoacoustic model. Returns a zero array when
        the window is empty or outside the sample range.
        """
        freq_array = np.zeros(bins)
        if stop_p <= 0:
            return freq_array
        if start_p < 0:
            start_p = 0
        # Need at least one period of the lowest frequency inside the window.
        if start_p >= self.samples.shape[0] - self.sample_rate / fq_low:
            return freq_array
        if stop_p >= self.samples.shape[0]:
            stop_p = self.samples.shape[0] - 1
        y = self.samples[start_p:stop_p]
        N = y.shape[0]
        yf = np.fft.fft(y)
        yf_fq = 2.0 / N * np.abs(yf[:N // 2])  # single-sided amplitude spectrum
        xf = np.linspace(0.0, 1.0 / (2.0 * self.T), N // 2)  # Frequency domain: 0 to Nyquist, 1/(2T)
        keep = np.where(np.logical_and(xf >= fq_low, xf <= fq_up))
        yf_cut = yf_fq[keep]
        xf_cut = xf[keep]
        xf_log = squareSpace(1, len(yf_cut) + 1, bins + 1)  # square-root-spaced bin edges
        for i in range(bins):
            win_low, win_up = int(xf_log[i]), int(xf_log[i + 1])
            if win_low < 0:
                win_low = 0
            if win_up > len(yf_cut) - 1:
                win_up = len(yf_cut) - 1
            if win_up - win_low > 0:
                # Weight by the psychoacoustic model at the band's center frequency.
                # Bug fix: the original averaged xf_cut[win_low] with itself.
                freq_array[i] = np.sum(yf_cut[win_low:win_up]) * psyModel((xf_cut[win_low] + xf_cut[win_up]) / 2)
            else:
                freq_array[i] = 0
        return freq_array

    def getSampleRate(self):
        """Return the audio sample rate in Hz."""
        return self.sample_rate

    def getLength(self):
        """Return the total number of audio samples."""
        return self.samples.shape[0]

    def getTotalFrames(self):
        """Return the number of video frames covering the whole clip (+1 tail frame)."""
        return int(self.fps * self.getLength() / self.getSampleRate()) + 1

    def getHistAtFrame(self, index):
        """Return the (possibly time-smoothed) spectrum for video frame `index`.

        With enough smoothing, spectra of neighboring frames are averaged;
        every computed spectrum is cached in self.hist_stack.
        """
        smooth = int(round(self.smooth * self.fps / 30))  # rescale smoothing to the actual fps
        if smooth >= 1 + 4:
            # Average this frame with its neighbors on both sides.
            fcount = 0
            freq_acc = np.zeros(self.bins)
            for i in range(smooth - 4 + 1):
                # Divisor grows by 2 per step even when a side falls off the clip,
                # which fades the spectrum near the clip boundaries.
                fcount = fcount + 2
                if index - i < 0:
                    pass
                else:
                    if self.hist_stack[index - i] is None:
                        left, right = self.getRange(index - i, smooth)
                        self.hist_stack[index - i] = self.fftAnalyzer(left, right, self.fq_low, self.fq_up, self.bins)
                    freq_acc += self.hist_stack[index - i]
                if index + i > len(self.hist_stack) - 1:
                    pass
                else:
                    if self.hist_stack[index + i] is None:
                        left, right = self.getRange(index + i, smooth)
                        self.hist_stack[index + i] = self.fftAnalyzer(left, right, self.fq_low, self.fq_up, self.bins)
                    freq_acc += self.hist_stack[index + i]
            return freq_acc / fcount

        else:
            if self.hist_stack[index] is None:
                left, right = self.getRange(index, smooth)
                self.hist_stack[index] = self.fftAnalyzer(left, right, self.fq_low, self.fq_up, self.bins)
            return self.hist_stack[index]

    def getBeatAtFrame(self, index):
        """Return the foreground zoom factor (>= 1.0) for the beat pulse at `index`.

        A pulse fires when this frame holds the local bass-energy maximum of a
        +-fps/6 window, exceeds the detection threshold, and no pulse is
        already running. Returns 1.0 when beat detection is disabled.
        """
        if self.beat_detect > 0:
            index = self.clipRange(index)
            left = self.clipRange(index - self.fps / 6)
            right = self.clipRange(index + self.fps / 6)
            # Make sure bass energy is cached for every frame in the window.
            for i in range(left, right + 1):
                if self.hist_stack[i] is None:
                    self.getHistAtFrame(i)
                if self.beat_calc_stack[i] is None:
                    calc_nums = self.bins * self.low_range  # Frequency range for the bass
                    maxv = np.max(self.hist_stack[i][0:int(np.ceil(calc_nums))]) ** 2
                    avgv = np.average(self.hist_stack[i][0:int(np.ceil(calc_nums))])
                    self.beat_calc_stack[i] = np.sqrt(max(maxv, avgv))

            slice_stack = self.beat_calc_stack[left:right + 1]
            current_max = np.max(slice_stack)
            index_max = np.where(slice_stack == current_max)[0][0]
            # Only fire when no pulse is already in progress over [index, right].
            standby = np.sum(self.beat_stack[index:right + 1] == 1.0) == len(self.beat_stack[index:right + 1])

            if self.beat_calc_stack[index] >= self.beat_thres and index - left == index_max and standby:
                # Decaying pulse: 1.05 at the beat, easing quadratically back to 1.0.
                self.beat_stack[index:right + 1] = list(1 + 0.05 * (np.linspace(1, 0, right + 1 - index) ** 2))
            return self.beat_stack[index]
        # Bug fix: disabled beat detection used to fall through and return None,
        # which callers then used as a resize ratio; 1.0 means "no zoom".
        return 1.0

    def clipRange(self, index):
        """Clamp a (possibly fractional) frame index into [0, totalFrames) as an int."""
        if index >= self.totalFrames:
            return self.totalFrames - 1
        if index < 0:
            return 0
        return int(round(index))

    def getRange(self, idx, smooth=0):  # Get FFT range
        """Return the (left, right) sample window for frame `idx`.

        The window widens (and skews right) with the smoothing level; an
        out-of-clip idx is mapped to -5 so the window lands before sample 0
        and fftAnalyzer yields silence.
        """
        if idx < 0:
            idx = -5
        if idx > self.totalFrames:
            idx = -5
        middle = idx * self.getSampleRate() / self.fps  # center sample of this frame
        offset = self.sample_rate / 20  # base half-width: 50 ms of audio
        if smooth == 1:
            lt = int(round(middle - 1 * offset))
            rt = int(round(middle + 2 * offset))
        elif smooth == 2:
            lt = int(round(middle - 2 * offset))
            rt = int(round(middle + 2 * offset))
        elif 2 < smooth <= 5:
            lt = int(round(middle - 2 * offset))
            rt = int(round(middle + 4 * offset))
        elif smooth > 5:
            lt = int(round(middle - 3 * offset))
            rt = int(round(middle + 6 * offset))
        else:
            lt = int(round(middle - 1 * offset))
            rt = int(round(middle + 1 * offset))
        return lt, rt
158 |
159 |
def circle(draw, center, radius, fill):
    """Draw a filled circle at `center`, shrunk by 1 px per side to soften the edge."""
    cx, cy = center
    box = (cx - radius + 1, cy - radius + 1, cx + radius - 1, cy + radius - 1)
    draw.ellipse(box, fill=fill, outline=None)
163 |
164 |
def rectangle(draw, center, radius, fill):
    """Draw a filled axis-aligned square at `center`, shrunk by 1 px per side."""
    cx, cy = center
    box = (cx - radius + 1, cy - radius + 1, cx + radius - 1, cy + radius - 1)
    draw.rectangle(box, fill=fill, outline=None)
168 |
169 |
def getCycleHue(start, end, bins, index, cycle=1):
    """Map a bin index onto a hue (0..1) that sweeps back and forth between start and end degrees."""
    span = end - start
    position = index / bins * cycle
    # Triangle wave in [0, 1]: doubled distance to the nearest integer.
    blend = abs(round(position) - position) * 2
    return (span * blend + start) / 360
175 |
176 |
def getColor(bins, index, color_mode="color4x", bright=1.0, sat=0.8):
    """Return an RGBA tuple for spectrum bar `index` out of `bins`.

    `color_mode` is either a named scheme (rainbow, solid color, or gradient)
    or a raw RGB/RGBA tuple used as-is. Brightness is compressed into
    [0.4, 1.0] so bars never go fully dark.
    """
    brt = 0.4 + bright * 0.6

    # Rainbow schemes: hue cycles around the wheel 4x / 2x / 1x per revolution.
    rainbow = {"color4x": 4, "color2x": 2, "color1x": 1}
    if color_mode in rainbow:
        return hsv_to_rgb(rainbow[color_mode] * index / bins, sat, brt) + (255,)

    # White and black ignore brightness/saturation entirely.
    if color_mode == "white":
        return hsv_to_rgb(0, 0, 1.0) + (255,)
    if color_mode == "black":
        return hsv_to_rgb(0, 0, 0) + (255,)

    # Solid colors: fixed hue in degrees (gray is desaturated red).
    solid_hue = {"gray": 0, "red": 0, "green": 120, "blue": 211, "yellow": 49,
                 "magenta": 328, "purple": 274, "cyan": 184, "lightgreen": 135}
    if color_mode in solid_hue:
        saturation = 0 if color_mode == "gray" else sat
        return hsv_to_rgb(solid_hue[color_mode] / 360, saturation, brt) + (255,)

    # Gradients: hue ping-pongs between two endpoints (degrees), 4 cycles.
    gradient_range = {"green-blue": (122, 220), "magenta-purple": (300, 370),
                      "red-yellow": (-5, 40), "yellow-green": (42, 147),
                      "blue-purple": (208, 313)}
    if color_mode in gradient_range:
        lo, hi = gradient_range[color_mode]
        return hsv_to_rgb(getCycleHue(lo, hi, bins, index, 4), sat, brt) + (255,)

    # Otherwise treat color_mode as a literal color tuple; fall back to gray.
    try:
        clist = tuple(color_mode)
        return clist + (255,) if len(clist) == 3 else clist
    except:
        return hsv_to_rgb(0, 0, brt) + (255,)
226 |
227 |
class AudioVisualizer:
    """Render one spectrum frame (in one of ~23 visual styles) onto a background image.

    The spectrum is drawn on a square canvas of side min(width, height),
    supersampled by a quality-dependent ratio for antialiasing, optionally
    glowed and composited with a (rotating, beat-zoomed) foreground image.
    """

    def __init__(self, img, rad_min, rad_max, line_thick=1.0, blur=5, style=0):
        self.background = img.copy()
        self.width, self.height = self.background.size
        self.render_size = min(self.width, self.height)  # spectrum canvas is square
        self.mdpx, self.mdpy = self.render_size / 2, self.render_size / 2  # canvas center
        self.line_thick = line_thick
        # Inset the radial band so bold dots / thick lines stay inside [rad_min, rad_max].
        if style in [1, 2, 4, 6, 7, 11, 12, 15, 16, 21, 22]:
            self.rad_min = rad_min + line_thick * 1.5
            self.rad_max = rad_max - line_thick * 1.5
        elif style in [3, 5]:
            self.rad_min = rad_min + line_thick / 2
            self.rad_max = rad_max - line_thick * 1.5
        elif style in [8]:
            self.rad_min = rad_min + line_thick * 1.5
            self.rad_max = rad_max
        elif style in [18]:
            self.rad_min = rad_min
            self.rad_max = rad_max
        else:
            self.rad_min = rad_min + line_thick / 2
            self.rad_max = rad_max - line_thick / 2
        self.rad_div = self.rad_max - self.rad_min  # usable radial span
        self.blur = blur
        self.style = style

    def getFrame(self, hist, amplify=5, color_mode="color4x", bright=1.0, saturation=1.0, use_glow=True, rotate=0.0,
                 fps=30.0, frame_pt=0, bg_mode=0, fg_img=None, fg_resize=1.0, quality=3, preview=False):
        """Render the spectrum `hist` for frame `frame_pt` and return the composited image.

        `fg_resize` is the beat-pulse zoom for the foreground image; `preview`
        is accepted for interface compatibility but unused here.
        """
        bins = hist.shape[0]

        quality_list = [1, 1, 2, 2, 4, 8]
        ratio = quality_list[quality]  # Antialiasing (supersampling) ratio

        line_thick = int(round(self.line_thick * ratio))
        line_thick_bold = int(round(self.line_thick * ratio * 1.5))
        line_thick_slim = int(round(self.line_thick * ratio / 2))

        canvas = Image.new('RGBA', (self.render_size * ratio, self.render_size * ratio), (255, 255, 255, 0))
        draw = ImageDraw.Draw(canvas)

        line_graph_prev = None  # previous column's endpoints, used by style 17

        # Radial styles get a contrast boost before amplification.
        if self.style in [0, 1, 2, 3, 4, 5, 6, 7, 18, 19, 20, 21, 22]:
            hist = np.power(hist * 1.5, 1.5)
        hist = np.clip(hist * amplify, 0, 1)

        for i in range(bins):  # Draw Spectrum
            color = getColor(bins, i, color_mode, bright, saturation)
            if self.style == 0:  # Solid Line
                line_points = [self.getAxis(bins, i, self.rad_min, ratio),
                               self.getAxis(bins, i, self.rad_min + hist[i] * self.rad_div, ratio)]
                draw.line(line_points, width=line_thick, fill=color)
                circle(draw, line_points[0], line_thick_slim, color)  # round caps
                circle(draw, line_points[1], line_thick_slim, color)

            elif self.style == 1:  # Dot Line
                p_gap = line_thick_bold
                p_size = line_thick_bold
                if p_gap + p_size > 0:
                    p_n = int(((hist[i] * self.rad_div) + p_gap) / (p_gap + p_size))
                    circle(draw, self.getAxis(bins, i, self.rad_min, ratio), line_thick_bold, color)
                    for ip in range(p_n):
                        p_rad = (p_gap + p_size) * ip
                        circle(draw, self.getAxis(bins, i, self.rad_min + p_rad, ratio), line_thick_bold, color)

            elif self.style == 2:  # Single Dot
                circle(draw, self.getAxis(bins, i, self.rad_min + hist[i] * self.rad_div, ratio), line_thick_bold,
                       color)

            elif self.style == 3:  # Stem Plot: Solid Single
                line_points = [self.getAxis(bins, i, self.rad_min, ratio),
                               self.getAxis(bins, i, self.rad_min + hist[i] * self.rad_div, ratio)]
                draw.line(line_points, width=line_thick, fill=color)
                circle(draw, line_points[0], line_thick_slim, color)
                circle(draw, line_points[1], line_thick_bold, color)

            elif self.style == 4:  # Stem Plot: Solid Double
                line_points = [self.getAxis(bins, i, self.rad_min, ratio),
                               self.getAxis(bins, i, self.rad_min + hist[i] * self.rad_div, ratio)]
                draw.line(line_points, width=line_thick, fill=color)
                circle(draw, line_points[0], line_thick_bold, color)
                circle(draw, line_points[1], line_thick_bold, color)

            elif self.style == 5:  # Stem Plot: Dashed Single
                p_gap = line_thick_slim
                p_size = line_thick_slim
                if p_gap + p_size > 0:
                    p_n = int(((hist[i] * self.rad_div) + p_size) / (p_gap + p_size))
                    for ip in range(p_n):
                        p_rad = (p_gap + p_size) * ip
                        circle(draw, self.getAxis(bins, i, self.rad_min + p_rad, ratio), line_thick_slim, color)
                circle(draw, self.getAxis(bins, i, self.rad_min + hist[i] * self.rad_div, ratio), line_thick_bold,
                       color)

            elif self.style == 6:  # Stem Plot: Dashed Double
                p_gap = line_thick_slim
                p_size = line_thick_slim
                if p_gap + p_size > 0:
                    p_n = int(((hist[i] * self.rad_div) + p_size) / (p_gap + p_size))
                    for ip in range(p_n):
                        p_rad = (p_gap + p_size) * ip
                        circle(draw, self.getAxis(bins, i, self.rad_min + p_rad, ratio), line_thick_slim, color)
                circle(draw, self.getAxis(bins, i, self.rad_min, ratio), line_thick_bold, color)
                circle(draw, self.getAxis(bins, i, self.rad_min + hist[i] * self.rad_div, ratio), line_thick_bold,
                       color)

            elif self.style == 7:  # Double Dot
                circle(draw, self.getAxis(bins, i, self.rad_min, ratio), line_thick_bold, color)
                circle(draw, self.getAxis(bins, i, self.rad_min + hist[i] * self.rad_div, ratio), line_thick_bold,
                       color)

            elif self.style == 8:  # Concentric
                # One ring per group of 12 bins, radius driven by the group mean.
                if i % 12 == 0:
                    lower = i
                    upper = i + 11
                    if upper >= len(hist):
                        upper = len(hist) - 1
                    local_mean = np.mean(hist[-upper - 1:-lower - 1]) * 2
                    if local_mean > 1:
                        local_mean = 1
                    radius = self.rad_min + local_mean * self.rad_div
                    left = (self.mdpx - radius) * ratio
                    right = (self.mdpx + radius) * ratio
                    up = (self.mdpy - radius) * ratio
                    down = (self.mdpy + radius) * ratio
                    draw.ellipse((left, up, right, down), fill=None, outline=color, width=line_thick_bold)

            elif self.style == 9:  # Classic Line: Center
                mid_y = self.mdpy * ratio
                y_scale = 0.85
                low = mid_y + self.rad_max * ratio * hist[i] * y_scale
                up = mid_y - self.rad_max * ratio * hist[i] * y_scale
                gap = self.rad_max * ratio * 2 / (bins - 1)
                x_offset = gap * i + self.mdpx * ratio - self.rad_max * ratio
                line_points = [(x_offset, low), (x_offset, up)]
                draw.line(line_points, width=line_thick, fill=color)
                circle(draw, line_points[0], line_thick_slim, color)
                circle(draw, line_points[1], line_thick_slim, color)

            elif self.style == 10:  # Classic Line: Bottom
                mid_y = self.mdpy * ratio
                y_scale = 0.85
                low = mid_y + self.rad_max * ratio * y_scale
                up = low - self.rad_max * ratio * hist[i] * y_scale * 2
                gap = self.rad_max * ratio * 2 / (bins - 1)
                x_offset = gap * i + self.mdpx * ratio - self.rad_max * ratio
                line_points = [(x_offset, low), (x_offset, up)]
                draw.line(line_points, width=line_thick, fill=color)
                circle(draw, line_points[0], line_thick_slim, color)
                circle(draw, line_points[1], line_thick_slim, color)

            elif self.style == 11:  # Classic Round Dot: Center
                mid_y = self.mdpy * ratio
                y_scale = 0.85
                low = mid_y + self.rad_max * ratio * hist[i] * y_scale
                gap = self.rad_max * ratio * 2 / (bins - 1)
                x_offset = gap * i + self.mdpx * ratio - self.rad_max * ratio
                p_gap = line_thick_bold * 2
                p_size = line_thick_bold
                if p_gap + p_size > 0:
                    p_n = int((low - mid_y + p_gap) / (p_gap + p_size))
                    if p_n < 1:
                        p_n = 1
                    for ip in range(p_n):
                        d_y = ip * (p_gap + p_size)
                        circle(draw, (x_offset, mid_y + d_y), line_thick_bold, color)  # mirrored about center
                        circle(draw, (x_offset, mid_y - d_y), line_thick_bold, color)

            elif self.style == 12:  # Classic Round Dot: Bottom
                mid_y = self.mdpy * ratio
                y_scale = 0.85
                low = mid_y + self.rad_max * ratio * y_scale
                up = low - self.rad_max * ratio * hist[i] * y_scale * 2
                gap = self.rad_max * ratio * 2 / (bins - 1)
                x_offset = gap * i + self.mdpx * ratio - self.rad_max * ratio
                p_gap = line_thick_bold * 2
                p_size = line_thick_bold
                if p_gap + p_size > 0:
                    p_n = int((low - up + p_gap) / (p_gap + p_size))
                    if p_n < 1:
                        p_n = 1
                    for ip in range(p_n):
                        p_y = low - ip * (p_gap + p_size)
                        circle(draw, (x_offset, p_y), line_thick_bold, color)

            elif self.style == 13:  # Classic Square Dot: Center
                mid_y = self.mdpy * ratio
                y_scale = 0.85
                low = mid_y + self.rad_max * ratio * hist[i] * y_scale
                gap = self.rad_max * ratio * 2 / (bins - 1)
                x_offset = gap * i + self.mdpx * ratio - self.rad_max * ratio
                p_gap = line_thick_bold * 2
                p_size = line_thick_bold
                if p_gap + p_size > 0:
                    p_n = int((low - mid_y + p_gap) / (p_gap + p_size))
                    if p_n < 1:
                        p_n = 1
                    for ip in range(p_n):
                        d_y = ip * (p_gap + p_size)
                        rectangle(draw, (x_offset, mid_y + d_y), line_thick_bold, color)
                        rectangle(draw, (x_offset, mid_y - d_y), line_thick_bold, color)

            elif self.style == 14:  # Classic Square Dot: Bottom
                mid_y = self.mdpy * ratio
                y_scale = 0.85
                low = mid_y + self.rad_max * ratio * y_scale
                up = low - self.rad_max * ratio * hist[i] * y_scale * 2
                gap = self.rad_max * ratio * 2 / (bins - 1)
                x_offset = gap * i + self.mdpx * ratio - self.rad_max * ratio
                p_gap = line_thick_bold * 2
                p_size = line_thick_bold
                if p_gap + p_size > 0:
                    p_n = int((low - up + p_gap) / (p_gap + p_size))
                    if p_n < 1:
                        p_n = 1
                    for ip in range(p_n):
                        p_y = low - ip * (p_gap + p_size)
                        rectangle(draw, (x_offset, p_y), line_thick_bold, color)

            elif self.style == 15:  # Classic Rectangle: Center
                mid_y = self.mdpy * ratio
                y_scale = 0.85
                low = mid_y + self.rad_max * ratio * hist[i] * y_scale
                up = mid_y - self.rad_max * ratio * hist[i] * y_scale
                gap = self.rad_max * ratio * 2 / (bins - 1)
                x_offset = gap * i + self.mdpx * ratio - self.rad_max * ratio
                draw.rectangle((x_offset - line_thick_bold, low + line_thick_bold, x_offset + line_thick_bold,
                                up - line_thick_bold), fill=color)

            elif self.style == 16:  # Classic Rectangle: Bottom
                mid_y = self.mdpy * ratio
                y_scale = 0.85
                low = mid_y + self.rad_max * ratio * y_scale
                up = low - self.rad_max * ratio * hist[i] * y_scale * 2
                gap = self.rad_max * ratio * 2 / (bins - 1)
                x_offset = gap * i + self.mdpx * ratio - self.rad_max * ratio
                draw.rectangle((x_offset - line_thick_bold, low + line_thick_bold, x_offset + line_thick_bold,
                                up - line_thick_bold), fill=color)

            elif self.style == 17:  # Line Graph
                mid_y = self.mdpy * ratio
                y_scale = 0.85
                low = mid_y + self.rad_max * ratio * y_scale
                up = low - self.rad_max * ratio * hist[i] * y_scale * 2
                gap = self.rad_max * ratio * 2 / (bins - 1)
                x_offset = gap * i + self.mdpx * ratio - self.rad_max * ratio
                if line_graph_prev is None:
                    # First column: draw its own stem before connecting.
                    line_graph_prev = [(x_offset, low), (x_offset, up)]
                    draw.line(((x_offset, low), (x_offset, up)), width=line_thick, fill=color)
                    circle(draw, (x_offset, low), line_thick_slim, color)
                    circle(draw, (x_offset, up), line_thick_slim, color)

                # Connect the previous top point to this column's top point.
                draw.line((line_graph_prev[1], (x_offset, up)), width=line_thick, fill=color)
                circle(draw, line_graph_prev[1], line_thick_slim, color)
                circle(draw, (x_offset, up), line_thick_slim, color)

                if i >= bins - 1:
                    # Last column: close with its own stem.
                    draw.line(((x_offset, low), (x_offset, up)), width=line_thick, fill=color)
                    circle(draw, (x_offset, low), line_thick_slim, color)
                    circle(draw, (x_offset, up), line_thick_slim, color)
                line_graph_prev = [(x_offset, low), (x_offset, up)]

            elif self.style == 18:  # Zooming Circles
                center_rad = (self.rad_max - self.rad_min) / 2 + self.rad_min
                center = self.getAxis(bins, i, center_rad, ratio)
                center_next = self.getAxis(bins, i + 1, center_rad, ratio)
                center_gap = np.sqrt((center_next[0] - center[0]) ** 2 + (center_next[1] - center[1]) ** 2) / 2 * ratio
                max_gap = min(self.rad_div * ratio, center_gap)
                factor = np.power(np.clip(self.line_thick * 20 / min(self.width, self.height), 0.0, 1.0), 0.3)
                rad_draw = int(round(hist[i] * factor * max_gap / 2))
                circle(draw, center, rad_draw, color)

            elif self.style == 19:  # Solid Line: Center
                line_points = [
                    self.getAxis(bins, i, self.rad_min + self.rad_div / 2 - hist[i] * self.rad_div / 2, ratio),
                    self.getAxis(bins, i, self.rad_min + self.rad_div / 2 + hist[i] * self.rad_div / 2, ratio)]
                draw.line(line_points, width=line_thick, fill=color)
                circle(draw, line_points[0], line_thick_slim, color)
                circle(draw, line_points[1], line_thick_slim, color)

            elif self.style == 20:  # Solid Line: Reverse
                line_points = [
                    self.getAxis(bins, i, self.rad_min + self.rad_div - hist[i] * self.rad_div, ratio),
                    self.getAxis(bins, i, self.rad_min + self.rad_div, ratio)]
                draw.line(line_points, width=line_thick, fill=color)
                circle(draw, line_points[0], line_thick_slim, color)
                circle(draw, line_points[1], line_thick_slim, color)

            elif self.style == 21:  # Double Dot: Center
                circle(draw, self.getAxis(bins, i, self.rad_min + self.rad_div / 2 - hist[i] * self.rad_div / 2, ratio),
                       line_thick_bold, color)
                circle(draw, self.getAxis(bins, i, self.rad_min + self.rad_div / 2 + hist[i] * self.rad_div / 2, ratio),
                       line_thick_bold,
                       color)

            elif self.style == 22:  # Double Dot: Reverse
                circle(draw,
                       self.getAxis(bins, i, self.rad_min + self.rad_div - hist[i] * self.rad_div, ratio),
                       line_thick_bold, color)
                circle(draw, self.getAxis(bins, i, self.rad_min + self.rad_div, ratio), line_thick_bold,
                       color)

            else:  # Otherwise (-1): No Spectrum
                pass

        if use_glow:
            canvas = glowFx(canvas, self.blur * ratio, 1.5)

        # Downsample to the final size. Bug fix: Image.ANTIALIAS was removed in
        # Pillow 10; Image.LANCZOS is the same filter (alias since Pillow 2.7).
        canvas = canvas.resize((self.render_size, self.render_size), Image.LANCZOS)

        if fg_img is not None and bg_mode > -2 and (not bg_mode == 2):
            if rotate != 0:
                # `rotate` is revolutions per minute; convert to degrees at this frame.
                angle = -(rotate * frame_pt / fps / 60) * 360
                fg_img = fg_img.rotate(angle, resample=Image.BICUBIC)
            if fg_resize != 1.0:
                fg_img = resizeRatio(fg_img, fg_resize)  # beat-pulse zoom
            canvas = pasteMiddle(fg_img, canvas)

        return pasteMiddle(canvas, self.background.copy())

    def getAxis(self, bins, index, radius, ratio):
        """Return the supersampled (x, y) position of bin `index` at `radius`.

        Bin 0 starts at 7 o'clock (-4pi/3 from the positive x axis) and bins
        advance clockwise around the circle.
        """
        div = 2 * np.pi / bins
        angle = div * index - np.pi / 2 - np.pi * 5 / 6
        ox = (self.mdpx + radius * np.cos(angle)) * ratio
        oy = (self.mdpy + radius * np.sin(angle)) * ratio
        return ox, oy
554 |
555 |
def squareSpace(start, end, num):
    """Return `num` integers from `start` to `end` whose square roots are evenly spaced.

    Used to build frequency-bin edges that are denser at the low end.
    """
    roots = np.linspace(np.sqrt(start), np.sqrt(end), num=num)
    return np.round(roots ** 2).astype(int)
561 |
562 |
def linearRange(startx, endx, starty, endy, x):
    """Evaluate at `x` the line through (startx, starty) and (endx, endy)."""
    slope = (endy - starty) / (endx - startx)
    return starty + slope * (x - startx)
565 |
566 |
# Piecewise-linear psychoacoustic weighting table: each row is
# (freq_low_Hz, freq_high_Hz, weight_at_low, weight_at_high).
psy_map = [[10, 32, 3.0, 2.5],
           [32, 48, 2.5, 1.0],
           [48, 64, 1.0, 1.0],
           [64, 150, 1.0, 1.3],
           [150, 2000, 1.3, 1.0],
           [2000, 8000, 1.0, 1.2],
           [8000, 24000, 1.2, 4.0]]


def psyModel(freq):  # Get psychoacoustical model
    """Return the psychoacoustic weight for `freq` (Hz); 0 outside 10-24000 Hz."""
    for lo, hi, w_lo, w_hi in psy_map:
        if lo <= freq < hi:
            return linearRange(lo, hi, w_lo, w_hi, freq)
    return 0
581 |
582 |
if __name__ == '__main__':
    # Library module: nothing runs standalone; the guard exists only for ad-hoc testing.
    #print(linearRange(8000, 20000, 1, 0, 10000))
    pass
586 |
--------------------------------------------------------------------------------
/SourceCode/FanBlender.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | """
5 | Fanseline Visualizer
6 | 帆室邻音频可视化视频制作工具
7 | https://github.com/FerryYoungFan/FanselineVisualizer
8 |
9 | By Twitter @FanKetchup
10 | https://twitter.com/FanKetchup
11 | """
12 |
13 | __version__ = "1.1.6" # Work with PYQT5
14 |
15 | from FanWheels_PIL import *
16 | from FanWheels_ffmpeg import *
17 | from FanAudioVisualizer import AudioVisualizer, AudioAnalyzer
18 |
19 | import imageio
20 | import imageio_ffmpeg
21 | import numpy as np
22 |
23 | import threading, os, sys
24 | from PyQt5 import QtCore # Notice: You can use time.sleep() if you are not using PyQt5
25 |
26 |
class blendingThread(threading.Thread):
    """Worker thread that renders visualizer frames into parent.frame_buffer.

    Each of `total_thread` workers starts at its own `thread_num` offset and
    advances in strides of `total_thread`, so frame indices are interleaved
    across workers without overlap.
    """

    def __init__(self, threadID, name, counter, parent, total_thread, thread_num):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter  # not used in run(); kept for symmetry with encodingThread
        self.parent = parent  # the FanBlender instance that owns buffers and settings
        self.total_thread = total_thread  # total number of blending workers
        self.thread_num = thread_num  # this worker's 0-based index
        self.frame_pt = thread_num  # first frame index this worker renders

    def run(self):
        # Render every total_thread-th frame until the clip ends or the parent aborts.
        while self.frame_pt < self.parent.total_frames and self.parent.isRunning:
            self.parent.frame_lock[self.frame_pt] = self.thread_num + 1  # mark the frame as claimed
            self.parent.frame_buffer[self.frame_pt] = self.parent.visualizer.getFrame(
                hist=self.parent.analyzer.getHistAtFrame(self.frame_pt),
                amplify=self.parent._amplify,
                color_mode=self.parent.spectrum_color,
                bright=self.parent._bright,
                saturation=self.parent._saturation,
                use_glow=self.parent.use_glow,
                rotate=self.parent.rotate,
                fps=self.parent.fps,
                frame_pt=self.frame_pt,
                bg_mode=self.parent.bg_mode,
                fg_img=self.parent.fg_img,
                fg_resize=self.parent.analyzer.getBeatAtFrame(self.frame_pt),  # beat pulse scales the foreground
                quality=self.parent.quality)
            self.frame_pt = self.frame_pt + self.total_thread
        print("Thread {0} -end".format(self.thread_num))
57 |
58 |
class encodingThread(threading.Thread):
    """Consumer thread that appends finished frames to the video writer in order.

    Runs alongside the blendingThread workers: whenever the next frame in
    sequence is ready it is encoded and its buffer slot freed; otherwise the
    thread pushes a preview, reports progress, and waits briefly.
    """

    def __init__(self, threadID, name, counter, parent):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter  # not used in run(); kept for symmetry with blendingThread
        self.parent = parent  # the FanBlender instance that owns the writer and buffers

    def run(self):
        realTimePrev = None  # most recently encoded frame, shown as a GUI preview while waiting
        while self.parent.encoder_pt < self.parent.total_frames and self.parent.isRunning:
            if self.parent.frame_buffer[self.parent.encoder_pt] is not None:
                # Next frame in sequence is ready: encode it and free its slot.
                self.parent.writer.append_data(np.asarray(self.parent.frame_buffer[self.parent.encoder_pt]))
                realTimePrev = self.parent.frame_buffer[self.parent.encoder_pt]
                self.parent.frame_buffer[self.parent.encoder_pt] = None  # release memory early
                self.parent.encoder_pt = self.parent.encoder_pt + 1
            else:
                # Frame not rendered yet: surface progress and wait for the workers.
                if realTimePrev:
                    self.parent.previewRealTime(realTimePrev)
                    realTimePrev = None
                self.parent.log("Processing:{0}/{1}".format(self.parent.encoder_pt, self.parent.total_frames))
                self.parent.progress(self.parent.encoder_pt, self.parent.total_frames)

                # The following 3 lines can be replaced by time.sleep(0.5) if you are not using PyQt5
                loop = QtCore.QEventLoop()
                QtCore.QTimer.singleShot(500, loop.quit)
                loop.exec_()

        # Final progress report after the loop ends (completion or abort).
        self.parent.previewRealTime(realTimePrev)
        realTimePrev = None
        self.parent.log("Processing:{0}/{1}".format(self.parent.encoder_pt, self.parent.total_frames))
        self.parent.progress(self.parent.encoder_pt, self.parent.total_frames)
        if self.parent.encoder_pt >= self.parent.total_frames:
            self.parent.log("Rendering Done!")
93 |
94 |
95 | class FanBlender:
96 | def __init__(self):
97 | self.color_dic = {
98 | "Rainbow 4x": "color4x",
99 | "Rainbow 2x": "color2x",
100 | "Rainbow 1x": "color1x",
101 | "White": "white",
102 | "Black": "black",
103 | "Gray": "gray",
104 | "Red": "red",
105 | "Green": "green",
106 | "Blue": "blue",
107 | "Yellow": "yellow",
108 | "Magenta": "magenta",
109 | "Purple": "purple",
110 | "Cyan": "cyan",
111 | "Light Green": "lightgreen",
112 | "Gradient: Green - Blue": "green-blue",
113 | "Gradient: Magenta - Purple": "magenta-purple",
114 | "Gradient: Red - Yellow": "red-yellow",
115 | "Gradient: Yellow - Green": "yellow-green",
116 | "Gradient: Blue - Purple": "blue-purple",
117 | }
118 | self.image_path = None
119 | self.bg_path = None
120 | self.sound_path = None
121 | self.logo_path = None
122 | self.output_path = None
123 | self.bg_img = None
124 | self.logofile = None
125 |
126 | self.text_bottom = ""
127 | self.font = getPath("Source/font.otf")
128 |
129 | self.frame_width = 540
130 | self.frame_height = 540
131 | self.fps = 30
132 | self.bit_rate = 0.6 # in Mb/s
133 | self.audio_bit_rate = 320 # in kb/s
134 | self.audio_normal = False
135 |
136 | self.spectrum_color = "color4x"
137 | self._bright = 1.0
138 | self._saturation = 1.0
139 | self.bins = 80
140 | self.smooth = 0
141 | self.fq_low = 20
142 | self.fq_up = 1500
143 | self.scalar = 1.0
144 |
145 | self._debug_bg = False
146 | self._temp_audio_path = getPath("Temp/temp.wav")
147 | self._temp_video_path = getPath("Temp/temp.mp4")
148 |
149 | try:
150 | self._ffmpeg_path = imageio_ffmpeg.get_ffmpeg_exe()
151 | except:
152 | self._ffmpeg_path = None
153 |
154 | self._frame_size = 0
155 | self._relsize = 1.0
156 | self._font_size = 0
157 | self._text_color = (0, 0, 0, 0)
158 | self._text_glow = False
159 | self._yoffset = 0
160 | # A good ratio for text and frame size
161 |
162 | self._blur = 0
163 | self._blur_bg = 0
164 | self.blur_bg = True
165 | self.use_glow = False
166 | self.style = 0
167 | self.linewidth = 1.0
168 | self.rotate = 0
169 | self.beat_detect = 0
170 | self.low_range = 10
171 | self._line_thick = 0
172 |
173 | self._amplify = self.setAmplify()
174 |
175 | self.visualizer = None
176 | self.analyzer = None
177 | self.fg_img = None
178 |
179 | self.writer = None
180 | self.total_frames = None
181 | self.frame_buffer = []
182 | self.frame_pt = 0
183 | self.encoder_pt = 0
184 |
185 | self.isRunning = False
186 | self._console = None
187 |
188 | self.frame_lock = None
189 |
190 | self.bg_mode = 0
191 | self.quality = 3
192 | self.bg_blended = False
193 | self.ffmpegCheck()
194 |
195 | def calcRel(self):
196 | self._frame_size = min(self.frame_width, self.frame_height)
197 | self._font_size = int(round(30 / 1080 * self._frame_size * self._relsize))
198 | self._blur = int(round(2 / 1080 * self._frame_size))
199 | self._blur_bg = int(round(41 / 1080 * self._frame_size))
200 | self._line_thick = self.linewidth * 4 / 1080 * self._frame_size
201 | self._yoffset = clip(self.frame_height - self._font_size * 2.1, self.frame_height * 0.95,
202 | self.frame_height * 0.95 - self._font_size)
203 |
204 | def ffmpegCheck(self):
205 | if self._ffmpeg_path is None:
206 | self.log("Error: FFMPEG not found!")
207 | if self._console:
208 | try:
209 | self._console.ffmpegWarning()
210 | except:
211 | pass
212 | return False
213 | else:
214 | return True
215 |
216 | def setAmplify(self):
217 | return self.scalar * 5 * np.sqrt(self.bins / 80) * np.power(1500 / (self.fq_up - self.fq_low), 0.5)
218 |
219 | def ensure_dir(self, file_path):
220 | directory = os.path.dirname(file_path)
221 | if not os.path.exists(directory):
222 | os.makedirs(directory)
223 |
224 | def fileError(self, filepath):
225 | if filepath is not None:
226 | self.log("Error: File {0} does not exist.".format(filepath))
227 |
228 | def setConsole(self, console=None):
229 | if console:
230 | self._console = console
231 |
232 | def audioError(self):
233 | if self._console:
234 | try:
235 | self._console.audioWarning()
236 | except:
237 | pass
238 |
239 | def log(self, content=""):
240 | print(content)
241 | if self._console:
242 | try:
243 | self._console.log(content)
244 | except:
245 | pass
246 |
247 | def progress(self, value, total):
248 | if self._console:
249 | try:
250 | self._console.progressbar(value, total)
251 | except:
252 | return
253 |
254 | def freezeConsole(self, flag=True):
255 | if self._console:
256 | try:
257 | self._console.freeze(flag)
258 | except:
259 | return
260 |
261 | def setFilePath(self, image_path=None, bg_path=None, sound_path=None, logo_path=None):
262 | self.image_path = self.imgFileCheck(image_path)
263 | self.bg_path = self.imgFileCheck(bg_path)
264 | self.logo_path = self.imgFileCheck(logo_path)
265 |
266 | if sound_path is not None:
267 | if os.path.isfile(sound_path):
268 | self.sound_path = sound_path
269 | else:
270 | self.fileError(sound_path)
271 |
272 | def imgFileCheck(self, file_path):
273 | if file_path is None:
274 | return None
275 | if isinstance(file_path, tuple) and (len(file_path) in [3, 4]):
276 | self.visualizer = None
277 | self.bg_blended = False
278 | if len(file_path) == 3:
279 | return file_path + (255,)
280 | return file_path # RGBA Color Tuple
281 | if os.path.isfile(file_path):
282 | self.visualizer = None
283 | self.bg_blended = False
284 | return file_path
285 | return None
286 |
287 | def setOutputPath(self, output_path="", filename=""):
288 | if not filename:
289 | filename = "Visualize.mp4"
290 | if output_path:
291 | self.ensure_dir(os.path.join(output_path, filename))
292 | self.output_path = cvtFileName(os.path.join(output_path, filename), "mp4")
293 |
294 | def setText(self, text="", font="", relsize=None, text_color=(255, 255, 255, 255), text_glow=None):
295 | self.text_bottom = text
296 | if not font:
297 | if os.path.exists(getPath("Source/font.otf")):
298 | font = getPath("Source/font.otf")
299 | elif os.path.exists(getPath("Source/font.ttf")):
300 | font = getPath("Source/font.ttf")
301 | else:
302 | font = "Arial.ttf"
303 | if relsize is not None:
304 | self._relsize = clip(relsize, 0.1, 5)
305 | if text_color is not None:
306 | self._text_color = text_color
307 | if text_glow is not None:
308 | self._text_glow = text_glow
309 | self.font = font
310 |
311 | def setSpec(self, bins=None, lower=None, upper=None, color=None, bright=None, saturation=None, scalar=None,
312 | smooth=None, style=None, linewidth=None, rotate=None, beat_detect=None, low_range=None):
313 | if bins is not None:
314 | self.bins = int(clip(bins, 2, 250))
315 |
316 | if lower is not None:
317 | self.fq_low = int(clip(lower, 16, 22000))
318 |
319 | if upper is not None:
320 | upper = int(clip(upper, 16, 22000))
321 | if upper <= self.fq_low:
322 | upper = self.fq_low + 1
323 | self.fq_up = int(upper)
324 |
325 | if color is not None:
326 | self.spectrum_color = color
327 |
328 | if bright is not None:
329 | self._bright = clip(bright, 0, 1)
330 |
331 | if saturation is not None:
332 | self._saturation = clip(saturation, 0, 1)
333 |
334 | if scalar is not None:
335 | self.scalar = clip(scalar, 0.1, 10)
336 |
337 | if smooth is not None:
338 | self.smooth = int(round(clip(smooth, 0, 15)))
339 |
340 | if style is not None:
341 | self.style = style
342 |
343 | if linewidth is not None:
344 | self.linewidth = clip(linewidth, 0.01, 50)
345 |
346 | if rotate is not None:
347 | self.rotate = float(rotate)
348 |
349 | if beat_detect is not None:
350 | self.beat_detect = clip(beat_detect, 0, 100)
351 |
352 | if low_range is not None:
353 | self.low_range = clip(low_range, 0, 100)
354 |
355 | self._amplify = self.setAmplify()
356 | self.visualizer = None
357 |
358 | def setVideoInfo(self, width=None, height=None, fps=None, br_Mbps=None, blur_bg=None,
359 | use_glow=None, bg_mode=None, quality=None):
360 | if width is not None:
361 | self.frame_width = int(clip(width, 16, 4096))
362 | if height is not None:
363 | self.frame_height = int(clip(height, 16, 4096))
364 | if fps is not None:
365 | self.fps = clip(fps, 1, 120)
366 | if br_Mbps is None:
367 | br_Mbps = 15 * (self.frame_width * self.frame_height * self.fps) / (1920 * 1080 * 30)
368 | self.bit_rate = br_Mbps
369 | else:
370 | self.bit_rate = clip(br_Mbps, 0.01, 200)
371 |
372 | if blur_bg is not None:
373 | self.blur_bg = blur_bg
374 |
375 | if use_glow is not None:
376 | self.use_glow = use_glow
377 |
378 | if bg_mode is not None:
379 | self.bg_mode = bg_mode
380 |
381 | if quality is not None:
382 | self.quality = int(clip(quality, 1, 5))
383 |
384 | self.visualizer = None
385 | self.bg_blended = False
386 |
387 | def setAudioInfo(self, normal=None, br_kbps=None):
388 | if normal is not None:
389 | self.audio_normal = normal
390 |
391 | if br_kbps is not None:
392 | self.audio_bit_rate = int(round(clip(br_kbps, 5, 10000)))
393 |
394 |     def genBackground(self, forceRefresh=False, preview=False):
        # Rebuild the cached background, foreground disc and AudioVisualizer.
        # forceRefresh: rebuild even when a blended background is cached.
        # preview: not read in this body — presumably kept for API compatibility; confirm.
395 |         self.calcRel()
396 |         if not self.bg_blended or forceRefresh:
397 |             self.log("Rendering Background...")
398 |
399 |             image = openImage(self.image_path, "RGBA",
400 |                               ["Source/fallback.png", "Source/fallback.jpg", (127, 127, 127, 127)])
401 |             bg = openImage(self.bg_path, "RGB", [None])
402 |             self.logofile = openImage(self.logo_path, "RGBA", [None])
403 |
404 |             if bg is None:
405 |                 bg = image.copy()
406 |
        # bg_mode < 0 selects transparent output: start from an empty RGBA canvas.
407 |             if self.bg_mode < 0:
408 |                 self.bg_img = Image.new("RGBA", (self.frame_width, self.frame_height), (0, 0, 0, 0))
409 |             else:
410 |                 if self.blur_bg:
411 |                     self.bg_img = genBG(bg, size=(self.frame_width, self.frame_height), blur=self._blur_bg, bright=0.3)
412 |                 else:
413 |                     self.bg_img = genBG(bg, size=(self.frame_width, self.frame_height), blur=0, bright=1.0)
414 |             self.log("Rendering Background... Done!")
415 |
        # Foreground disc with a blurred alpha rim (skipped for background-only mode 2).
416 |         if (self.bg_mode >= -1) and (not self.bg_mode == 2):
417 |             self.fg_img = cropCircle(image, size=self._frame_size // 2, quality=self.quality)
418 |             base_fg = self.fg_img.split()[-1]
419 |             base_w = base_fg.size[0] + int(round(self._blur * 4))
420 |             base_h = base_fg.size[1] + int(round(self._blur * 4))
421 |             base_fg = pasteMiddle(base_fg, Image.new("L", (base_w, base_h), 0))
422 |             base_fg = base_fg.filter(ImageFilter.GaussianBlur(radius=self._blur * 2))
423 |             white_fg = Image.new("L", (base_w, base_h), 0)
424 |             self.fg_img = pasteMiddle(self.fg_img, Image.merge('RGBA', (white_fg, white_fg, white_fg, base_fg)))
425 |
        # NOTE(review): the indentation of the next three lines looks inconsistent in
        # this extract (8- vs 12-space) — verify the nesting against the original file.
426 |         self.bg_img = glowText(self.bg_img, self.text_bottom, self._font_size, self.font, color=self._text_color,
427 |                                blur=self._blur, logo=self.logofile, use_glow=self._text_glow, yoffset=self._yoffset)
428 |             self.bg_blended = True
429 |
        # Spectrum radius: stay clear of the caption/logo band unless both are absent.
430 |         rad_max = (self._yoffset - self.frame_height / 2) * 0.97
431 |         if self.text_bottom is None or self.text_bottom == "":
432 |             if self.logo_path is None or not os.path.exists(self.logo_path):
433 |                 rad_max = self.frame_height / 2
434 |         self.visualizer = AudioVisualizer(img=self.bg_img,
435 |                                           rad_min=self._frame_size / 4 * 1.1,
436 |                                           rad_max=min(rad_max, self._frame_size / 2.1),
437 |                                           line_thick=self._line_thick,
438 |                                           blur=self._blur, style=self.style)
439 |
440 | def previewBackground(self, localViewer=False, forceRefresh=False):
441 | self.genBackground(forceRefresh)
442 | xs = np.linspace(0, 10 * np.pi, self.bins)
443 | ys = (0.5 + 0.5 * np.cos(xs)) * self.scalar
444 | frame_sample = self.visualizer.getFrame(hist=ys, amplify=1, color_mode=self.spectrum_color, bright=self._bright,
445 | saturation=self._saturation, use_glow=self.use_glow, rotate=self.rotate,
446 | fps=30, frame_pt=90, bg_mode=self.bg_mode, fg_img=self.fg_img,
447 | fg_resize=(self.beat_detect / 100) * 0.05 + 1, quality=self.quality)
448 | if localViewer:
449 | frame_sample.show()
450 | return frame_sample
451 |
452 | def previewRealTime(self, img):
453 | if self._console:
454 | try:
455 | self._console.realTime(img)
456 | except:
457 | return
458 |
459 | def genAnalyzer(self):
460 | if not self.ffmpegCheck():
461 | return
462 | if self.sound_path is None or not os.path.exists(str(self.sound_path)):
463 | self.log("Error: Audio file not found!")
464 | return
465 | try:
466 | self.ensure_dir(self._temp_audio_path)
467 | toTempWaveFile(self.sound_path, self._temp_audio_path, self._console)
468 | except:
469 | self.fileError(self.sound_path)
470 | self.analyzer = None
471 | return
472 | try:
473 | self.analyzer = AudioAnalyzer(file_path=self._temp_audio_path,
474 | ffmpeg_path=self._ffmpeg_path,
475 | fps=self.fps,
476 | fq_low=self.fq_low,
477 | fq_up=self.fq_up,
478 | bins=self.bins,
479 | smooth=self.smooth,
480 | beat_detect=self.beat_detect,
481 | low_range=self.low_range)
482 | except:
483 | self.fileError(self._temp_audio_path)
484 | self.analyzer = None
485 | return
486 |
487 |     def runBlending(self):
        # Full render pipeline: build background, analyze audio, fan the frame
        # rendering out to worker threads, encode in order, then mux with audio.
        # Re-entry is refused while a render is in flight.
488 |         if self.isRunning:
489 |             return
490 |         self.removeTemp()
491 |         self.freezeConsole(True)
492 |         self.genBackground(forceRefresh=True)
493 |         if not self.ffmpegCheck():
494 |             self.freezeConsole(False)
495 |             return
496 |         self.isRunning = True
497 |         self.genAnalyzer()
498 |
        # genAnalyzer (or the GUI) may have cleared isRunning to abort early.
499 |         if not self.isRunning:
500 |             self.removeTemp()
501 |             self.isRunning = False
502 |             self.freezeConsole(False)
503 |             return
504 |
505 |         if self._temp_audio_path is None or not os.path.exists(str(self._temp_audio_path)):
506 |             self.freezeConsole(False)
507 |             self.audioError()
508 |             self.isRunning = False
509 |             return
510 |
511 |         if self.analyzer is None:
512 |             self.log("Error: Analyzer not found")
513 |             self.isRunning = False
514 |             self.freezeConsole(False)
515 |             return
516 |         if self.visualizer is None:
517 |             self.log("Error: Visualizer not found")
518 |             self.isRunning = False
519 |             self.freezeConsole(False)
520 |             return
521 |
        # Transparent modes need an alpha-capable intermediate (PNG-in-MOV);
        # otherwise write a plain bitrate-controlled MP4.
522 |         if self.bg_mode < 0:
523 |             self.writer = imageio.get_writer(cvtFileName(self._temp_video_path, "mov"),
524 |                                              fps=self.fps,
525 |                                              macro_block_size=None,
526 |                                              format='FFMPEG', codec="png", pixelformat="rgba")
527 |         else:
528 |             self.writer = imageio.get_writer(self._temp_video_path, fps=self.fps, macro_block_size=None,
529 |                                              bitrate=int(self.bit_rate * 1000000))
530 |         self.total_frames = self.analyzer.getTotalFrames()
531 |         self.frame_buffer = [None] * self.total_frames
532 |         self.encoder_pt = 0
533 |
        # One encoder thread drains frame_buffer in order; N blending threads
        # fill it in an interleaved pattern guarded by frame_lock.
534 |         thread_encode = encodingThread(1, "Thread-1", 1, self)
535 |         thread_stack = []
536 |         thread_num = 4
537 |         cpu_count = os.cpu_count()
538 |         if cpu_count is not None:
539 |             thread_num = cpu_count // 2
540 |             if thread_num < 1:
541 |                 thread_num = 1
542 |         print("CPU Thread for Rendering: " + str(thread_num))
543 |
544 |         self.frame_lock = np.zeros(self.total_frames, dtype=np.uint8)
545 |         for ith in range(thread_num):
546 |             thread_stack.append(blendingThread(ith + 2, "Thread-" + str(ith + 2), ith + 2, self, thread_num, ith))
547 |         thread_encode.start()
548 |         for ith in range(thread_num):
549 |             thread_stack[ith].start()
550 |         thread_encode.join()
551 |         for ith in range(thread_num):
552 |             thread_stack[ith].join()
553 |         self.writer.close()
        # Mux the silent video with the original audio track unless aborted.
554 |         if self.isRunning:
555 |             self.log("Output path: ")
556 |             self.log(str(self.output_path))
557 |             self.log("Combining Videos...")
558 |             audio_br = str(self.audio_bit_rate) + "k"
559 |             if self.bg_mode < 0:
560 |                 combineVideo(cvtFileName(self._temp_video_path, "mov"), self.sound_path,
561 |                              cvtFileName(self.output_path, "mov"), audio_br, self.audio_normal, console=self._console)
562 |             else:
563 |                 combineVideo(self._temp_video_path, self.sound_path, self.output_path, audio_br, self.audio_normal,
564 |                              console=self._console)
565 |             self.log("Combining Videos... Done!")
566 |         else:
567 |             self.log("Rendering Aborted!")
568 |
569 |         self.analyzer = None
570 |         self.removeTemp()
571 |         self.isRunning = False
572 |         self.freezeConsole(False)
573 |
574 | def getOutputPath(self):
575 | if self.bg_mode < 0:
576 | return cvtFileName(self.output_path, "mov")
577 | else:
578 | return cvtFileName(self.output_path, "mp4")
579 |
580 | def removeTemp(self):
581 | def removeFile(file_path):
582 | try:
583 | os.remove(file_path)
584 | except:
585 | pass
586 |
587 | removeFile(self._temp_audio_path)
588 | removeFile(self._temp_video_path)
589 | removeFile(cvtFileName(self._temp_video_path, "mov"))
590 |
591 |
def clip(value, low_in=0.0, up_in=0.0):
    """Clamp *value* into [low_in, up_in]; the bounds may be given in either order.

    None is treated as 0 so unset settings can be passed through safely.
    """
    if value is None:
        return 0
    low, up = (up_in, low_in) if low_in > up_in else (low_in, up_in)
    return min(max(value, low), up)
604 |
605 |
def getPath(fileName):  # for different operating systems
    """Resolve *fileName* relative to the script's directory, using forward slashes."""
    base = os.path.dirname(sys.argv[0])
    return os.path.join(base, fileName).replace("\\", "/")
610 |
611 |
if __name__ == '__main__':
    # Example of Using FanBlender

    fb = FanBlender()  # Initialize Blender (Render)

    # Set file path.
    # You can also use an RGBA color as a path to generate a monocolor image,
    # e.g. image_path=(255, 0, 0, 255)
    fb.setFilePath(image_path="Source/fallback.png",
                   bg_path="Source/background.jpg",
                   sound_path="Source/test.mp3",
                   logo_path="Source/logo.png")

    fb.setOutputPath(output_path="./Output",
                     filename="test.mp4")  # Set Output Path

    # Set text at the bottom (relative font size: 0.3 - 5.0); color format: RGBA.
    fb.setText(text="Your Text Here", font="Source/font.otf",
               relsize=1.0, text_color=(255, 255, 255, 255), text_glow=True)

    # Set spectrum:
    #   bins         - number of spectrum bars
    #   lower/upper  - analyzer frequency range (Hz)
    #   color/bright/saturation - spectrum appearance
    #   scalar       - analyzer sensitivity (default 1.0)
    #   smooth       - stabilize spectrum (0 - 15)
    #   style        - 0-22 spectrum styles (-1 for none)
    #   linewidth    - relative width of the spectrum line (0.5 - 20)
    #   rotate       - rotate foreground (r/min, positive = clockwise)
    #   beat_detect  - beat-detector sensitivity (%)
    #   low_range    - low-frequency range for the beat detector (%)
    fb.setSpec(bins=60, lower=20, upper=1500,
               color=fb.color_dic["Gradient: Green - Blue"], bright=0.6, saturation=0.8,
               scalar=1.0, smooth=2,
               style=1, linewidth=1.0,
               rotate=1.5, beat_detect=50, low_range=10)

    # Video info:
    #   br_Mbps  - video bit rate (Mbps)
    #   blur_bg  - blur the background
    #   use_glow - add a glow effect to spectrum and text
    #   bg_mode  - 0 normal, 2 background only, -1 transparent, -2 spectrum only
    #   quality  - antialiasing quality (1-5, default 3)
    fb.setVideoInfo(width=480, height=480, fps=30.0, br_Mbps=1.0,
                    blur_bg=True, use_glow=True, bg_mode=0, quality=3)

    fb.setAudioInfo(normal=False, br_kbps=192)  # Audio info

    fb.previewBackground(localViewer=True)  # Preview before rendering

    fb.runBlending()  # Render the video
--------------------------------------------------------------------------------
/SourceCode/FanBlender_Example.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from FanBlender import FanBlender

# Audio Visualizer - Example
# By Twitter @FanKetchup
# https://github.com/FerryYoungFan/FanselineVisualizer

if __name__ == '__main__':
    # Example of Using FanBlender

    fb = FanBlender()  # Initialize Blender (Render)

    # Set file path.
    # You can also use an RGBA color as a path to generate a monocolor image,
    # e.g. image_path=(255, 0, 0, 255)
    fb.setFilePath(image_path="Source/fallback.png",
                   bg_path="Source/background.jpg",
                   sound_path="Source/test.mp3",
                   logo_path="Source/logo.png")

    fb.setOutputPath(output_path="./Output",
                     filename="test.mp4")  # Set Output Path

    # Set text at the bottom (relative font size: 0.3 - 5.0); color format: RGBA.
    fb.setText(text="Your Text Here", font="Source/font.otf",
               relsize=1.0, text_color=(255, 255, 255, 255), text_glow=True)

    # Set spectrum:
    #   bins         - number of spectrum bars
    #   lower/upper  - analyzer frequency range (Hz)
    #   color/bright/saturation - spectrum appearance
    #   scalar       - analyzer sensitivity (default 1.0)
    #   smooth       - stabilize spectrum (0 - 15)
    #   style        - 0-22 spectrum styles (-1 for none)
    #   linewidth    - relative width of the spectrum line (0.5 - 20)
    #   rotate       - rotate foreground (r/min, positive = clockwise)
    #   beat_detect  - beat-detector sensitivity (%)
    #   low_range    - low-frequency range for the beat detector (%)
    fb.setSpec(bins=60, lower=20, upper=1500,
               color=fb.color_dic["Gradient: Green - Blue"], bright=0.6, saturation=0.8,
               scalar=1.0, smooth=2,
               style=1, linewidth=1.0,
               rotate=1.5, beat_detect=50, low_range=10)

    # Video info:
    #   br_Mbps  - video bit rate (Mbps)
    #   blur_bg  - blur the background
    #   use_glow - add a glow effect to spectrum and text
    #   bg_mode  - 0 normal, 2 background only, -1 transparent, -2 spectrum only
    #   quality  - antialiasing quality (1-5, default 3)
    fb.setVideoInfo(width=480, height=480, fps=30.0, br_Mbps=1.0,
                    blur_bg=True, use_glow=True, bg_mode=0, quality=3)

    fb.setAudioInfo(normal=False, br_kbps=192)  # Audio info

    fb.previewBackground(localViewer=True)  # Preview before rendering

    fb.runBlending()  # Render the video
--------------------------------------------------------------------------------
/SourceCode/FanBlender_GUI.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from QtViewer import PhotoViewer, ImageSelectWindow
5 | from QtWindows import *
6 | from LanguagePack import *
7 | from FanBlender import FanBlender, getPath, __version__
8 | import threading, time, pickle
9 |
10 |
class InfoBridge:
    """Conduit between FanBlender's console callbacks and the GUI main window."""

    def __init__(self, parent):
        self.parent = parent
        self.value = 0  # frames finished so far
        self.total = 100  # total frames to render
        self.prepare = 0  # Prepare for audio pre-process
        self.combine = 0  # Final combination
        self.img_cache = None  # latest preview frame pushed by the renderer

    def log(self, content=""):
        """Blender log lines are not shown in the GUI."""
        pass

    def progressbar(self, value, total):
        """Record rendering progress for the GUI's polling timer."""
        self.value = value
        self.total = total

    def freeze(self, flag=True):
        """Freeze the blend window while rendering; unfreezing marks the run finished."""
        if not flag:
            self.parent.isRunning = False
            return
        self.parent.setWindowTitle(self.parent.windowName + " " + self.parent.lang["(Rendering...)"])
        self.parent.blendWindow.freezeWindow(True)

    def realTime(self, img):
        """Cache the most recent rendered frame for the preview viewer."""
        self.img_cache = img

    def audioWarning(self):
        # Error code 1: audio problem.
        self.parent.error_log = 1

    def ffmpegWarning(self):
        # Error code 2: ffmpeg missing/broken.
        self.parent.error_log = 2
42 |
43 |
44 | class MainWindow(QtWidgets.QWidget):
45 |     def __init__(self, lang_in, vdic_in, lang_code_in, first_run_in):
        # Build the whole main window: blender backend, viewer pane (left),
        # stacked settings panels (right), drag&drop, and the polling timers.
46 |         super(MainWindow, self).__init__()
47 |         self.lang = lang_in
48 |         self.lang_code = lang_code_in
49 |         self.first_run = first_run_in
50 |         self.windowName = self.lang["Fanseline Visualizer"] + " - v" + __version__
51 |         self.setWindowTitle(self.windowName)
52 |         setWindowIcons(self)
53 |
        # Renderer backend plus the bridge that feeds progress back to this window.
54 |         self.fb = FanBlender()
55 |         self.infoBridge = InfoBridge(self)
56 |         self.fb.setConsole(self.infoBridge)
57 |         self.vdic = vdic_in
58 |         self.vdic_stack = []
59 |
        # File-dialog filter strings and the derived suffix lists used by fileEvent().
60 |         self.audio_formats = " (*.mp3;*.wav;*.ogg;*.aac;*.flac;*.ape;*.m4a;*.m4r;*.wma;*.mp2;*.mmf);;"
61 |         self.audio_formats_arr = getFormats(self.audio_formats)
62 |         self.video_formats = " (*.mp4;*.wmv;*.avi;*.flv;*.mov;*.mkv;*.rm;*.rmvb);;"
63 |         self.video_formats_arr = getFormats(self.video_formats)
64 |         self.image_formats = " (*.jpg;*.jpeg;*.png;*.gif;*.bmp;*.ico;*.dib;*.webp;*.tiff;*.tga;*.icns);;"
65 |         self.image_formats_arr = getFormats(self.image_formats)
66 |
        # Left pane: the preview viewer.
67 |         left = QtWidgets.QFrame(self)
68 |         left.setStyleSheet(viewerStyle)
69 |         VBlayout_l = QtWidgets.QVBoxLayout(left)
70 |         self.viewer = PhotoViewer(self)
71 |         VBlayout_l.addWidget(self.viewer)
72 |         VBlayout_l.setSpacing(0)
73 |         VBlayout_l.setContentsMargins(0, 0, 0, 0)
74 |
        # Right pane: one panel per settings page; hideAllMenu()/show() switch pages.
75 |         self.mainMenu = MainMenu(self)
76 |         self.imageSelector = ImageSelectWindow(self)
77 |         self.audioSetting = AudioSettingWindow(self)
78 |         self.videoSetting = VideoSettingWindow(self)
79 |         self.textWindow = TextWindow(self)
80 |         self.imageSetting = ImageSettingWindow(self)
81 |         self.spectrumColor = SpectrumColorWindow(self)
82 |         self.spectrumStyle = SpectrumStyleWindow(self)
83 |         self.blendWindow = BlendWindow(self)
84 |         self.aboutWindow = AboutWindow(self)
85 |
86 |         right = QtWidgets.QFrame(self)
87 |         VBlayout_r = QtWidgets.QVBoxLayout(right)
88 |         VBlayout_r.setAlignment(QtCore.Qt.AlignTop)
89 |         VBlayout_r.addWidget(self.mainMenu)
90 |         VBlayout_r.addWidget(self.imageSelector)
91 |         VBlayout_r.addWidget(self.audioSetting)
92 |         VBlayout_r.addWidget(self.videoSetting)
93 |         VBlayout_r.addWidget(self.textWindow)
94 |         VBlayout_r.addWidget(self.imageSetting)
95 |         VBlayout_r.addWidget(self.spectrumColor)
96 |         VBlayout_r.addWidget(self.spectrumStyle)
97 |         VBlayout_r.addWidget(self.blendWindow)
98 |         VBlayout_r.addWidget(self.aboutWindow)
99 |
100 |         mainBox = QtWidgets.QHBoxLayout(self)
101 |         mainBox.setSpacing(0)
102 |         mainBox.setContentsMargins(5, 5, 0, 5)
103 |
        # Resizable split between viewer (stretching) and settings column (fixed-ish).
104 |         splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
105 |         QtWidgets.QApplication.setStyle(QtWidgets.QStyleFactory.create('Cleanlooks'))
106 |         splitter.addWidget(left)
107 |         splitter.addWidget(right)
108 |         splitter.setStretchFactor(0, 1)
109 |         splitter.setStretchFactor(1, -1)
110 |         splitter.setSizes([1, 330])
111 |         mainBox.addWidget(splitter)
112 |
113 |         self.setStyleSheet(stylepack)
114 |
115 |         self.setAcceptDrops(True)
116 |         self.canDrop = True
117 |
        # First launch (or a language reset) opens the About page instead of the menu.
118 |         self.resetLang = False
119 |         global first_run, reset_lang
120 |         if first_run or reset_lang:
121 |             self.aboutWindow.show()
122 |             first_run = False
123 |         else:
124 |             # self.aboutWindow.show()
125 |             # self.blendWindow.show()
126 |             self.mainMenu.show()
127 |
        # self.timer drives realTimePreview during renders; timer_ffmpeg polls for ffmpeg.
128 |         self.timer = QtCore.QTimer(self)
129 |         self.timer_ffmpeg = QtCore.QTimer(self)
130 |         self.timer_ffmpeg.timeout.connect(self.ffmpegCheck)
131 |         self.timer_ffmpeg.start(1000)
132 |         self.error_log = 0
133 |         self.isRunning = False
134 |         self.stopWatch = time.time()
135 |         self.time_cache = ""
136 |
137 | def ffmpegCheck(self):
138 | if not self.fb.ffmpegCheck():
139 | showInfo(self, self.lang["Notice"], self.lang["FFMPEG not found, please install FFMPEG!"])
140 | self.timer_ffmpeg.disconnect()
141 | self.timer_ffmpeg.stop()
142 |
143 | def dragEnterEvent(self, event):
144 | if event.mimeData().hasUrls:
145 | event.accept()
146 | else:
147 | event.ignore()
148 |
149 | def dropEvent(self, event):
150 | for url in event.mimeData().urls():
151 | self.fileEvent(url.toLocalFile())
152 | break
153 |
154 | def fileEvent(self, path):
155 | if self.canDrop:
156 | suffix = getFileSuffix(path)[1:].lower()
157 | if suffix == "fvsav":
158 | self.loadProject(path)
159 | elif suffix in self.audio_formats_arr or suffix in self.video_formats_arr:
160 | self.mainMenu.le_audio_path.setText(path)
161 | self.mainMenu.checkFilePath()
162 | elif suffix in self.image_formats_arr:
163 | self.imageSelector.selector1.fileEvent(path)
164 | else:
165 | showInfo(self, self.lang["Notice"], self.lang["Sorry, this file is not supported!"])
166 |
167 | def hideAllMenu(self):
168 | self.mainMenu.hide()
169 | self.imageSelector.hide()
170 | self.audioSetting.hide()
171 | self.videoSetting.hide()
172 | self.textWindow.hide()
173 | self.imageSetting.hide()
174 | self.spectrumColor.hide()
175 | self.spectrumStyle.hide()
176 | self.blendWindow.hide()
177 | self.aboutWindow.hide()
178 |
179 | def setAll(self):
180 | self.fb.setFilePath(image_path=self.vdic["image_path"],
181 | bg_path=self.vdic["bg_path"],
182 | sound_path=self.vdic["sound_path"],
183 | logo_path=self.vdic["logo_path"])
184 | self.fb.setOutputPath(output_path=self.vdic["output_path"],
185 | filename=self.vdic["filename"])
186 | self.fb.setText(text=self.vdic["text"], font=self.vdic["font"], relsize=self.vdic["relsize"],
187 | text_color=self.vdic["text_color"], text_glow=self.vdic["text_glow"])
188 | self.fb.setSpec(bins=self.vdic["bins"], lower=self.vdic["lower"], upper=self.vdic["upper"],
189 | color=self.vdic["color"], bright=self.vdic["bright"], saturation=self.vdic["saturation"],
190 | scalar=self.vdic["scalar"], smooth=self.vdic["smooth"],
191 | style=self.vdic["style"], linewidth=self.vdic["linewidth"],
192 | rotate=self.vdic["rotate"],
193 | beat_detect=self.vdic["beat_detect"], low_range=self.vdic["low_range"])
194 | self.fb.setVideoInfo(width=self.vdic["width"], height=self.vdic["height"],
195 | fps=self.vdic["fps"], br_Mbps=self.vdic["br_Mbps"],
196 | blur_bg=self.vdic["blur_bg"], use_glow=self.vdic["use_glow"],
197 | bg_mode=self.vdic["bg_mode"], quality=self.vdic["quality"])
198 | self.fb.setAudioInfo(normal=self.vdic["normal"], br_kbps=self.vdic["br_kbps"])
199 |
200 | def refreshAll(self):
201 | self.setBusy(True)
202 | self.setAll()
203 | self.viewer.imshow(self.fb.previewBackground(localViewer=False, forceRefresh=True))
204 | self.setBusy(False)
205 |
206 | def refreshLocal(self):
207 | self.setBusy(True)
208 | self.fb.setText(text=self.vdic["text"], font=self.vdic["font"], relsize=self.vdic["relsize"],
209 | text_color=self.vdic["text_color"], text_glow=self.vdic["text_glow"])
210 | self.fb.setSpec(bins=self.vdic["bins"], lower=self.vdic["lower"], upper=self.vdic["upper"],
211 | color=self.vdic["color"], bright=self.vdic["bright"], saturation=self.vdic["saturation"],
212 | scalar=self.vdic["scalar"], smooth=self.vdic["smooth"],
213 | style=self.vdic["style"], linewidth=self.vdic["linewidth"],
214 | rotate=self.vdic["rotate"],
215 | beat_detect=self.vdic["beat_detect"], low_range=self.vdic["low_range"])
216 | self.viewer.imshow(self.fb.previewBackground(localViewer=False))
217 | self.setBusy(False)
218 |
219 | def vdicBackup(self):
220 | self.vdic_stack.append(self.vdic)
221 |
222 | def setBusy(self, busyFlag):
223 | if busyFlag:
224 | self.setWindowTitle(" ".join([self.windowName, self.lang["(Computing...)"]]))
225 | QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
226 | else:
227 | QApplication.restoreOverrideCursor()
228 | self.setWindowTitle(self.windowName)
229 |
230 |     def getBrief(self):
        # Build the HTML summary shown before rendering (output path, audio path,
        # video/audio settings, analyzer range, background mode).
        # NOTE(review): the HTML tags inside these string literals were stripped
        # by the extraction that produced this dump (the literals are broken
        # across lines) — restore the markup from the original file; do not
        # edit the strings as they appear here.
231 |         brief = "
"
232 |         brief += "" + self.lang["Output Path:"] + "
"
233 |         if self.vdic["bg_mode"] >= 0:
234 |             output = self.vdic["output_path"] + convertFileFormat(self.vdic["filename"], "mp4")
235 |         else:
236 |             output = self.vdic["output_path"] + convertFileFormat(self.vdic["filename"], "mov")
237 |         brief += "" + output + "
"
238 |         brief += "" + self.lang["Audio Path:"] + "
"
239 |         brief += "" + self.vdic["sound_path"] + "
"
240 |         brief += ""
241 |         brief += "" + self.lang["Video Settings:"] + "
"
242 |         brief += self.lang["Video Size:"] + " " + str(self.vdic["width"]) + "x" + str(self.vdic["height"]) + "
"
243 |         brief += self.lang["FPS:"] + " " + str(self.vdic["fps"]) + "
"
244 |         brief += self.lang["Video BR:"] + " " + str(self.vdic["br_Mbps"]) + " (Mbps)
"
245 |         for key, item in self.videoSetting.items_quality.items():
246 |             if self.vdic["quality"] == item:
247 |                 brief += self.lang["Render Quality:"] + " " + str(key) + "
"
248 |                 break
249 |         brief += self.lang["Audio BR:"] + " " + str(self.vdic["br_kbps"]) + " (kbps)
"
250 |         brief += self.lang["Volume Normalize:"] + " "
251 |         if self.vdic["normal"]:
252 |             brief += "" + self.lang["ON"] + ""
253 |         else:
254 |             brief += self.lang["OFF"]
255 |         brief += "
"
256 |         brief += self.lang["Analyzer Range:"] + " " + str(self.vdic["lower"]) + " ~ " + str(
257 |             self.vdic["upper"]) + " Hz
"
258 |         brief += self.lang["Spectrum Stabilize:"] + " " + str(self.vdic["smooth"]) + "
"
259 |         for key, item in self.imageSetting.items_bg_mode.items():
260 |             if [self.vdic["blur_bg"], self.vdic["bg_mode"]] == item:
261 |                 brief += self.lang["BG Mode:"] + " " + key
262 |                 break
263 |
264 |         brief += "
"
265 |         return brief
266 |
267 |     def getIntro(self):
        # Build the About-page HTML (version, project links, credits).
        # NOTE(review): same extraction damage as getBrief above — the HTML
        # anchors/tags were stripped; restore from the original file.
268 |         intro = "" + self.lang["Version: "] + "" + __version__ + "
"
269 |         intro += "{0}
".format(self.lang["Project Website: "])
270 |         intro += """
271 |
272 | https://github.com/FerryYoungFan/FanselineVisualizer
273 |
274 | """
275 |         intro += "{0}
".format(self.lang["Support me if you like this application:"])
276 |         intro += """
277 |
278 | https://afdian.net/@Fanseline
279 |
280 | """
281 |         intro += "{0}
".format(self.lang["About me:"])
282 |         intro += """
283 |
284 | GitHub
285 |
286 | Twitter
287 |
288 | Pixiv
289 |
290 | """
291 |         intro += "{0}
".format(self.lang["Special thanks to:"])
292 |         intro += """
293 |
294 | 小岛美奈子
295 |
296 | Dougie Doggies
297 |
298 | 神楽坂雅詩
299 |
300 | L_liu Tony
301 |
302 | """
303 |         intro += "{0}
".format(self.lang["... and all people who support me!"])
304 |         return intro
305 |
def startBlending(self):
    """Persist current settings and launch rendering on a background thread.

    Wires the FanBlender console to an InfoBridge and starts a 200 ms GUI
    timer that polls progress via realTimePreview().
    """
    self.setAll()
    saveConfig()
    self.infoBridge = InfoBridge(self)
    self.fb.setConsole(self.infoBridge)
    self.error_log = 0
    self.isRunning = True
    self.timer.timeout.connect(self.realTimePreview)
    self.timer.start(200)  # poll render progress every 200 ms
    self.stopWatch = time.time()  # start of the elapsed-time display
    self.time_cache = ""

    # daemon=True so the render thread cannot keep the process alive on exit;
    # Thread.setDaemon() is deprecated (since Python 3.10) in favor of the
    # constructor keyword / attribute.
    th_blend = threading.Thread(target=self.fb.runBlending, daemon=True)
    th_blend.start()
321 |
def stopBlending(self):
    """Request an abort of the current render."""
    # -1 is reported by realTimePreview() as "Rendering Aborted!".
    self.error_log = -1
    # Clearing this flag makes the blender's render loop bail out.
    self.fb.isRunning = False
325 |
def realTimePreview(self):
    """Timer slot (every 200 ms while rendering): refresh the progress bar,
    preview image and status text; finalize the UI when rendering ends."""
    # NOTE(review): several string literals below contain raw line breaks —
    # rich-text markup appears to have been stripped from this copy; restore
    # from version control.
    info = self.getBrief()
    # Progress bar phases (scaled 0..1000): audio analysis (prepare),
    # frame rendering (value/total), audio compositing (combine).
    if self.infoBridge.total != 0 and self.infoBridge.value == 0:
        self.blendWindow.prgbar.setValue(int(self.infoBridge.prepare * 1000))
        self.blendWindow.prgbar.setStyleSheet(progressbarStyle2)
    elif self.infoBridge.total != 0 and 0 < self.infoBridge.value < self.infoBridge.total:
        self.blendWindow.prgbar.setValue(int(self.infoBridge.value / self.infoBridge.total * 1000))
        self.blendWindow.prgbar.setStyleSheet(progressbarStyle1)
    elif self.infoBridge.total != 0 and self.infoBridge.value >= self.infoBridge.total:
        self.blendWindow.prgbar.setValue(int(self.infoBridge.combine * 1000))
        self.blendWindow.prgbar.setStyleSheet(progressbarStyle2)
    else:
        self.blendWindow.prgbar.setValue(0)
    if self.fb.isRunning or self.isRunning:
        # Still rendering: show the newest cached frame (if any) and stats.
        if self.infoBridge.img_cache is not None:
            self.viewer.imshow(self.infoBridge.img_cache)
            self.infoBridge.img_cache = None
        if self.infoBridge.value > 0:
            # Estimate remaining time from the average per-frame cost so far.
            elapsed = time.time() - self.stopWatch
            blended = self.infoBridge.value
            togo = self.infoBridge.total - self.infoBridge.value
            time_remain = secondToTime(elapsed / blended * togo)
            self.time_cache = self.lang["Remaining Time:"] + " " + time_remain + "
"

        if self.time_cache != "":
            info += "" + self.lang["Rendering:"] + " " + str(
                self.infoBridge.value) + " / " + str(self.infoBridge.total) + "
"
            info += self.time_cache
        else:
            # No frame finished yet: still in the audio-analysis phase.
            info += "" + self.lang["Analyzing Audio..."] + " " \
                    + str(round(self.infoBridge.prepare * 100)) + "%
"

        info += self.lang["Elapsed Time:"] + " " + secondToTime(time.time() - self.stopWatch) + "
"

        if self.infoBridge.value >= self.infoBridge.total:
            info += self.lang["Compositing Audio..."] + " " \
                    + str(round(self.infoBridge.combine * 100)) + "%"

        self.blendWindow.textview.setHtml(info)
        self.blendWindow.textview.moveCursor(QtGui.QTextCursor.End)
    else:
        # Rendering finished or aborted: report the outcome once, then
        # stop the timer and unfreeze the render window.
        print("error log:" + str(self.error_log))
        if self.error_log == -1:
            # -1 is set by stopBlending() (user abort).
            info += "" + self.lang["Rendering Aborted!"] + "
"
        elif self.error_log == 1:
            showInfo(self, self.lang["Notice"], self.lang["Sorry, this audio file is not supported!"])
            info += "" + self.lang["Rendering Aborted!"] + "
"
        elif self.error_log == 2:
            showInfo(self, self.lang["Notice"], self.lang["FFMPEG not found, please install FFMPEG!"])
            info += "" + self.lang["Rendering Aborted!"] + "
"
        else:
            info += "" + self.lang["Mission Complete!"] + "
"

        self.error_log = 0
        info += self.lang["Elapsed Time:"] + " " + secondToTime(time.time() - self.stopWatch) + "
"
        self.blendWindow.textview.setHtml(info)
        self.blendWindow.textview.moveCursor(QtGui.QTextCursor.End)
        self.timer.stop()
        self.timer.disconnect()
        self.setWindowTitle(self.windowName)
        self.blendWindow.freezeWindow(False)
387 |
def saveProject(self):
    """Ask the user for a *.fvsav destination and dump the current config there."""
    out_dir = self.vdic["output_path"]
    if not out_dir:
        suggestion = ""
    elif self.vdic["filename"]:
        # Suggest "<output dir><audio file name>" when a file name is in use.
        suggestion = out_dir + getFileName(self.vdic["sound_path"], False)
    else:
        suggestion = out_dir
    caption = self.lang["Save Project as..."]
    name_filter = self.lang["FV Project Files"] + " (*.fvsav)"
    chosen, _filetype = QtWidgets.QFileDialog.getSaveFileName(
        self, caption, suggestion + ".fvsav", name_filter)
    if chosen:
        saveConfig(chosen)
402 |
def loadProject(self, drag_path=None):
    """Load a *.fvsav project and refresh the whole GUI from it.

    drag_path: path of a file dropped onto the window; when None a file
    dialog is shown instead. Shows an error dialog when the file cannot
    be parsed. (The dialog and drag&drop branches previously duplicated
    the load/validate logic; it is now shared.)
    """
    if drag_path is None:
        selector = self.lang["FV Project Files"] + " (*.fvsav);;"
        selector = selector + self.lang["All Files"] + " (*.*)"
        fpath = self.vdic["output_path"] if self.vdic["output_path"] else ""
        file_, _filetype = QtWidgets.QFileDialog.getOpenFileName(
            self, self.lang["Open Project File..."], fpath, selector)
        if not file_:
            print("No File!")
            return
    else:
        file_ = drag_path

    # Shared load path for both the dialog and drag & drop.
    vdic_open = loadConfig(file_)
    if vdic_open:
        self.vdic = vdic_open
        self.mainMenu.show()
        self.refreshAll()
    else:
        showInfo(self, self.lang["File Error"], self.lang["Cannot Read this Project!"])
431 |
def closeEvent(self, event):
    """Qt close hook: veto while rendering; otherwise persist config and
    either quit for good or restart for a language change."""
    global close_app
    if self.isRunning:
        # Never allow quitting mid-render.
        showInfo(self, self.lang["Notice"], self.lang["Please stop rendering before quit!"])
        event.ignore()
        return
    saveConfig()
    if self.resetLang:
        # Language switched: leave close_app False so the outer loop
        # recreates the main window with the new language.
        global reset_lang
        reset_lang = True
        close_app = False
    else:
        close_app = True
        self.fb.removeTemp()
    event.accept()
448 |
449 |
def secondToTime(time_sec):
    """Format a duration in seconds as a zero-padded "HH:MM:SS" string.

    Fractions of a second are truncated; hours grow beyond two digits for
    very long durations. (Replaces the manual "0"-prefix padding with
    divmod + format specifiers.)
    """
    hour, rest = divmod(int(time_sec), 3600)
    minute, second = divmod(rest, 60)
    return "{:02d}:{:02d}:{:02d}".format(hour, minute, second)
461 |
462 |
# ---- Persistent configuration state -----------------------------------------
config_path = getPath("FVConfig.fconfig")  # pickled config next to the program
config = None      # last loaded/saved config dict
lang_code = "en"   # active language code ("en" or "cn_s")
lang = lang_en     # active language dictionary
# Factory defaults for every visualizer/render setting.
vdic_pre = {
    "image_path": getPath("Source/fallback.png"),
    "bg_path": (200, 200, 200, 200),  # despite the name, a color tuple is also accepted here
    "sound_path": None,
    "logo_path": getPath("Source/logo.png"),
    "output_path": None,
    "filename": None,
    "text": "Fanseline Visualizer",
    "font": getPath("Source/font.otf"),
    "relsize": 1.0,
    "text_color": (255, 255, 255, 255),
    "text_glow": True,
    "bins": 48,
    "lower": 55,
    "upper": 6000,
    "color": "gray",
    "bright": 0.6,
    "saturation": 0.5,
    "scalar": 1.0,
    "smooth": 4,
    "style": 0,
    "linewidth": 1.0,
    "width": 480,
    "height": 480,
    "fps": 30.0,
    "br_Mbps": 1.0,
    "blur_bg": True,
    "use_glow": False,
    "bg_mode": 0,
    "rotate": 0,
    "normal": False,
    "br_kbps": 320,
    "beat_detect": 0,
    "low_range": 12,
    "quality": 3,
}
# NOTE(review): alias, not a copy — loadConfig() mutates vdic and therefore
# also the "defaults" in vdic_pre.
vdic = vdic_pre
close_app = False  # main loop at the bottom of the file exits when True
first_run = True   # cleared once a config file has been read successfully
reset_lang = False # set on language switch; triggers a GUI restart
507 |
508 |
def loadConfig(user_path=None):
    """Load settings from a pickled config or project file.

    user_path: project file to read; when None, the app-level config file
    (config_path) is read and the active language is updated.

    Returns False when the default config is missing or from another
    version; None (implicitly) on default-config success; None when a
    project file cannot be read; a copy of vdic for a readable project.

    NOTE(review): pickle.load on a user-supplied file can execute
    arbitrary code — only open trusted project files.
    """
    global config, lang, lang_code, vdic, first_run
    f_path = config_path if user_path is None else user_path
    if user_path is None:
        try:
            with open(f_path, "rb") as handle:
                config = pickle.load(handle)
            # Configs written by other versions are discarded entirely.
            if config["__version__"] != __version__:
                raise Exception("VersionError")
            lang_code = config["lang_code"]
            print(config)
            first_run = False
        except Exception:
            print("no config")
            return False
    else:
        try:
            with open(f_path, "rb") as handle:
                config = pickle.load(handle)
            print("Loaded Project Version: " + config["__version__"])
            print(config)
            first_run = False
        except Exception:
            return None

    # Merge saved values over the current settings. (The old per-key loop
    # with try/except was equivalent: plain dict assignment cannot raise.)
    vdic.update(config["vdic"])

    if user_path is None:
        if lang_code == "cn_s":
            lang = lang_cn_s
        else:
            lang = lang_en
    else:
        return vdic.copy()
552 |
553 |
def saveConfig(user_path=None):
    """Pickle the current settings.

    user_path: project-file destination; when None, write the app-level
    config file (config_path). Shows an error dialog when writing or the
    read-back verification fails.
    """
    global config, lang, lang_code, vdic, vdic_pre, appMainWindow
    if user_path is None:
        f_path = config_path
    else:
        f_path = user_path

    try:
        # Snapshot the live settings from the main window when it exists...
        lang_code = appMainWindow.lang_code
        vdic = appMainWindow.vdic
    except:
        # ...otherwise (e.g. before the window is built) fall back to defaults.
        vdic = vdic_pre
        lang_code = "en"

    config = {
        "__version__": __version__,
        "lang_code": lang_code,
        "vdic": vdic,
    }
    try:
        with open(f_path, 'wb') as handle:
            pickle.dump(config, handle, protocol=pickle.HIGHEST_PROTOCOL)
        # Read back immediately to verify the file was actually written
        # and is loadable.
        with open(f_path, "rb") as handle:
            config_test = pickle.load(handle)
        if config_test["__version__"] != __version__:
            raise Exception("AuthorityError")
    except:
        showInfo(appMainWindow, appMainWindow.lang["Notice"], appMainWindow.lang["Error! Cannot save config!"])
582 |
583 |
if __name__ == '__main__':
    appMainWindow = None
    app = QtWidgets.QApplication(sys.argv)
    _font = None
    # Register the bundled UI font (otf preferred) and patch it into the
    # application stylesheet in place of Arial.
    font_sets = ["Source/font.otf", "Source/font.ttf"]
    for font_path in font_sets:
        if os.path.exists(getPath(font_path)):
            try:
                fontid = QtGui.QFontDatabase.addApplicationFont(font_path)
                _fontstr = QtGui.QFontDatabase.applicationFontFamilies(fontid)[0]
                stylepack = stylepack.replace("Arial", _fontstr)
                break
            except:
                # Font registration is best-effort; fall back to Arial.
                pass


    def startMain():
        """(Re)create the main window using the current config/language."""
        global appMainWindow, lang, vdic, first_run, _font
        loadConfig()
        appMainWindow = MainWindow(lang, vdic, lang_code, first_run)
        appMainWindow.resize(1050, 700)
        appMainWindow.show()
        appMainWindow.refreshAll()


    # Restart loop: closeEvent() leaves close_app False after a language
    # switch, so the window is rebuilt instead of quitting.
    while not close_app:
        startMain()
        app.exec_()

    sys.exit()
614 |
--------------------------------------------------------------------------------
/SourceCode/FanWheels_PIL.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from PIL import Image, ImageFilter, ImageDraw, ImageEnhance, ImageChops, ImageFont
5 | import os, sys
6 |
7 |
def getPath(fileName):
    """Resolve *fileName* relative to the launched script's directory
    (works across operating systems and frozen builds)."""
    base_dir = os.path.dirname(sys.argv[0])
    return os.path.join(base_dir, fileName)
11 |
12 |
def imageOrColor(path, mode):
    """Open an image file or synthesize a 512x512 solid-color image.

    path: file path (str) or color tuple; mode: PIL mode, e.g. "RGB"/"RGBA".
    Returns a PIL.Image, or None when neither opening nor generating works.
    (Removed dead `is None` checks — Image.open/Image.new raise on failure,
    they never return None — and narrowed the bare excepts.)
    """
    if isinstance(path, str):
        try:
            return Image.open(getPath(path)).convert(mode)
        except Exception:
            return None
    if isinstance(path, tuple):
        try:
            # RGB takes a 3-tuple, every other mode a 4-tuple here.
            channels = 3 if mode == "RGB" else 4
            return Image.new(mode, (512, 512), path[:channels])
        except Exception:
            return None
    return None
36 |
37 |
def openImage(path, mode="RGBA", fallbacks=None):
    """Open *path* (file path or color tuple) in *mode*, trying *fallbacks*
    in order on failure; as a last resort return a plain black image.

    A fallback entry of None short-circuits to returning None ("no image").
    When *fallbacks* is not a list and *path* fails, None is returned.
    """
    img = imageOrColor(path, mode)
    if img is not None:
        return img
    if not isinstance(fallbacks, list):
        return None
    for fallback in fallbacks:
        if fallback is None:
            return None
        img = imageOrColor(fallback, mode)
        if img is not None:
            return img
        # Fix: keep trying the remaining fallbacks instead of giving up
        # after the first failed one.
    # All fallbacks failed: opaque black default. Fix: match the *requested*
    # mode (the RGBA/RGB branches were swapped in the original).
    if mode == "RGBA":
        return Image.new('RGBA', (512, 512), (0, 0, 0, 255))
    return Image.new('RGB', (512, 512), (0, 0, 0))
54 |
55 |
def cropToCenter(img):
    """Crop *img* to its largest centered square."""
    w, h = img.size
    side = min(w, h)
    x0 = (w - side) / 2
    y0 = (h - side) / 2
    return img.crop((x0, y0, x0 + side, y0 + side))
65 |
66 |
def cropCircle(img, size=None, quality=3):
    """Cut a centered circular disc out of *img*, antialiased.

    size: output side length; when None the image keeps its (square) size.
    quality: 0-5 index into the supersampling factors below.
    Fix: with size=None the original crashed at mask.resize((None, None));
    we now fall back to the square image's own side length.
    """
    img = cropToCenter(img)
    if size is not None:
        img = img.resize((size, size), Image.ANTIALIAS)
    else:
        # cropToCenter() returns a square, so width == height.
        size = img.size[0]
    width, height = img.size
    # Assumes img has an alpha channel (mode RGBA) — last band is the mask.
    old_mask = img.split()[-1]
    quality_list = [1, 1, 2, 4, 8, 8]  # supersampling factor per quality level
    scale = quality_list[quality]
    size_anti = width * scale, height * scale
    # Draw the circle at scale x resolution, then downsample for smooth edges.
    mask = Image.new('L', size_anti, 255)
    draw = ImageDraw.Draw(mask)
    draw.ellipse((0, 0) + size_anti, fill=0)
    mask = mask.resize((size, size), Image.ANTIALIAS)
    # Keep only alpha inside the circle (subtract the outside).
    mask = ImageChops.subtract(old_mask, mask)
    img.putalpha(mask)
    return img
84 |
85 |
def resizeRatio(img, ratio):
    """Scale *img* uniformly by *ratio* (antialiased)."""
    new_size = tuple(int(round(dim * ratio)) for dim in img.size)
    return img.resize(new_size, Image.ANTIALIAS)
91 |
92 |
def cropBG(img, size):
    """Scale *img* so it fully covers *size* = (width, height), then
    center-crop the overflow — i.e. a "cover" fit."""
    width, height = img.size
    if (width < size[0] and height < size[1]) or (width > size[0] and height > size[1]):
        # Both dimensions too small, or both too big: normalize on width
        # first, then grow again if the height still falls short.
        img = resizeRatio(img, size[0] / width)
        width, height = img.size
        if height < size[1]:
            img = resizeRatio(img, size[1] / height)
            width, height = img.size
    elif width < size[0] and height >= size[1]:
        # Only the width is lacking: scale up on width.
        img = resizeRatio(img, size[0] / width)
        width, height = img.size
    elif width >= size[0] and height < size[1]:
        # Only the height is lacking: scale up on height.
        img = resizeRatio(img, size[1] / height)
        width, height = img.size

    # Center-crop to the exact target size.
    left = (width - size[0]) / 2
    top = (height - size[1]) / 2
    right = (width + size[0]) / 2
    bottom = (height + size[1]) / 2

    img = img.crop((left, top, right, bottom))
    return img
115 |
116 |
def genBG(img, size, blur=41, bright=0.35):
    """Build a background: cover-crop *img* to *size*, Gaussian-blur it,
    then darken it by *bright* (< 1 darkens)."""
    cropped = cropBG(img, size)
    blurred = cropped.filter(ImageFilter.GaussianBlur(radius=blur))
    return ImageEnhance.Brightness(blurred).enhance(bright)
123 |
124 |
def pasteMiddle(fg, bg, glow=False, blur=2, bright=1):
    """Paste *fg* centered onto *bg*; optionally add a glow halo first.

    fg must carry an alpha channel (used as the paste mask); bg may be
    mode L, RGB or RGBA. Returns the (modified) bg.
    """
    fg_w, fg_h = fg.size
    bg_w, bg_h = bg.size
    offset = ((bg_w - fg_w) // 2, (bg_h - fg_h) // 2)

    if glow:
        # Build a blurred, brightness-scaled copy of fg's alpha and add it
        # to bg so the foreground appears to emit light.
        brt = int(round(bright * 255))
        if brt > 255:
            brt = 255
        elif brt < 0:
            brt = 0
        canvas = Image.new('RGBA', (bg_w, bg_h), (brt, brt, brt, 0))
        canvas.paste(fg, offset, fg)
        mask = canvas.split()[-1]  # alpha of the pasted foreground
        mask = mask.point(lambda i: i * bright)
        mask = mask.filter(ImageFilter.GaussianBlur(radius=blur))
        mask = mask.resize((bg_w, bg_h))
        # Expand the single-channel halo to bg's own mode before adding.
        if bg.mode == "L":
            canvas = mask
        elif bg.mode == "RGB":
            canvas = Image.merge('RGB', (mask, mask, mask))
        elif bg.mode == "RGBA":
            canvas = Image.merge('RGBA', (mask, mask, mask, mask))
        bg = ImageChops.add(bg, canvas)

    bg.paste(fg, offset, fg)
    return bg
152 |
153 |
def glowText(img, text=None, font_size=35, font_set=None, color=(255, 255, 255, 255), blur=2, logo=None, use_glow=True,
             yoffset=0):
    """Draw *text* (and/or a *logo* image) horizontally centered on *img*,
    optionally with a glow, and return *img* (modified in place).

    font_set: preferred font file; falls back to the bundled fonts, then
    Arial. yoffset: vertical pixel offset of the text/logo row.
    """
    width, height = img.size
    # NOTE(review): the next four self-assignments are no-ops.
    width = width
    height = height
    font_size = font_size
    blur = blur

    if font_set is None:
        _font = ImageFont.truetype("arial.ttf", font_size)
    else:
        _font = "arial.ttf"
        # Try the requested font, then the bundled ones, then Arial.
        for font_i in [font_set, getPath("Source/font.ttf"), getPath("Source/font.otf"), "arial.ttf"]:
            try:
                _font = ImageFont.truetype(font_i, font_size)
                break
            except:
                print("Cannot Use Font: {0}".format(font_i))

    # Transparent canvas pre-filled with the text color (alpha 0); drawing
    # the text/logo only sets alpha where ink goes.
    canvas = Image.new('RGBA', (width, height), color[:-1] + (0,))
    draw = ImageDraw.Draw(canvas)
    if text:
        w, h = draw.textsize(text, font=_font)
    else:
        w, h = 0, 0
    xoffset = 0
    if logo is not None:
        # Scale the logo to ~1.1x the font height and place it left of the text.
        lg_w, lg_h = logo.size
        hoffset = 1.1
        lg_nh = round(font_size * hoffset)
        lg_nw = round(lg_w * lg_nh / lg_h)
        logo = logo.resize((lg_nw, lg_nh), Image.ANTIALIAS)
        if text:
            xoffset = lg_nw + font_size / 4  # gap between logo and text
        else:
            xoffset = lg_nw
        w = w + xoffset
        _x_logo = int(round((width - w) / 2))
        _y_logo = int(round(yoffset - font_size * (hoffset - 1) / 2))
        try:
            canvas.paste(logo, (_x_logo, _y_logo), logo)  # with alpha mask
        except:
            canvas.paste(logo, (_x_logo, _y_logo))  # logo has no alpha channel
    if text:
        draw.text(((width - w) / 2 + xoffset, yoffset), text, fill=color, font=_font)
    if use_glow:
        # Glow: blur alpha and one color channel, paste the halo, then the
        # sharp text/logo on top of it.
        mask_blur = canvas.split()[-1]
        mask_blur = mask_blur.filter(ImageFilter.GaussianBlur(radius=blur * 2))
        fg_blur = canvas.split()[0]
        fg_blur = fg_blur.filter(ImageFilter.GaussianBlur(radius=blur * 2))
        glow_text = Image.merge("RGBA", (fg_blur, fg_blur, fg_blur, mask_blur))
        glow_text = glow_text.resize(img.size, Image.ANTIALIAS)
        canvas = canvas.resize(img.size, Image.ANTIALIAS)
        img.paste(glow_text, (0, 0), mask=glow_text)
        img.paste(canvas, (0, 0), mask=canvas)
    else:
        canvas = canvas.resize(img.size, Image.ANTIALIAS)
        img.paste(canvas, (0, 0), mask=canvas)
    return img
213 |
214 |
def hsv_to_rgb(h, s, v):
    """Convert HSV components (each 0..1) to an (r, g, b) tuple of 0..255 ints.

    Values are truncated (not rounded) to int, matching the original code.
    """
    if s == 0.0:
        grey = int(v * 255)
        return grey, grey, grey
    sector = int(h * 6.)
    frac = (h * 6.) - sector
    p = int(255 * (v * (1. - s)))
    q = int(255 * (v * (1. - s * frac)))
    t = int(255 * (v * (1. - s * (1. - frac))))
    vi = int(v * 255)
    sector %= 6
    # One permutation of (vi, p, q, t) per 60-degree hue sector.
    return (
        (vi, t, p),
        (q, vi, p),
        (p, vi, t),
        (p, q, vi),
        (t, p, vi),
        (vi, p, q),
    )[sector]
230 |
231 |
def rgb_to_hsv(r, g, b):
    """Convert 0..255 RGB ints to an (h, s, v) tuple of floats in 0..1."""
    rf, gf, bf = r / 255.0, g / 255.0, b / 255.0
    hi = max(rf, gf, bf)
    lo = min(rf, gf, bf)
    span = hi - lo
    if hi == lo:
        hue = 0  # achromatic
    elif hi == rf:
        hue = (60 * ((gf - bf) / span) + 360) % 360
    elif hi == gf:
        hue = (60 * ((bf - rf) / span) + 120) % 360
    else:  # hi == bf
        hue = (60 * ((rf - gf) / span) + 240) % 360
    sat = 0 if hi == 0 else span / hi
    return hue / 360, sat, hi
251 |
252 |
def hex_to_rgb(value):
    """Parse a hex color string (e.g. "#rrggbb", or "#rgb") into an int tuple."""
    digits = value.lstrip('#')
    step = len(digits) // 3  # digits per channel
    return tuple(
        int(digits[pos:pos + step], 16)
        for pos in range(0, len(digits), step)
    )
257 |
258 |
def rgb_to_hex(rgb):
    """Format an (r, g, b) int tuple as a lowercase "#rrggbb" string."""
    r, g, b = rgb
    return "#{:02x}{:02x}{:02x}".format(r, g, b)
261 |
262 |
def glowFx(image, radius=0, brt=1.5):
    """Additively blend a Gaussian-blurred copy of *image* over itself,
    producing a glow. With radius <= 0 the image is returned untouched.

    brt is kept for API compatibility; this implementation does not use it.
    """
    if radius <= 0:
        return image
    halo = image.filter(ImageFilter.GaussianBlur(radius=radius))
    return ImageChops.add(halo, image)
271 |
272 | # def glowFx(image, radius=0, brt=1.5):
273 | # if radius > 0:
274 | # base = image.copy()
275 | # image = image.filter(ImageFilter.BoxBlur(radius=radius))
276 | # enhancer = ImageEnhance.Brightness(image)
277 | # image = enhancer.enhance(brt)
278 | # base.paste(image, (0, 0), image)
279 | # return base
280 | # else:
281 | # return image
282 |
--------------------------------------------------------------------------------
/SourceCode/FanWheels_ffmpeg.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import imageio_ffmpeg
5 | import subprocess
6 |
7 |
def time2second(time_str):
    """Convert an ffmpeg timestamp "H:MM:SS.cc" to seconds (float).

    Fix: the old code sliced the two fractional digits (centiseconds in
    ffmpeg output) and divided by 1000, underestimating the fraction by
    a factor of 10. Parsing the seconds field as float is exact.
    """
    hours, minutes, seconds = time_str.split(":")
    return int(hours) * 3600 + int(minutes) * 60 + float(seconds)
14 |
15 |
def ffcmd(args="", console=None, task=0):
    """Run the bundled ffmpeg binary with *args* and stream its progress.

    console: optional bridge object; task 1 writes progress to
    console.prepare (audio analysis), task 2 to console.combine (muxing).
    The subprocess is killed when console.parent.fb.isRunning turns False.
    """
    ffpath = imageio_ffmpeg.get_ffmpeg_exe()
    # NOTE(review): shell=True with interpolated file names — paths containing
    # shell metacharacters could break the command or be interpreted by the
    # shell. Consider an argument list with shell=False.
    cmd = ffpath + " " + args
    print("ffmpeg:", cmd)

    duration = None
    progress = 0
    # ffmpeg writes status to stderr; merge it into stdout and parse lines.
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8", shell=True,
                               universal_newlines=True)
    for line in process.stdout:
        # Guard: "Duration"/"time=" may also appear inside an input file name
        # on an "Input #0, ..." header line.
        input_index = line.find("Input")
        if duration is None:
            # First phase: pick up the total duration from the header.
            duration_index = line.find("Duration:")
            if input_index == -1 and duration_index != -1:
                duration_time = line.split(": ")[1].split(",")[0]
                duration = time2second(duration_time)
        else:
            # Afterwards: parse "time=HH:MM:SS.cc" status lines into progress.
            time_index = line.find("time=")
            if input_index == -1 and time_index != -1:
                now_time = line.split("time=")[1].split(" ")[0]
                if duration > 0:
                    progress = time2second(now_time) / duration
                print("Progress: " + str(round(progress * 100)) + "%")
            if console is not None:
                try:
                    if task == 1:
                        console.prepare = progress
                    elif task == 2:
                        console.combine = progress
                    if not console.parent.fb.isRunning:
                        # User aborted: kill ffmpeg and stop reading output.
                        print("terminate!!!")
                        process.kill()
                        break
                except:
                    # Console is best-effort; never let UI wiring kill ffmpeg.
                    pass

    print("ffmpeg: Done!")
53 |
54 |
def toTempWaveFile(file_in, file_out, console=None):
    """Decode *file_in* to mono, 44.1 kHz, loudness-normalized audio at
    *file_out*, reporting progress to *console* as task 1 (prepare)."""
    args = '-i \"{0}\" -ar 44100 -ac 1 -filter:a loudnorm -y \"{1}\"'.format(file_in, file_out)
    ffcmd(args, console, task=1)
58 |
59 |
def combineVideo(video, audio, file_out, audio_quality="320k", normal=False, console=None):
    """Mux *video* (stream-copied) with *audio* re-encoded to AAC into
    *file_out*, reporting progress to *console* as task 2 (combine).

    normal: apply loudness normalization to the audio track.
    """
    parts = [
        '-i \"{0}\" -itsoffset 0.0 -i \"{1}\" '.format(video, audio),
        '-map 0:v:0 -c:v copy -map 1:a:0 -b:a {0} -c:a aac '.format(audio_quality),
    ]
    if normal:
        parts.append('-filter:a loudnorm ')
    parts.append('-metadata description=\"Rendered by Fanseline Visualizer\" ')
    parts.append('-y \"{0}\"'.format(file_out))
    ffcmd("".join(parts), console, task=2)
68 |
69 |
def cvtFileName(path, new_format=None):
    """Return *path* with its file extension replaced by *new_format*.

    new_format may be given with or without the leading dot; when None the
    path is returned unchanged. A dot inside a directory name does not
    count as an extension. Fix: a bare, extensionless filename used to
    collapse to just the extension (e.g. "clip" -> ".mp4"); it is now
    appended to ("clip.mp4").
    """
    if new_format is None:
        return path
    if not new_format.startswith("."):
        new_format = "." + new_format
    last_dot = path.rfind(".")
    if last_dot == -1:
        return path + new_format  # no dot anywhere: just append
    ftype = path[last_dot + 1:]
    if "/" in ftype or "\\" in ftype:
        return path + new_format  # the last dot belongs to a directory name
    return path[:last_dot] + new_format
87 |
88 |
if __name__ == '__main__':
    # Smoke test: print the bundled ffmpeg's version when run directly.
    ffcmd("-version")
91 |
--------------------------------------------------------------------------------
/SourceCode/LICENSE.txt:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 FerryYoungFan
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/SourceCode/LanguagePack.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | lang_cn_s = {
5 | "Fanseline Visualizer": "Fanseline Visualizer",
6 | "Stop Rendering": "取消渲染",
7 | "(Rendering...)": "(渲染中……)",
8 | "(Computing...)": "(计算中……)",
9 | "ON": "开",
10 | "OFF": "关",
11 | "Render & Export": "渲染输出",
12 | "Open": "打开",
13 | "Use Color": "使用纯色",
14 | "No Image File": "没有图片文件",
15 | "Frequency Analyzer Preset": "频率分析预设",
16 | "Audio": "音频",
17 | "Video": "视频",
18 | "Spectrum": "频谱",
19 | "Render": "渲染",
20 | "": "FFT低频",
21 | "": "FFT高频",
22 | "Analyzer Frequency Range (Hz)": "频率分析器范围(赫兹)",
23 | "Analyzer Range:": "分析器范围:",
24 |
25 | "": "<请打开一个音频文件>",
26 | "": "<请打开一个字体文件>",
27 | "Select Font": "选择字体",
28 | "Open Audio File": "打开音频文件",
29 | "Audio Settings": "音频设置",
30 | "Video Settings": "视频设置",
31 | "Video Preset": "视频预设",
32 | "Auto Video Bit Rate": "自动视频码率",
33 | "Frame Rate (FPS)": "帧率(FPS)",
34 | "": "<帧率>",
35 | "Frame Size": "帧大小",
36 | "": "<宽>",
37 | "": "<高>",
38 | "Video Bit Rate (Mbps)": "视频码率 (Mbps)",
39 | "": "<码率>",
40 | "Audio Bit Rate": "音频码率",
41 | "Volume Normalize": "音量标准化",
42 | "Volume Normalize:": "音量标准化:",
43 | "Remove": "移除",
44 | "Foreground": "主图片",
45 | "Background": "背景图片",
46 | "Logo": "小图标",
47 | "Select Audio": "选择音频",
48 | "Select Images": "选择图片",
49 | "Back to Main Menu": "返回主菜单",
50 | "Select an Image": "选择一张图片",
51 | "Select Audio File": "选择音频文件",
52 | "Select Color": "选择颜色",
53 | "Customize Color": "自定义颜色",
54 | "Foreground Selected: ": "已选择主图片: ",
55 | "Background Selected: ": "已选择背景图片: ",
56 | "Logo Selected: ": "已选择图标: ",
57 | "Audio Selected: ": "已选择音频: ",
58 | "Image Files": "图片文件",
59 | "All Files": "全部文件",
60 | "Audio Files": "音频文件",
61 | "Video Files": "视频文件",
62 | "Font Files": "字体文件",
63 | "Font Size": "文字大小",
64 | "Text Settings": "文字设置",
65 | "Edit Text": "编辑文字",
66 | "Clear Text": "清空文本",
67 | "Refresh Preview": "刷新预览",
68 | "": "<在此输入文字>",
69 | "Use File Name": "使用文件名",
70 | "Select Font File": "选择字体文件",
71 | "Image Settings": "图片设置",
72 | "Background Mode": "背景模式",
73 | "Spin Foreground": "旋转主图",
74 | "No Spin": "不旋转",
75 | "Clockwise": "顺时针旋转",
76 | "Counterclockwise": "逆时针旋转",
77 | "Spin Speed (rpm)": "旋转速度 (转/分钟)",
78 | "": "<旋转速度>",
79 | "Customize": "自定义",
80 | "Spectrum Color": "频谱颜色",
81 | "Spectrum Saturation": "频谱饱和度",
82 | "Spectrum Brightness": "频谱亮度",
83 | "Glow Effect (Slow)": "发光效果 (缓慢)",
84 |
85 | "Spectrum Style": "频谱样式",
86 | "Spectrum Number": "频谱数量",
87 | "Spectrum Thickness": "频谱线宽",
88 | "Spectrum Scalar": "频谱条灵敏倍率",
89 | "Spectrum Stabilize": "频谱防抖",
90 |
91 | "Save Project": "保存项目",
92 | "Load Project": "读取项目",
93 | "Save Project as...": "项目保存为……",
94 | "FV Project Files": "FV项目文件",
95 | "Open Project File...": "打开项目文件……",
96 | "File Error": "文件错误",
97 | "Cannot Read this Project!": "无法读取该项目!",
98 |
99 | "Save as PNG": "保存为PNG",
100 | "Save as JPG": "保存为JPG",
101 | "Save as BMP": "保存为BMP",
102 | "PNG Files": "PNG文件",
103 | "JPG Files": "JPG文件",
104 | "BMP Files": "BMP文件",
105 | "Files": "文件",
106 | "Output Config": "输出配置",
107 | "Rendering:": "渲染中:",
108 | "Compositing Audio...": "音频合成中……",
109 | "Analyzing Audio...": "分析音频中……",
110 | "Mission Complete!": "任务完成!",
111 | "Elapsed Time:": "已用时:",
112 | "Remaining Time:": "剩余时间:",
113 |
114 | "Output Path:": "输出路径:",
115 | "Audio Path:": "音频路径:",
116 | "Audio Settings:": "音频设置:",
117 | "Video Settings:": "视频设置:",
118 |
119 | "Start Rendering": "开始渲染",
120 | "Ready to Render": "准备渲染",
121 | "About FVisualizer": "关于本软件",
122 | "About": "关于",
123 | "Version: ": "版本:",
124 | "Support me if you like this application:": "如果你喜欢这个应用,欢迎来赞助我:",
125 | "About me:": "关于我:",
126 | "Special thanks to:": "特别感谢:",
127 | "... and all people who support me!": "……以及所有支持我的人!",
128 |
129 | "Output Path": "输出路径",
130 | "Select Path": "选择路径",
131 | "