.
675 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | # ViPER4Android Presets
3 |
4 | Largest collection of DDC(.vdc), Kernel(.irs) & Preset(.xml) files for ViPER4Android
5 |
6 | [Latest Release](https://github.com/syntaxticsugr/ViPER4Android-Presets/releases/latest) [All Releases](https://github.com/syntaxticsugr/ViPER4Android-Presets/releases)
7 |
8 |
9 |
10 |
11 |
12 | `DDC(.vdc)` & `Kernel(.irs)` files are provided _as is_ by the original authors, without any modifications.
13 |
14 | `Preset(.xml)` files are patched _as and if_ necessary to work with the latest ViPER4Android.
15 |
16 | `Master Limiter` & `Playback Gain Control` values are set to [ViPER Defaults](https://github.com/syntaxticsugr/ViPER4Android-Presets/tree/main/default_presets) for all `Preset(.xml)` files.
17 |
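For illustration, here is a minimal sketch of what that amounts to, assuming the SharedPreferences-style `<int name="..." value="..."/>` layout used by ViPER4Android presets. The actual pipeline lives in `pipe/convert.py` and works line by line; `reset_to_defaults` below is only a hypothetical helper, not part of the repo:

```python
import xml.etree.ElementTree as ET
from pathlib import Path

# Master Gate (Limiter) and Playback Gain Control keys,
# as listed in default_presets/m1_keys.txt.
LIMITER_AND_GAIN_KEYS = {
    "65586", "65587", "65588",           # gate_outputvolume, gate_channelpan, gate_limiter
    "65565", "65566", "65567", "65568",  # playbackgain_*
}

def reset_to_defaults(preset: Path, defaults: Path = Path("default_presets/default_m1.xml")) -> None:
    # Read the default values for the limiter / gain keys ...
    default_values = {
        elem.get("name"): elem.get("value")
        for elem in ET.parse(defaults).getroot().findall(".//")
        if elem.get("name") in LIMITER_AND_GAIN_KEYS
    }
    # ... and write them into the target preset.
    tree = ET.parse(preset)
    for elem in tree.getroot().findall(".//"):
        if elem.get("name") in default_values:
            elem.set("value", default_values[elem.get("name")])
    tree.write(preset, encoding="utf-8", xml_declaration=True)
```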
18 |
19 |
20 | ### Release Info
21 |
22 | | [v2.2.0](https://github.com/syntaxticsugr/ViPER4Android-Presets/releases/latest) | Full | Lite | Recommended |
23 | | --- | --- | --- | --- |
24 | | ⭐ | All `Preset`s<br>All `Kernel`s<br>All `DDC`s | Unique `Preset`s<br>Required `Kernel`s<br>Required `DDC`s | Unique `Preset`s<br>Unique `Kernel`s<br>Unique `DDC`s |
25 | | **Preset** | 1685 | 851 | 851 |
26 | | **Kernel** | 2306 | 197 | 1716 |
27 | | **DDC** | 626 | 45 | 580 |
28 |
29 |
30 |
31 | ### [How To Use?](https://github.com/syntaxticsugr/ViPER4Android-Presets/discussions/3)
32 |
33 |
34 |
35 | ### Want to share your collection of DDCs, Kernels & Presets?
36 | [Fill this Form : )](https://forms.gle/1JShGMdbTbujJfKQ9)
37 |
38 |
39 |
40 | ## Credits
41 |
42 | [jadilson12](https://github.com/jadilson12) [Joe0Bloggs](https://github.com/Joe0Bloggs) [JohnFawkes](https://github.com/JohnFawkes) [programminghoch10](https://github.com/programminghoch10) [WSTxda](https://github.com/WSTxda)
43 |
--------------------------------------------------------------------------------
/default_presets/README.md:
--------------------------------------------------------------------------------
1 |
2 | +-----------+-----------------+------------------------------------------+
3 | | m1 | Mode 1 | Headset, Bluetooth, USB |
4 | +-----------+-----------------+------------------------------------------+
5 | | m2 | Mode 2 | Speaker |
6 | +-----------+-----------------+------------------------------------------+
7 |
8 |
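Which default applies to a given preset is decided from keywords in the preset's file name (see `pipe/convert.py`). A simplified sketch of that selection; `pick_default_preset` is just an illustrative name:

```python
from pathlib import Path

M1_KEYWORDS = ("bluetooth", "headset", "usb")  # Mode 1 devices
M2_KEYWORD = "speaker"                         # Mode 2 device

def pick_default_preset(preset_name: str) -> Path | None:
    name = preset_name.lower()
    if any(keyword in name for keyword in M1_KEYWORDS):
        return Path("default_presets/default_m1.xml")
    if M2_KEYWORD in name:
        return Path("default_presets/default_m2.xml")
    return None  # preset type cannot be determined
```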
--------------------------------------------------------------------------------
/default_presets/default_m1.xml:
--------------------------------------------------------------------------------
1 |
2 |
67 |
--------------------------------------------------------------------------------
/default_presets/default_m2.xml:
--------------------------------------------------------------------------------
1 |
2 |
41 |
--------------------------------------------------------------------------------
/default_presets/m1_keys.txt:
--------------------------------------------------------------------------------
1 | m1 (Mode 1) = Headset, Bluetooth, USB
2 | -------------------------------------
3 |
4 | # Mode
5 | mode="32775"
6 |
7 | # Master Switch
8 | enable="36868"
9 |
10 | # Master Gate (Limiter)
11 | gate_outputvolume="65586"
12 | gate_channelpan="65587"
13 | gate_limiter="65588"
14 |
15 | # Playback Gain Control
16 | playbackgain_enable="65565"
17 | playbackgain_ratio="65566"
18 | playbackgain_volume="65567"
19 | playbackgain_maxscaler="65568"
20 |
21 | # FET Compressor
22 | fet_enable="65610"
23 | fet_threshold="65611"
24 | fet_ratio="65612"
25 | fet_knee="65613"
26 | fet_autoknee="65614"
27 | fet_gain="65615"
28 | fet_autogain="65616"
29 | fet_attack="65617"
30 | fet_autoattack="65618"
31 | fet_release="65619"
32 | fet_autorelease="65620"
33 | fet_kneemulti="65621"
34 | fet_maxattack="65622"
35 | fet_maxrelease="65623"
36 | fet_crest="65624"
37 | fet_adapt="65625"
38 | fet_noclipenable="65626"
39 |
40 | # ViPER DDC
41 | ddc_enable="65546"
42 | ddc_device="65547"
43 |
44 | # Spectrum Extension
45 | vse_enable="65548"
46 | vse_value="65549;65550"
47 |
48 | # FIR Equalizer
49 | fireq_enable="65551"
50 | fireq="65552"
51 |
52 | # Convolver
53 | convolver_enable="65538"
54 | convolver_kernel="65540;65541;65542"
55 | convolver_crosschannel="65543"
56 |
57 | # Field Surround
58 | colorfulmusic_enable="65553"
59 | colorfulmusic_coeffs="65554;65556"
60 | colorfulmusic_midimage="65555"
61 |
62 | # Differential Surround
63 | diffsurr_enable="65557"
64 | diffsurr_delay="65558"
65 |
66 | # Headphone Surround +
67 | vhs_enable="65544"
68 | vhs_quality="65545"
69 |
70 | # Reverberation
71 | reverberation_enable="65559"
72 | reverberation_roomsize="65560"
73 | reverberation_room_width="65561"
74 | reverberation_damp="65562"
75 | reverberation_wet="65563"
76 | reverberation_dry="65564"
77 |
78 | # Dynamic System
79 | dynamicsystem_enable="65569"
80 | dynamicsystem_device="65570;65571;65572"
81 | dynamicsystem_strength="65573"
82 |
83 | # Tube Simulator (6N1J)
84 | tube_simulator_enable="65583"
85 |
86 | # ViPER Bass
87 | fidelity_bass_enable="65574"
88 | fidelity_bass_mode="65575"
89 | fidelity_bass_frequency="65576"
90 | fidelity_bass_gain="65577"
91 |
92 | # ViPER Clarity
93 | fidelity_clarity_enable="65578"
94 | fidelity_clarity_mode="65579"
95 | fidelity_clarity_gain="65580"
96 |
97 | # Auditory System Protection
98 | cure_enable="65581"
99 | cure_crossfeed="65582"
100 |
101 | # AnalogX
102 | analogx_enable="65584"
103 | analogx_mode="65585"
104 |
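These numeric IDs are what appear as `name` attributes inside a `Preset(.xml)` file. As a quick illustration (the preset path below is made up), the repository's `utils/release_utils/search_in_xml.py` helper can read the value stored under any of them:

```python
from pathlib import Path
from utils.release_utils.search_in_xml import search_in_xml

preset = Path("build/output/filtered/xml/Example-headset.xml")  # hypothetical file

print(search_in_xml(preset, "65586"))              # gate_outputvolume
print(search_in_xml(preset, "65540;65541;65542"))  # convolver_kernel (.irs name)
```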
--------------------------------------------------------------------------------
/default_presets/m2_keys.txt:
--------------------------------------------------------------------------------
1 | m2 (Mode 2) = Speaker
2 | ---------------------
3 |
4 | # Mode
5 | mode="32775"
6 |
7 | # Master Switch
8 | enable="36868"
9 |
10 | # Master Gate (Limiter)
11 | gate_outputvolume="65586"
12 | gate_limiter="65588"
13 |
14 | # Playback Gain Control
15 | playbackgain_enable="65565"
16 | playbackgain_ratio="65566"
17 | playbackgain_volume="65567"
18 | playbackgain_maxscaler="65568"
19 |
20 | # FET Compressor
21 | fet_enable="65610"
22 | fet_threshold="65611"
23 | fet_ratio="65612"
24 | fet_knee="65613"
25 | fet_autoknee="65614"
26 | fet_gain="65615"
27 | fet_autogain="65616"
28 | fet_attack="65617"
29 | fet_autoattack="65618"
30 | fet_release="65619"
31 | fet_autorelease="65620"
32 | fet_kneemulti="65621"
33 | fet_maxattack="65622"
34 | fet_maxrelease="65623"
35 | fet_crest="65624"
36 | fet_adapt="65625"
37 | fet_noclipenable="65626"
38 |
39 | # FIR Equalizer
40 | fireq_enable="65551"
41 | fireq="65552"
42 |
43 | # Convolver
44 | convolver_enable="65538"
45 | convolver_kernel="65540;65541;65542"
46 | convolver_crosschannel="65543"
47 |
48 | # Reverberation
49 | reverberation_enable="65559"
50 | reverberation_roomsize="65560"
51 | reverberation_room_width="65561"
52 | reverberation_damp="65562"
53 | reverberation_wet="65563"
54 | reverberation_dry="65564"
55 |
56 | # Speaker Optimization
57 | speaker_optimization="65603"
58 |
--------------------------------------------------------------------------------
/in/README.md:
--------------------------------------------------------------------------------
1 | ## Credits
2 |
3 |
4 |
5 | All currently available `DDC`s, `Kernel`s & `Preset`s are sourced from:
6 |
7 | S.No. | Author | DDC, Kernel & Preset
8 | --- | --- | ---
9 | **1** | [Jadilson Guedes](https://github.com/jadilson12) | [Viper4Android-presets](https://github.com/jadilson12/Viper4Android-presets)
10 | **2** | [Joe0Bloggs](https://github.com/Joe0Bloggs) | [IRS](https://www.dropbox.com/sh/vbnj47jcnbgrvkv/AAAcAP6ypyGJa0995Nq37PEFa)
11 | **3** | [John Fawkes](https://github.com/JohnFawkes) | [ViperIRS](https://drive.google.com/file/d/1Bii6ER0cNgHMspVozMIfYfFAu3l16d_-/view?usp=sharing)
12 | **4** | [programminghoch10](https://github.com/programminghoch10) | [ViPER4AndroidRepackaged](https://github.com/programminghoch10/ViPER4AndroidRepackaged)
13 | **5** | [WSTxda](https://github.com/WSTxda) | [ViperFX-RE-Releases](https://github.com/WSTxda/ViperFX-RE-Releases)
14 |
15 | Special thanks to the respective developers for their contributions.
16 |
--------------------------------------------------------------------------------
/in/download.py:
--------------------------------------------------------------------------------
1 | import gdown
2 | from pathlib import Path
3 | from utils import create_directories
4 |
5 | def download_files(download_dir: Path, file_ids: dict) -> None:
6 | """
7 | Downloads files from Google Drive using their file IDs and saves them into
8 | a specified directory. It organizes the files by author, creating a separate
9 | folder for each author.
10 |
11 | Args:
12 | download_dir (Path): The directory where the files should be saved.
13 | file_ids (dict): A dictionary where each key is an author name and the value
14 | is another dictionary with file names as keys and Google Drive file IDs as values.
15 | """
16 |
17 | for author, files in file_ids.items():
18 | author_dir = download_dir/author
19 | create_directories([author_dir])
20 |
21 | for file_name, file_id in files.items():
22 | out_file = author_dir/f'{file_name}.zip'
23 | gdown.download(id=file_id, output=str(out_file))  # pass the output path as a string
24 |
25 | if __name__ == "__main__":
26 |
27 | download_dir = Path('in')
28 |
29 | # {
30 | # 'author': {
31 | # 'file_name': 'file_id'
32 | # }
33 | # }
34 | file_ids = {
35 | 'jadilson12': {
36 | 'Viper4Android-presets': '19pn29medRLzy8m9uPzkPmpvPSgHIsbz7'
37 | },
38 | 'Joe0Bloggs': {
39 | 'IRS': '19rEUl8QlBUWpgWrpaMxv2XnEVpZxXavV'
40 | },
41 | 'JohnFawkes': {
42 | 'ViperIRS': '19thPV8G2eOohh-ihUJadEL3bwqwgvd-G'
43 | },
44 | 'programminghoch10': {
45 | 'ViperIRS': '1mu3l2mLuRlpuIIKUoA6a6Eg2Y27xEcrl',
46 | 'ViperVDC': '1oojAJp8ze7SzC5KHoGGGb6x1MIuSekjL'
47 | },
48 | 'WSTxda': {
49 | 'DDC': '1CKByuHZ_6AMHDC2NIo2H92ZEkR6Cv1SR',
50 | 'Kernel': '1y1HiV-SvzmqoYEzAIuztV-tya6DkYQzO'
51 | }
52 | }
53 |
54 | download_files(download_dir, file_ids)
55 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from pathlib import Path
4 | from pipe.convert import convert_presets
5 | from pipe.extract import extract_archives
6 | from pipe.filter import filter_irs_vdc_xml
7 | from pipe.release import create_release
8 | from utils.create_directories import create_directories
9 |
10 | def process(input_dir: Path, output_dir: Path, version: str):
11 | extract_dir = extract_archives(input_dir, output_dir)
12 | irs_dir, vdc_dir, xml_dir = filter_irs_vdc_xml(extract_dir, output_dir)
13 | preset_converted_dir = convert_presets(xml_dir, output_dir)
14 | release_dir = create_release(irs_dir, vdc_dir, preset_converted_dir, output_dir, version)
15 | print(f"Files Saved In: {release_dir}")
16 |
17 | def main(input_dir: Path, output_dir: Path, version: str):
18 | if not os.path.isdir(input_dir):
19 | print(f"Error: The input directory '{input_dir}' does not exist.")
20 | sys.exit(1)
21 |
22 | create_directories([output_dir])
23 | process(input_dir, output_dir, version)
24 |
25 | if __name__ == "__main__":
26 |
27 | version = '2.2.0'
28 |
29 | input_dir = Path('in')
30 | output_dir = Path('build/output')
31 |
32 | main(input_dir, output_dir, version)
33 |
--------------------------------------------------------------------------------
/pipe/convert.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 | from utils.create_directories import create_directories
4 |
5 | # Path to Default Presets
6 | default_m1_preset_file = Path('default_presets/default_m1.xml')
7 | default_m2_preset_file = Path('default_presets/default_m2.xml')
8 |
9 | def convert_presets(input_dir: Path, output_dir: Path) -> Path:
10 | """Convert any ViPER Preset XML to version 2.7.2+ compatible format."""
11 | print("Converting Presets ...")
12 |
13 | preset_converted_dir = output_dir/'preset-converted'
14 | create_directories([preset_converted_dir])
15 |
16 | # List all files in input directory
17 | for root, _, files in os.walk(input_dir):
18 | root = Path(root)
19 |
20 | # For each file
21 | for file in files:
22 |
23 | old_preset_file = root/file
24 | new_preset_file = preset_converted_dir/file
25 | # Select Default Preset file
26 | if (any(keyword in file.lower() for keyword in ['bluetooth', 'headset', 'usb'])):
27 | default_preset_file = default_m1_preset_file
28 | elif ('speaker' in file.lower()):
29 | default_preset_file = default_m2_preset_file
30 | else:
31 | print(f'\nCannot determine Preset type:\n{old_preset_file}')
32 | continue
33 |
34 | # Opening files to work with
35 | # Default Preset
36 | # Old Preset
37 | # New Preset
38 | with (
39 | open(default_preset_file, 'r') as default_preset,
40 | open(old_preset_file, 'r') as old_preset,
41 | open(new_preset_file, 'w') as new_preset
42 | ):
43 |
44 | default_preset = default_preset.read().splitlines()
45 | old_preset = old_preset.read().splitlines()
46 |
47 | # For each line of Default Preset
48 | for default_line in default_preset:
49 | default_line = default_line.strip()
50 |
51 | # Mode
52 | # Master Switch -> Enabled
53 | # Playback Gain -> Enabled
54 | # Keep Master Limiter & Playback Gain values to ViPER defaults
55 | if any(key in default_line for key in ('32775', '36868', '65586', '65587', '65588', '65565', '65566', '65567', '65568')):
56 | if (('36868' in default_line) or ("65565" in default_line)):
57 | new_preset.write(default_line.replace('false', 'true') + "\n")
58 | else:
59 | new_preset.write(default_line + "\n")
60 |
61 | # FET Compressor
62 | # ViPER DDC
63 | # Spectrum Extension
64 | # FIR Equalizer
65 | # Convolver
66 | # Field Surround
67 | # Headphone Surround +
68 | # Reverberation
69 | # Dynamic System
70 | # Tube Simulator (6N1J)
71 | # ViPER Bass
72 | # ViPER Clarity
73 | # Auditory System Protection
74 | # AnalogX
75 | # Speaker Optimization
76 | else:
77 |
78 | # For each line of Old Preset
79 | for old_line in old_preset:
80 | old_line = old_line.strip()
81 |
82 | # Keep Master Limiter & Playback Gain values to ViPER defaults
83 |
84 | # # Master Limiter
85 |
86 | # if ('65586' in default_line):
87 | # if (('65586' in old_line) or ('65608' in old_line)):
88 | # old_line = old_line.split('"')
89 | # if 21 < int(old_line[3]) :
90 | # # Normalizing to range of 0 - 21
91 | # old_line[3] = str([1, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200].index(int(old_line[3])))
92 | # old_line = '"'.join(old_line)
93 | # new_preset.write(old_line.replace('65608', '65586') + "\n")
94 | # break
95 |
96 | # elif ('65588' in default_line):
97 | # if (('65588' in old_line) or ('65609' in old_line)):
98 | # old_line = old_line.split('"')
99 | # if 5 < int(old_line[3]) :
100 | # # Normalizing to range of 0 - 5
101 | # old_line[3] = str([30, 50, 70, 80, 90, 100].index(int(old_line[3])))
102 | # old_line = '"'.join(old_line)
103 | # new_preset.write(old_line.replace('65609', '65588') + "\n")
104 | # break
105 |
106 | # # Playback Gain Control
107 |
108 | # if (('65565' in default_line) and ('65604' in old_line)):
109 | # new_preset.write(old_line.replace('65604', '65565') + "\n")
110 | # break
111 |
112 | # elif ('65566' in default_line):
113 | # if (('65566' in old_line) or ('65605' in old_line)):
114 | # old_line = old_line.split('"')
115 | # if 2 < int(old_line[3]) :
116 | # # Normalizing to range of 0 - 2
117 | # old_line[3] = str([50, 100, 300].index(int(old_line[3])))
118 | # old_line = '"'.join(old_line)
119 | # new_preset.write(old_line.replace('65605', '65566') + "\n")
120 | # break
121 |
122 | # elif ('65567' in default_line):
123 | # if (('65567' in old_line) or ('65606' in old_line)):
124 | # old_line = old_line.split('"')
125 | # if 5 < int(old_line[3]) :
126 | # # Normalizing to range of 0 - 5
127 | # old_line[3] = str([30, 50, 70, 80, 90, 100].index(int(old_line[3])))
128 | # old_line = '"'.join(old_line)
129 | # new_preset.write(old_line.replace('65606', '65567') + "\n")
130 | # break
131 |
132 | # elif ('65568' in default_line):
133 | # if (('65568' in old_line) or ('65607' in old_line)):
134 | # old_line = old_line.split('"')
135 | # if 10 < int(old_line[3]) :
136 | # # Normalizing to range of 0 - 10
137 | # old_line[3] = str([100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 3000].index(int(old_line[3])))
138 | # old_line = '"'.join(old_line)
139 | # new_preset.write(old_line.replace('65607', '65568') + "\n")
140 | # break
141 |
142 | # FET Compressor
143 |
144 | if (('65610' in default_line) and ('65627' in old_line)):
145 | new_preset.write(old_line.replace('65627', '65610') + "\n")
146 | break
147 |
148 | elif (('65611' in default_line) and ('65628' in old_line)):
149 | new_preset.write(old_line.replace('65628', '65611') + "\n")
150 | break
151 |
152 | elif (('65612' in default_line) and ('65629' in old_line)):
153 | new_preset.write(old_line.replace('65629', '65612') + "\n")
154 | break
155 |
156 | elif (('65613' in default_line) and ('65630' in old_line)):
157 | new_preset.write(old_line.replace('65630', '65613') + "\n")
158 | break
159 |
160 | elif (('65614' in default_line) and ('65631' in old_line)):
161 | new_preset.write(old_line.replace('65631', '65614') + "\n")
162 | break
163 |
164 | elif (('65615' in default_line) and ('65632' in old_line)):
165 | new_preset.write(old_line.replace('65632', '65615') + "\n")
166 | break
167 |
168 | elif (('65616' in default_line) and ('65633' in old_line)):
169 | new_preset.write(old_line.replace('65633', '65616') + "\n")
170 | break
171 |
172 | elif (('65617' in default_line) and ('65634' in old_line)):
173 | new_preset.write(old_line.replace('65634', '65617') + "\n")
174 | break
175 |
176 | elif (('65618' in default_line) and ('65635' in old_line)):
177 | new_preset.write(old_line.replace('65635', '65618') + "\n")
178 | break
179 |
180 | elif (('65619' in default_line) and ('65636' in old_line)):
181 | new_preset.write(old_line.replace('65636', '65619') + "\n")
182 | break
183 |
184 | elif (('65620' in default_line) and ('65637' in old_line)):
185 | new_preset.write(old_line.replace('65637', '65620') + "\n")
186 | break
187 |
188 | elif (('65621' in default_line) and ('65638' in old_line)):
189 | new_preset.write(old_line.replace('65638', '65621') + "\n")
190 | break
191 |
192 | elif (('65622' in default_line) and ('65639' in old_line)):
193 | new_preset.write(old_line.replace('65639', '65622') + "\n")
194 | break
195 |
196 | elif (('65623' in default_line) and ('65640' in old_line)):
197 | new_preset.write(old_line.replace('65640', '65623') + "\n")
198 | break
199 |
200 | elif (('65624' in default_line) and ('65641' in old_line)):
201 | new_preset.write(old_line.replace('65641', '65624') + "\n")
202 | break
203 |
204 | elif (('65625' in default_line) and ('65642' in old_line)):
205 | new_preset.write(old_line.replace('65642', '65625') + "\n")
206 | break
207 |
208 | elif (('65626' in default_line) and ('65643' in old_line)):
209 | new_preset.write(old_line.replace('65643', '65626') + "\n")
210 | break
211 |
212 | # FIR Equalizer
213 |
214 | if (('65551' in default_line) and ('65595' in old_line)):
215 | new_preset.write(old_line.replace('65595', '65551') + "\n")
216 | break
217 |
218 | elif (('65552' in default_line) and ('65596' in old_line)):
219 | new_preset.write(old_line.replace('65596', '65552') + "\n")
220 | break
221 |
222 | # Convolver
223 |
224 | if (('65538' in default_line) and ('65589' in old_line)):
225 | new_preset.write(old_line.replace('65589', '65538') + "\n")
226 | break
227 |
228 | elif ('65540;65541;65542' in default_line):
229 | if (('65540;65541;65542' in old_line) or ('65591;65592;65593' in old_line)):
230 | # Correcting '&' in Kernel(.irs) names
231 | old_line = (
232 | old_line
233 | .replace('>amp;', '&')
234 | .replace('>Select impulse response fileamp;', '&')
235 | .replace('Select impulse response file', '')
236 | .replace('Kernel', '')
237 | .replace('Choose Impulse Response', '')
238 | .replace('Selecione o arquivo de impulso de resposta', '')
239 | )
240 | new_preset.write(old_line.replace('65591;65592;65593', '65540;65541;65542') + "\n")
241 | break
242 |
243 | elif (('65543' in default_line) and ('65594' in old_line)):
244 | new_preset.write(old_line.replace('65594', '65543') + "\n")
245 | break
246 |
247 | # Reverberation
248 |
249 | if (('65559' in default_line) and ('65597' in old_line)):
250 | new_preset.write(old_line.replace('65597', '65559') + "\n")
251 | break
252 |
253 | elif ('65560' in default_line):
254 | if (('65560' in old_line) or ('65598' in old_line)):
255 | old_line = old_line.split('"')
256 | if 10 < int(old_line[3]) :
257 | # Normalizing to range of 0 - 10
258 | old_line[3] = str(int(int(old_line[3])/10))
259 | old_line = '"'.join(old_line)
260 | new_preset.write(old_line.replace('65598', '65560') + "\n")
261 | break
262 |
263 | elif ('65561' in default_line):
264 | if (('65561' in old_line) or ('65599' in old_line)):
265 | old_line = old_line.split('"')
266 | if 10 < int(old_line[3]) :
267 | # Normalizing to range of 0 - 10
268 | old_line[3] = str(int(int(old_line[3])/10))
269 | old_line = '"'.join(old_line)
270 | new_preset.write(old_line.replace('65599', '65561') + "\n")
271 | break
272 |
273 | elif (('65562' in default_line) and ('65600' in old_line)):
274 | new_preset.write(old_line.replace('65600', '65562') + "\n")
275 | break
276 |
277 | elif (('65563' in default_line) and ('65601' in old_line)):
278 | new_preset.write(old_line.replace('65601', '65563') + "\n")
279 | break
280 |
281 | elif (('65564' in default_line) and ('65602' in old_line)):
282 | new_preset.write(old_line.replace('65602', '65564') + "\n")
283 | break
284 |
285 | # ViPER DDC
286 | # Spectrum Extension
287 | # Field Surround
288 | # Differential Surround
289 | # Headphone Surround +
290 | # Dynamic System
291 | # Tube Simulator (6N1J)
292 | # ViPER Bass
293 | # ViPER Clarity
294 | # Auditory System Protection
295 | # AnalogX
296 | if (default_line[default_line.find('"')+1:default_line.find('"', default_line.find('"')+1)] == old_line[old_line.find('"')+1:old_line.find('"', old_line.find('"')+1)]):
297 |
298 | # Field Surround
299 |
300 | if ('65554;65556' in default_line):
301 | old_line = old_line.split('"')
302 | if 8 < int(old_line[3]) :
303 | # Normalizing to range of 0 - 8
304 | old_line[3] = str(int((int(old_line[3])-120)/10))
305 | old_line = '"'.join(old_line)
306 | new_preset.write(old_line + "\n")
307 | break
308 |
309 | elif ('65555' in default_line):
310 | old_line = old_line.split('"')
311 | if 10 < int(old_line[3]) :
312 | # Normalizing to range of 0 - 10
313 | old_line[3] = str(int((int(old_line[3])-120)/10))
314 | old_line = '"'.join(old_line)
315 | new_preset.write(old_line + "\n")
316 | break
317 |
318 | # Differential Surround
319 |
320 | if ('65558' in default_line):
321 | old_line = old_line.split('"')
322 | if 19 < int(old_line[3]) :
323 | # Normalizing to range of 0 - 19
324 | old_line[3] = str(int((int(old_line[3])/100)-1))
325 | old_line = '"'.join(old_line)
326 | new_preset.write(old_line + "\n")
327 | break
328 |
329 | # Dynamic System
330 |
331 | if ('65573' in default_line):
332 | old_line = old_line.split('"')
333 | if 100 < int(old_line[3]) :
334 | # Normalizing to range of 0 - 100
335 | old_line[3] = str(int((int(old_line[3])-100)/20))
336 | old_line = '"'.join(old_line)
337 | new_preset.write(old_line + "\n")
338 | break
339 |
340 | # ViPER Bass
341 |
342 | if ('65576' in default_line):
343 | old_line = old_line.split('"')
344 | if 135 < int(old_line[3]) :
345 | # Normalizing to range of 0 - 135
346 | old_line[3] = str(int(int(old_line[3])-15))
347 | old_line = '"'.join(old_line)
348 | new_preset.write(old_line + "\n")
349 | break
350 |
351 | elif ('65577' in default_line):
352 | old_line = old_line.split('"')
353 | if 11 < int(old_line[3]) :
354 | # Normalizing to range of 0 - 11
355 | old_line[3] = str(int((int(old_line[3])-50)/50))
356 | old_line = '"'.join(old_line)
357 | new_preset.write(old_line + "\n")
358 | break
359 |
360 | # ViPER Clarity
361 |
362 | if ('65580' in default_line):
363 | old_line = old_line.split('"')
364 | if 9 < int(old_line[3]) :
365 | # Normalizing to range of 0 - 9
366 | old_line[3] = str(int(int(old_line[3])/50))
367 | old_line = '"'.join(old_line)
368 | new_preset.write(old_line + "\n")
369 | break
370 |
371 | # Remaining Features
372 | new_preset.write(old_line + "\n")
373 | break
374 |
375 | # Speaker Optimization
376 | else:
377 | new_preset.write(default_line + "\n")
378 |
379 | return(preset_converted_dir)
380 |
381 | if __name__ == "__main__":
382 | input_dir = Path('')
383 | output_dir = Path('')
384 | convert_presets(input_dir, output_dir)
385 |
--------------------------------------------------------------------------------
/pipe/extract.py:
--------------------------------------------------------------------------------
1 | import patoolib
2 | import shutil
3 | from pathlib import Path
4 | from utils.create_directories import create_directories
5 |
6 | # Supported archive formats
7 | ARCHIVE_EXTENSIONS = {'.rar', '.tar', '.zip', '.7z'}
8 |
9 | def extract_archive(archive_path: Path, extract_to: Path) -> bool:
10 | """Extract a single archive to the specified directory."""
11 | try:
12 | patoolib.extract_archive(
13 | str(archive_path),
14 | outdir=str(extract_to),
15 | verbosity=-1
16 | )
17 | return True
18 | except Exception as e:
19 | print(f'Failed to extract "{archive_path}": {e}')
20 | return False
21 |
22 | def copy_file(source: Path, destination: Path) -> None:
23 | """Copy a single file, creating parent directories if needed."""
24 | try:
25 | create_directories([destination.parent])
26 | shutil.copy2(source, destination)
27 | except Exception as e:
28 | # print(f'Failed to copy "{source}": {e}')
29 | pass
30 |
31 | def get_unique_directory(base_path: Path) -> Path:
32 | """Generate a unique directory path by appending a counter if needed."""
33 | if not base_path.exists():
34 | return base_path
35 |
36 | counter = 2
37 | while True:
38 | new_path = Path(f"{base_path}_{counter}")
39 | if not new_path.exists():
40 | return new_path
41 | counter += 1
42 |
43 | def process_directory(current_dir: Path, extract_dir: Path, relative_path: Path, processed_archives: set[Path]) -> None:
44 | """Process a directory recursively, handling both archives and target files."""
45 | # Process all items in the current directory
46 | for item in current_dir.iterdir():
47 | # Skip if item has been processed (prevents infinite loops)
48 | if item in processed_archives:
49 | continue
50 |
51 | if item.is_file():
52 | extension = item.suffix.lower()
53 |
54 | # Handle archives
55 | if extension in ARCHIVE_EXTENSIONS:
56 | processed_archives.add(item)
57 | extract_to = get_unique_directory(
58 | extract_dir/relative_path/item.stem
59 | )
60 |
61 | if extract_archive(item, extract_to):
62 | # Recursively process the extracted contents
63 | process_directory(
64 | extract_to,
65 | extract_dir,
66 | relative_path/item.stem,
67 | processed_archives
68 | )
69 |
70 | # Handle files
71 | else:
72 | destination = extract_dir/relative_path/item.name
73 | copy_file(item, destination)
74 |
75 | # Recursively process subdirectories
76 | elif item.is_dir():
77 | process_directory(
78 | item,
79 | extract_dir,
80 | relative_path/item.name,
81 | processed_archives
82 | )
83 |
84 | def extract_archives(input_dir: Path, output_dir: Path) -> Path:
85 | """Recursively extract archives."""
86 | print("Extracting Archives ...")
87 |
88 | extract_dir = output_dir/'extracted'
89 | create_directories([extract_dir])
90 |
91 | try:
92 | processed_archives: set[Path] = set()
93 | process_directory(input_dir, extract_dir, Path(), processed_archives)
94 | except Exception as e:
95 | print(f"Error during processing: {e}")
96 |
97 | return extract_dir
98 |
99 | if __name__ == "__main__":
100 | input_dir = Path('')
101 | output_dir = Path('')
102 | extract_archives(input_dir, output_dir)
103 |
--------------------------------------------------------------------------------
/pipe/filter.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | from collections import defaultdict
4 | from pathlib import Path
5 | from utils.create_directories import create_directories
6 | from utils.sha256 import sha256
7 |
8 | def verify_xml(xml_path: Path) -> bool:
9 | """
10 | Check if XML file contains ViPER-specific features.
11 | Returns True if the XML is ViPER-compatible, False otherwise.
12 | """
13 | with open(xml_path, 'r') as xml_file:
14 | xml_data = [line.strip() for line in xml_file.readlines()]
15 |     # ViPER presets store their settings as <int>/<string> entries keyed by numeric IDs
16 |     check_features = ['<int name="', '<string name="']
17 |     for line in xml_data:
18 |         if any(feature in line for feature in check_features):
19 |             return True
20 | 
21 |     return False
22 | 
23 | def copy_file(full_path: Path, target_dir: Path, file_name: str, file_extension: str, hashes: defaultdict, counts: defaultdict) -> None:
24 | """Copy file to target directory with hash-based deduplication and name conflict resolution."""
25 | file_hash = sha256(full_path)
26 |
27 | if (file_name not in hashes[file_hash]):
28 | hashes[file_hash].add(file_name)
29 |
30 | name_repeat_count = counts[file_name] + 1
31 | counts[file_name] = name_repeat_count
32 |
33 | if (1 < name_repeat_count):
34 | file_name = f'{file_name}_{name_repeat_count}'
35 |
36 | shutil.copy2(full_path, f'{target_dir/file_name}{file_extension}')
37 |
38 | def filter_irs_vdc_xml(input_dir: Path, output_dir: Path) -> Path:
39 | """Filter IRSs, VDCs & XMLs from a given directory with hash-based deduplication and name conflict resolution."""
40 | print("Filtering IRSs, VDCs & XMLs ...")
41 |
42 | filter_dir = output_dir/'filtered'
43 | irs_dir = filter_dir/'irs'
44 | vdc_dir = filter_dir/'vdc'
45 | xml_dir = filter_dir/'xml'
46 | create_directories([filter_dir, irs_dir, vdc_dir, xml_dir])
47 |
48 | irs_hashes, irs_counts = defaultdict(set), defaultdict(int)
49 | vdc_hashes, vdc_counts = defaultdict(set), defaultdict(int)
50 | xml_hashes, xml_counts = defaultdict(set), defaultdict(int)
51 |
52 | for root, _, files in os.walk(input_dir):
53 | root = Path(root)
54 |
55 | for file in files:
56 | full_path = root/file
57 |
58 | file_name = full_path.stem.strip()
59 | file_extension = full_path.suffix
60 |
61 | if (file_extension == '.irs'):
62 | copy_file(full_path, irs_dir, file_name, file_extension, irs_hashes, irs_counts)
63 |
64 | elif (file_extension == '.vdc'):
65 | copy_file(full_path, vdc_dir, file_name, file_extension, vdc_hashes, vdc_counts)
66 |
67 | elif (file_extension == '.xml'):
68 |
69 | verify_result = verify_xml(full_path)
70 |
71 | if (verify_result):
72 | if (file_name in ['bt_a2dp', 'headset', 'speaker', 'usb_device']):
73 | if (file_name == 'bt_a2dp'):
74 | file_name = 'bluetooth'
75 | elif (file_name == 'usb_device'):
76 | file_name = 'usb'
77 |
78 | new_file_name = f'{root.stem}{root.suffix}'.strip()
79 |
80 | if (not (any(keyword in new_file_name.lower() for keyword in ['bluetooth', 'headset', 'speaker', 'usb']))):
81 | new_file_name = f'{new_file_name}-{file_name}'
82 |
83 | else:
84 | new_file_name = file_name
85 |
86 | copy_file(full_path, xml_dir, new_file_name, file_extension, xml_hashes, xml_counts)
87 |
88 | return(irs_dir, vdc_dir, xml_dir)
89 |
90 | if __name__ == "__main__":
91 | input_dir = Path('')
92 | output_dir = Path('')
93 | filter_irs_vdc_xml(input_dir, output_dir)
94 |
--------------------------------------------------------------------------------
/pipe/release.py:
--------------------------------------------------------------------------------
1 | import ast
2 | import os
3 | import shutil
4 | from dataclasses import dataclass
5 | from pathlib import Path
6 | from utils.create_directories import create_directories
7 | from utils.release_utils.check_duplicates import check_duplicates
8 | from utils.release_utils.list_missings import list_missings
9 | from utils.release_utils.search_in_xml import search_in_xml
10 |
11 | # Preset (.xml) files whose names contain any of these keywords
12 | # are treated as originals and preferred when selecting among duplicates.
13 | whitelist = [
14 | "Bee", "Devarim", "Deiwid63", "Inner_Fidelity", "J144df",
15 | "Joe_Meek", "Joemeek", "Percocet", "Roi007leaf", "Stormviper",
16 | "Smeejaytee", "V4ARISE", "Japanese", "Joe0Bloggs"
17 | ]
18 |
19 | @dataclass
20 | class ReleaseFiles:
21 | """Represents the file structure for a release variant"""
22 | base_dir: Path
23 | kernel_dir: Path
24 | ddc_dir: Path
25 | preset_dir: Path
26 |
27 | @classmethod
28 | def create(cls, base_path: Path, variant_name: str) -> 'ReleaseFiles':
29 | base_dir = base_path/variant_name
30 | kernel_dir = base_dir/'Kernel'
31 | ddc_dir = base_dir/'DDC'
32 | preset_dir = base_dir/'Preset'
33 |
34 | create_directories([base_dir, kernel_dir, ddc_dir, preset_dir])
35 | return cls(base_dir, kernel_dir, ddc_dir, preset_dir)
36 |
37 | def copy_directory_contents(source_dir: Path, dest_dir: Path) -> None:
38 | """Copy all files from source directory to destination directory"""
39 | for filename in os.listdir(source_dir):
40 | shutil.copy2(source_dir/filename, dest_dir/filename)
41 |
42 | def process_xml_dependencies(xml_path: Path, irs_dir: Path, vdc_dir: Path, target_irs: Path, target_vdc: Path) -> None:
43 | """Process and copy IRS/VDC files referenced in XML"""
44 | # Check for IRS dependency
45 | if irs_path := search_in_xml(xml_path, "65540;65541;65542"):
46 | try:
47 | shutil.copy2(irs_dir/irs_path, target_irs)
48 | except (FileNotFoundError, shutil.Error):
49 | pass
50 |
51 | # Check for VDC dependency
52 | if vdc_path := search_in_xml(xml_path, "65547"):
53 | try:
54 | shutil.copy2(vdc_dir/vdc_path, target_vdc)
55 | except (FileNotFoundError, shutil.Error):
56 | pass
57 |
58 | def select_whitelist_xml(xml_list: list[str], whitelist: list[str]) -> str:
59 | """
60 | Select XML name from the list based on whitelist criteria.
61 | Returns the first matching name after sorting, or the first name in the original list if there are no matches.
62 | """
63 | # Find all names containing whitelist words
64 | matching_names = [
65 | name for name in xml_list
66 | if any(word.lower() in name.lower() for word in whitelist)
67 | ]
68 |
69 | # If we found matches, sort them and return the first one
70 | if matching_names:
71 | return sorted(matching_names)[0]
72 |
73 | # If no matches, return the first name from original list
74 | return xml_list[0]
75 |
76 | def create_full_release(source: ReleaseFiles, release_dir: Path) -> ReleaseFiles:
77 | """Create full release"""
78 | full = ReleaseFiles.create(release_dir, 'Full')
79 |
80 | copy_directory_contents(source.kernel_dir, full.kernel_dir)
81 | copy_directory_contents(source.ddc_dir, full.ddc_dir)
82 | copy_directory_contents(source.preset_dir, full.preset_dir)
83 |
84 | return full
85 |
86 | def create_lite_release(source: ReleaseFiles, release_dir: Path, dup_files: tuple[Path, Path, Path]) -> ReleaseFiles:
87 | """Create lite release"""
88 | lite = ReleaseFiles.create(release_dir, 'Lite')
89 | _, _, dup_xml_path = dup_files
90 |
91 | with open(dup_xml_path, 'r') as f:
92 | for line in f:
93 | parts = line.split(' : ')
94 | count = int(parts[0]) # Number of duplicates
95 | xml_list = ast.literal_eval(parts[-1])
96 |
97 | # Only apply whitelist selection for groups with duplicates
98 | if count > 1:
99 | xml_name = select_whitelist_xml(xml_list, whitelist)
100 | else:
101 | xml_name = xml_list[0]
102 |
103 | xml_path = source.preset_dir/f'{xml_name}.xml'
104 |
105 | # Copy XML and its dependencies
106 | shutil.copy2(xml_path, lite.preset_dir)
107 | process_xml_dependencies(xml_path, source.kernel_dir, source.ddc_dir, lite.kernel_dir, lite.ddc_dir)
108 |
109 | return lite
110 |
111 | def create_recommended_release(full: ReleaseFiles, lite: ReleaseFiles, release_dir: Path, dup_files: tuple[Path, Path, Path]) -> ReleaseFiles:
112 | """Create recommended release"""
113 | recommended = ReleaseFiles.create(release_dir, 'Recommended')
114 | dup_irs_path, dup_vdc_path, _ = dup_files
115 |
116 | # Start with lite contents
117 | copy_directory_contents(lite.kernel_dir, recommended.kernel_dir)
118 | copy_directory_contents(lite.ddc_dir, recommended.ddc_dir)
119 | copy_directory_contents(lite.preset_dir, recommended.preset_dir)
120 |
121 | # Process IRS duplicates
122 | present_irs = {Path(f).stem for f in os.listdir(recommended.kernel_dir)}
123 | with open(dup_irs_path, 'r') as f:
124 | for line in f:
125 | irs_list = ast.literal_eval(line.split(' : ')[-1])
126 | if not any(irs in present_irs for irs in irs_list):
127 | shutil.copy2(full.kernel_dir/f'{irs_list[0]}.irs', recommended.kernel_dir)
128 |
129 | # Process VDC duplicates
130 | present_vdc = {Path(f).stem for f in os.listdir(recommended.ddc_dir)}
131 | with open(dup_vdc_path, 'r') as f:
132 | for line in f:
133 | vdc_list = ast.literal_eval(line.split(' : ')[-1])
134 | if not any(vdc in present_vdc for vdc in vdc_list):
135 | shutil.copy2(full.ddc_dir/f'{vdc_list[0]}.vdc', recommended.ddc_dir)
136 |
137 | return recommended
138 |
139 | def create_release(irs_dir: Path, vdc_dir: Path, xml_dir: Path, output_dir: Path, version: str) -> Path:
140 | """Create new release with 3 variants - Full, Lite & Recommended"""
141 | print(f"Creating Release {version} ...")
142 |
143 | release_dir = output_dir/version
144 | create_directories([release_dir])
145 |
146 | # Create source structure
147 | source = ReleaseFiles(release_dir, irs_dir, vdc_dir, xml_dir)
148 |
149 | # Create full release
150 | full = create_full_release(source, release_dir)
151 | list_missings(full.kernel_dir, full.ddc_dir, full.preset_dir, full.base_dir)
152 | dup_files = check_duplicates(full.kernel_dir, full.ddc_dir, full.preset_dir, full.base_dir)
153 |
154 | # Create lite release
155 | lite = create_lite_release(source, release_dir, dup_files)
156 | list_missings(lite.kernel_dir, lite.ddc_dir, lite.preset_dir, lite.base_dir)
157 | check_duplicates(lite.kernel_dir, lite.ddc_dir, lite.preset_dir, lite.base_dir)
158 |
159 | # Create recommended release
160 | recommended = create_recommended_release(full, lite, release_dir, dup_files)
161 | list_missings(recommended.kernel_dir, recommended.ddc_dir, recommended.preset_dir, recommended.base_dir)
162 | check_duplicates(recommended.kernel_dir, recommended.ddc_dir, recommended.preset_dir, recommended.base_dir)
163 |
164 | return release_dir
165 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | patool
2 | gdown
3 |
--------------------------------------------------------------------------------
/utils/create_directories.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | def create_directories(directories: list[Path]) -> None:
4 | """
5 | Creates multiple directories specified in the input list.
6 |
7 | Args:
8 | directories (list[Path]): A list of Path objects representing the directories to be created.
9 |
10 | Each directory is created with the following options:
11 | - `parents=True`: This allows the creation of any intermediate directories if they do not exist.
12 | - `exist_ok=True`: This prevents an error if a directory already exists.
13 | """
14 | for dir in directories:
15 | dir.mkdir(parents=True, exist_ok=True)
16 |
--------------------------------------------------------------------------------
/utils/release_utils/check_duplicates.py:
--------------------------------------------------------------------------------
1 | import os
2 | from collections import defaultdict
3 | from pathlib import Path
4 | from utils.create_directories import create_directories
5 | from utils.sha256 import sha256
6 |
7 | def write_duplicates_to_file(hashes: defaultdict, filename: Path) -> None:
8 | with open(filename, 'w') as dup_txt:
9 | duplicates = []
10 |
11 | for key, value in hashes.items():
12 | value = sorted(value)
13 | duplicates.append(f'{len(value)} : {key} : {value}\n')
14 |
15 | duplicates = sorted(duplicates, key=lambda x: (int(x.split(' : ')[0]), x.split(' : ')[1]), reverse=True)
16 | dup_txt.writelines(duplicates)
17 |
18 | def process_directory(directory: Path, hashes: defaultdict) -> None:
19 | for root, _, files in os.walk(directory):
20 | root = Path(root)
21 |
22 | for file in files:
23 | full_path = root/file
24 | file_name = full_path.stem
25 |
26 | file_hash = sha256(full_path)
27 |
28 | if file_name not in hashes[file_hash]:
29 | hashes[file_hash].add(file_name)
30 |
31 | def check_duplicates(irs_dir: Path, vdc_dir: Path, xml_dir: Path, output_dir: Path) -> tuple[Path, Path, Path]:
32 | """Check for duplicate IRSs, VDCs & XMLs and list them in dup_irs.txt, dup_vdc.txt & dup_xml.txt"""
33 |
34 | create_directories([output_dir])
35 |
36 | irs_hashes, dup_irs_txt = defaultdict(set), output_dir/'dup_irs.txt'
37 | vdc_hashes, dup_vdc_txt = defaultdict(set), output_dir/'dup_vdc.txt'
38 | xml_hashes, dup_xml_txt = defaultdict(set), output_dir/'dup_xml.txt'
39 |
40 | process_directory(irs_dir, irs_hashes)
41 | process_directory(vdc_dir, vdc_hashes)
42 | process_directory(xml_dir, xml_hashes)
43 |
44 | write_duplicates_to_file(irs_hashes, dup_irs_txt)
45 | write_duplicates_to_file(vdc_hashes, dup_vdc_txt)
46 | write_duplicates_to_file(xml_hashes, dup_xml_txt)
47 |
48 | return(dup_irs_txt, dup_vdc_txt, dup_xml_txt)
49 |
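Each line written by `write_duplicates_to_file` has the form `<count> : <sha256> : <sorted list of file stems>`; `pipe/release.py` later parses these lines back with `ast.literal_eval`. A small round-trip sketch with a made-up hash and file names:

```python
import ast

# Hypothetical dup_xml.txt line: two presets share the same SHA-256 hash.
line = "2 : 3f9a1c...b7e1 : ['Preset_A-headset', 'Preset_B-headset']\n"

count, file_hash, names = line.split(' : ')
print(int(count))               # 2  -> this hash has duplicates
print(ast.literal_eval(names))  # ['Preset_A-headset', 'Preset_B-headset']
```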
--------------------------------------------------------------------------------
/utils/release_utils/list_missings.py:
--------------------------------------------------------------------------------
1 | import os
2 | from collections import defaultdict
3 | from pathlib import Path
4 | from utils.release_utils.search_in_xml import search_in_xml
5 |
6 | def list_missings(irs_dir: Path, vdc_dir: Path, xml_dir: Path, output_dir: Path) -> None:
7 | """List missing IRSs & VDCs in missing.txt"""
8 |
9 | missing_irs = defaultdict(set)
10 | missing_vdc = defaultdict(set)
11 |
12 | for root, _, files in os.walk(xml_dir):
13 | root = Path(root)
14 |
15 | for file in files:
16 | xml = root/file
17 |
18 | irs = search_in_xml(xml, "65540;65541;65542")
19 | if ((irs is not None) and not (os.path.isfile(irs_dir/irs))):
20 | missing_irs[irs].add(file)
21 |
22 | vdc = search_in_xml(xml, "65547")
23 | if ((vdc is not None) and not (os.path.isfile(vdc_dir/vdc))):
24 | missing_vdc[vdc].add(file)
25 |
26 | with open(output_dir/'missing.txt', 'w') as file:
27 | missing = ["[IRS]\n"]
28 |
29 | temp_missing = []
30 | for key, value in missing_irs.items():
31 | value = sorted(value)
32 | temp_missing.append(f'{len(value)} : {key} : {value}\n')
33 |
34 | temp_missing = sorted(temp_missing, key=lambda x: (int(x.split(' : ')[0]), x.split(' : ')[1]), reverse=True)
35 | missing.extend(temp_missing)
36 |
37 | missing.append("\n[VDC]\n")
38 |
39 | temp_missing = []
40 | for key, value in missing_vdc.items():
41 | value = sorted(value)
42 | temp_missing.append(f'{len(value)} : {key} : {value}\n')
43 |
44 | temp_missing = sorted(temp_missing, key=lambda x: (int(x.split(' : ')[0]), x.split(' : ')[1]), reverse=True)
45 | missing.extend(temp_missing)
46 |
47 | file.writelines(missing)
48 |
--------------------------------------------------------------------------------
/utils/release_utils/search_in_xml.py:
--------------------------------------------------------------------------------
1 | import xml.etree.ElementTree as ET
2 | from pathlib import Path
3 |
4 | def search_in_xml(xml: Path, key: str) -> (str | None):
5 | """
6 | Seach for a key in ViPER XML and return it's value.
7 | """
8 | tree = ET.parse(xml)
9 | root = tree.getroot()
10 |
11 | for elem in root.findall('.//'):
12 | if elem.get('name') == key:
13 | return elem.text or elem.get('value')
14 |
15 | return None
16 |
--------------------------------------------------------------------------------
/utils/sha256.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 |
3 | def sha256(root) -> str:
4 | """
5 | Computes the SHA-256 hash of a file.
6 |
7 | This function reads the file in binary mode, processes it in chunks (to handle large files),
8 | and computes its SHA-256 hash.
9 |
10 | Args:
11 | root (str | Path): The path of the file to be hashed.
12 |
13 | Returns:
14 | str: The hexadecimal SHA-256 hash of the file content.
15 | """
16 |
17 | hasher = hashlib.sha256()
18 |
19 | with open(root, 'rb') as f:
20 | for chunk in iter(lambda: f.read(1024), b''):
21 | hasher.update(chunk)
22 |
23 | return hasher.hexdigest()
24 |
--------------------------------------------------------------------------------