├── README.md
├── core
│   ├── __init__.py
│   └── main.py
├── image
│   ├── default.png
│   └── nuclei.png
├── jsfind
├── requirements.txt
└── setup.py
/README.md:
--------------------------------------------------------------------------------
1 |
2 | # JS Finding
3 |
4 |
5 | JS Finding is a Python tool for extracting JavaScript (JS) file URLs from a given domain or list of domains. It drives the external utilities `waybackurls`, `gauplus`, and `subjs` and merges their output into a single de-duplicated list of JS URLs.
6 |
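Conceptually the per-domain flow is small: run each utility, keep the `.js` URLs, and merge everything into one de-duplicated set. The snippet below is an illustrative condensation of that loop rather than the tool itself; it assumes the three utilities are on your `PATH` and uses a placeholder domain.

```
import subprocess

domain = "example.com"  # placeholder target

js_urls = set()

# waybackurls and gauplus take the domain as an argument; keep only .js URLs
for cmd in (["waybackurls", domain], ["gauplus", domain]):
    out = subprocess.run(cmd, capture_output=True, text=True).stdout
    js_urls.update(u for u in out.splitlines() if u.endswith(".js"))

# subjs reads domains/URLs on stdin and prints the JS files it finds
out = subprocess.run(["subjs"], input=domain, capture_output=True, text=True).stdout
js_urls.update(out.splitlines())

print("\n".join(sorted(js_urls)))
```
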
7 | ## Features
8 |
9 | - Extract JavaScript (JS) files from a single URL or a list of domains
10 | - Supports extraction through `waybackurls`, `gauplus`, and `subjs`
11 | - Option to download the successfully extracted JS files
12 | - Option to create wordlists from the downloaded JS file contents
13 |
14 | ## Requirements
15 |
16 | - Python 3.x and a Go toolchain (Go is used to auto-install `waybackurls`, `gauplus`, and `subjs` when they are missing)
17 |
18 | ## Installation
19 | ```
20 | pip3 install git+https://github.com/pikpikcu/js-finding.git -v
21 | ```
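Alternatively, install from a local clone:

```
git clone https://github.com/pikpikcu/js-finding.git
cd js-finding
pip3 install .
```
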
22 | ## Usage
23 |
24 | JS Finding extracts JavaScript (JS) file URLs from a single domain URL, a list of domains, or domains piped in on stdin. Optional flags enable downloading the discovered files and building wordlists from their contents.
25 |
26 | ```
27 | usage: jsfind [-h] [-u URL | -l FILE] [-o OUTPUT] [-d] [-dl] [-r RETRIES] [-od OUTPUT_DIR] [-w] [-p PROXY]
28 |
29 | Extract JS files from given domains.
30 |
31 | optional arguments:
32 | -h, --help show this help message and exit
33 | -u URL, --url URL Single domain URL
34 | -l FILE, --list FILE A file containing a list of domains
35 | -o OUTPUT, --output OUTPUT
36 | The output file to store the results
37 | -d, --debug Enable debug output
38 | -dl, --download Enable file download
39 |   -r RETRIES, --retries RETRIES
40 |                         Number of retries for download attempts (default: 3)
41 | -od OUTPUT_DIR, --output-dir OUTPUT_DIR
42 | The directory to store downloaded files
43 | -w, --create-wordlists
44 | Enable wordlists creation
45 | -p PROXY, --proxy PROXY
46 | Use a proxy server for requests
47 | ```
48 |
49 | ## Examples
50 |
51 | Extract JS from a single domain:
52 |
53 | ```
54 | jsfind -u https://example.com -o output.txt -dl -od downloaded_files -w
55 | ```
56 |
57 | Extract JS from a list of domains:
58 |
59 | ```
60 | jsfind -l domains.txt -o output.txt -dl -od downloaded_files -w
61 | ```
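
The tool also reads domains from standard input, so it can sit at the end of a pipeline:

```
cat domains.txt | jsfind -o output.txt
```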
62 |
63 |
64 | ### JS Analysis with nuclei
65 |
66 | 
67 |
68 | ### Notes
69 |
70 | - Make sure to install all the required Python dependencies before running this tool.
71 | - Verify that `waybackurls`, `gauplus`, and `subjs` are in your system's PATH; jsfind will try to install any that are missing via `go install`, which requires a working Go toolchain. A quick check is sketched below.
72 |
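A quick, purely illustrative way to confirm the helper tools are reachable:

```
import shutil

for tool in ("waybackurls", "gauplus", "subjs"):
    print(tool, "->", shutil.which(tool) or "NOT FOUND")
```
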
73 |
74 | ### Contributing
75 | Contributions are welcome! If you find any issues or have suggestions for improvements, please open an issue or submit a pull request.
76 |
--------------------------------------------------------------------------------
/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pikpikcu/js-finding/1fd99787b694eb79e10b3a85462a377bf236a76d/core/__init__.py
--------------------------------------------------------------------------------
/core/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import argparse
4 | import subprocess
5 | import sys
6 | import time
7 | import re
8 | import requests
9 | import pyfiglet
10 | import os
11 | from colorama import Fore, Style, init
12 | import socks
13 | import socket
14 | from urllib.parse import urlparse
15 |
16 |
17 | def check_tool_installed(tool_name):
18 | try:
19 | subprocess.run([tool_name, "--version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
20 | return True
21 | except FileNotFoundError:
22 | return False
23 |
24 |
25 | def install_tool(tool_name, install_command):
26 | print(f"{Fore.YELLOW}The '{tool_name}' tool is not installed. Installing it now...{Style.RESET_ALL}")
27 | try:
28 | subprocess.run(install_command, shell=True, check=True)
29 | print(f"{Fore.GREEN}Installation of '{tool_name}' is complete.{Style.RESET_ALL}")
30 | except subprocess.CalledProcessError:
31 | print(f"{Fore.RED}Failed to install '{tool_name}'. Please install it manually.{Style.RESET_ALL}")
32 | sys.exit(1)
33 |
34 | def check_dependencies():
35 | if not check_tool_installed("waybackurls"):
36 | install_tool("Waybackurls", "go install github.com/tomnomnom/waybackurls@latest")
37 |
38 | if not check_tool_installed("gauplus"):
39 | install_tool("Gauplus", "go install github.com/bp0lr/gauplus@latest")
40 |
41 | if not check_tool_installed("subjs"):
42 | install_tool("Subjs", "go install github.com/lc/subjs@latest")
43 |
44 | def download_file(url, output_dir, retries):
45 | try:
46 | os.makedirs(output_dir, exist_ok=True) # Create directory outside the loop
47 |
48 | for _ in range(retries):
49 |             response = requests.get(url, timeout=30)  # avoid hanging indefinitely on unresponsive hosts
50 | if response.status_code == 200:
51 | file_name = url.split("/")[-1]
52 | file_path = os.path.join(output_dir, file_name)
53 |
54 | with open(file_path, "wb") as f:
55 | f.write(response.content)
56 | return file_path
57 | else:
58 | print(f"[*] Download error: {url} {response.status_code}")
59 |
60 | print(f"[*] Download failed after {retries} retries: {url}")
61 | return None
62 | except requests.exceptions.RequestException:
63 |         print(f"[x] Request failed (connection or DNS error): {url}")
64 | return None
65 |
66 | def create_wordlists(file_path):
67 | wordlists = set()
68 |     with open(file_path, 'r', errors='ignore') as file:  # downloaded JS may not be valid UTF-8
69 | content = file.read()
70 | # Extract words from the file using regular expressions
71 | words = re.findall(r'\b\w+\b', content)
72 | wordlists.update(words)
73 | return wordlists
74 |
75 | def extract_js(domain, debug, download_files, output_dir, create_lists, retries):
76 | result = []
77 |
78 | try:
79 | # start the timer
80 | start_time = time.time()
81 |
82 | # Print extracting message
83 | print(f"Extracting JS from: {domain}")
84 |
85 | # initialize subprocesses
86 | processes = {
87 | "Waybackurls": subprocess.Popen(['waybackurls', domain], stdout=subprocess.PIPE, stderr=subprocess.PIPE),
88 | "Gauplus": subprocess.Popen(['gauplus', domain], stdout=subprocess.PIPE, stderr=subprocess.PIPE),
89 | "Subjs": subprocess.Popen(['subjs'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE),
90 | }
91 |
92 | outputs = {}
93 |
94 | for name, process in processes.items():
95 | # print debug info
96 | if debug:
97 | print(f"Extracting JS With: {name}")
98 |
99 | # run subprocess and get its output and error
100 | if name == "Subjs":
101 | output, error = process.communicate(input=domain.encode('utf-8'))
102 | else:
103 | output, error = process.communicate()
104 |
105 | output = output.decode('utf-8').splitlines()
106 |
107 | # filter output to include only .js files
108 | if name in ["Waybackurls", "Gauplus"]:
109 | output = [url for url in output if re.search(r"\.js$", url)]
110 |
111 | # print debug info from subprocess
112 | if debug and error:
113 | print(f"{name} error: {error.decode('utf-8')}")
114 |
115 | outputs[name] = output
116 |
117 | # stop the timer
118 | end_time = time.time()
119 |
120 | # print debug info
121 | if debug:
122 | print(f"Extraction completed in: {end_time - start_time} seconds")
123 |
124 | # store the extracted output
125 |         result.append(f"Extracted domain: {domain}")
126 |         result.append("JS URL results:")
127 |
128 | # combine the results from waybackurls, gauplus, and subjs
129 | for output in set(outputs["Waybackurls"]).union(set(outputs["Gauplus"])).union(set(outputs["Subjs"])):
130 | result.append(f"- {output}")
131 |
132 | if download_files and output_dir:
133 | file_path = download_file(output, output_dir, retries)
134 | if file_path:
135 | result.append(f" - File downloaded: {file_path}")
136 | if create_lists:
137 | wordlists = create_wordlists(file_path)
138 | wordlists_file_path = f"{file_path}.wordlists"
139 | with open(wordlists_file_path, 'w') as wordlists_file:
140 | wordlists_file.write("\n".join(wordlists))
141 | result.append(f" - Wordlists created: {wordlists_file_path}")
142 | else:
143 |                     result.append(" - Failed to download file")
144 |
145 |     except Exception as e:
146 |         # record the error instead of exiting so remaining domains are still processed
147 |         result.append(f"Error occurred: {e}")
148 |
149 | return result
150 |
151 | def main():
152 | # Initialize colorama
153 | init()
154 |
155 | # Check tools requirement
156 | check_dependencies()
157 |
158 | # Print banner with codename and version
159 | codename = "JS Finding"
160 | version = "1.002"
161 | banner = pyfiglet.Figlet(font="slant").renderText(codename)
162 | banner += f"{version.center(len(codename))}\n"
163 | print(Fore.GREEN + banner + Style.RESET_ALL)
164 |
165 | # Setup argparse
166 | parser = argparse.ArgumentParser(description='Extract JS files from given domains.')
167 | group = parser.add_mutually_exclusive_group(required=False)
168 | group.add_argument('-u', '--url', metavar='url', type=str, help='Single domain URL')
169 | group.add_argument('-l', '--list', metavar='file', type=str, help='A file containing a list of domains')
170 | parser.add_argument('-o', '--output', metavar='output', type=str, help='The output file to store the results')
171 | parser.add_argument('-d', '--debug', action='store_true', help='Enable debug output')
172 | parser.add_argument('-dl', '--download', action='store_true', help='Enable file download')
173 | parser.add_argument('-r', '--retries', metavar='int', type=int, default=3, help='Number of retries for download attempts')
174 | parser.add_argument('-od', '--output-dir', metavar='dir', type=str, help='The directory to store downloaded files')
175 | parser.add_argument('-w', '--create-wordlists', action='store_true', help='Enable wordlists creation')
176 | parser.add_argument('-p', '--proxy', metavar='proxy', type=str, help='Use a proxy server for requests')
177 |
178 | args = parser.parse_args()
179 |
180 | results = []
181 |
182 | # Set proxy if provided
183 | if args.proxy:
184 | parsed_proxy = urlparse(args.proxy)
185 | proxy_type = parsed_proxy.scheme.lower()
186 | proxy_host = parsed_proxy.hostname
187 | proxy_port = parsed_proxy.port
188 | proxy_username = parsed_proxy.username
189 | proxy_password = parsed_proxy.password
190 |
191 |         if proxy_type in ('http', 'https'):
192 |             # requests honors these environment variables, so download_file
193 |             # uses the proxy without any extra plumbing
194 |             os.environ['HTTP_PROXY'] = args.proxy
195 |             os.environ['HTTPS_PROXY'] = args.proxy
196 |         elif proxy_type == 'socks4':
197 |             socks.set_default_proxy(socks.SOCKS4, proxy_host, proxy_port)
198 |             socket.socket = socks.socksocket
199 |         elif proxy_type == 'socks5':
200 |             # hand any credentials straight to PySocks
201 |             socks.set_default_proxy(socks.SOCKS5, proxy_host, proxy_port,
202 |                                     username=proxy_username, password=proxy_password)
203 |             # patching socket routes all new connections, including those made by requests, through the proxy
204 |             socket.socket = socks.socksocket
205 |         else:
206 |             raise ValueError('Unsupported proxy type')
209 |
210 | # Read piped input if available
211 | if not sys.stdin.isatty():
212 | input_lines = sys.stdin.readlines()
213 | for line in input_lines:
214 | line = line.strip()
215 | extracted = extract_js(line, args.debug, args.download, args.output_dir, args.create_wordlists, args.retries)
216 | results += extracted
217 | results.append("") # Add an empty line for readability
218 | print("\n".join(extracted))
219 | print()
220 |
221 | # Process URL or list of domains if provided
222 | if args.url:
223 | extracted = extract_js(args.url, args.debug, args.download, args.output_dir, args.create_wordlists, args.retries)
224 | results += extracted
225 | results.append("") # Add an empty line for readability
226 | print("\n".join(extracted))
227 | print()
228 |
229 | if args.list:
230 | with open(args.list, 'r') as f:
231 | domains = f.read().splitlines()
232 | for domain in domains:
233 | extracted = extract_js(domain, args.debug, args.download, args.output_dir, args.create_wordlists, args.retries)
234 | results += extracted
235 | results.append("") # Add an empty line for readability
236 | print("\n".join(extracted))
237 | print()
238 |
239 | # Write the results to the output file
240 | if args.output:
241 | output_content = "\n".join(results)
242 | with open(args.output, 'w') as f:
243 | f.write(output_content)
244 | print(Fore.CYAN + f"Results written to {args.output}" + Style.RESET_ALL)
245 |
246 | # Create output directory if specified
247 | if args.output_dir:
248 | output_dir = os.path.abspath(args.output_dir)
249 | os.makedirs(output_dir, exist_ok=True)
250 | print(Fore.CYAN + f"Output directory created: {output_dir}" + Style.RESET_ALL)
251 |
252 | if __name__ == "__main__":
253 | main()
254 |
--------------------------------------------------------------------------------
/image/default.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pikpikcu/js-finding/1fd99787b694eb79e10b3a85462a377bf236a76d/image/default.png
--------------------------------------------------------------------------------
/image/nuclei.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pikpikcu/js-finding/1fd99787b694eb79e10b3a85462a377bf236a76d/image/nuclei.png
--------------------------------------------------------------------------------
/jsfind:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import argparse
4 | import subprocess
5 | import sys
6 | import time
7 | import re
8 | import requests
9 | import pyfiglet
10 | import os
11 | from colorama import Fore, Style, init
12 | import socks
13 | import socket
14 | from urllib.parse import urlparse
15 |
16 |
17 | def check_tool_installed(tool_name):
18 | try:
19 | subprocess.run([tool_name, "--version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
20 | return True
21 | except FileNotFoundError:
22 | return False
23 |
24 |
25 | def install_tool(tool_name, install_command):
26 | print(f"{Fore.YELLOW}The '{tool_name}' tool is not installed. Installing it now...{Style.RESET_ALL}")
27 | try:
28 | subprocess.run(install_command, shell=True, check=True)
29 | print(f"{Fore.GREEN}Installation of '{tool_name}' is complete.{Style.RESET_ALL}")
30 | except subprocess.CalledProcessError:
31 | print(f"{Fore.RED}Failed to install '{tool_name}'. Please install it manually.{Style.RESET_ALL}")
32 | sys.exit(1)
33 |
34 | def check_dependencies():
35 | if not check_tool_installed("waybackurls"):
36 | install_tool("Waybackurls", "go install github.com/tomnomnom/waybackurls@latest")
37 |
38 | if not check_tool_installed("gauplus"):
39 | install_tool("Gauplus", "go install github.com/bp0lr/gauplus@latest")
40 |
41 | if not check_tool_installed("subjs"):
42 | install_tool("Subjs", "go install github.com/lc/subjs@latest")
43 |
44 | def download_file(url, output_dir, retries):
45 | try:
46 | os.makedirs(output_dir, exist_ok=True) # Create directory outside the loop
47 |
48 | for _ in range(retries):
49 |             response = requests.get(url, timeout=30)  # avoid hanging indefinitely on unresponsive hosts
50 | if response.status_code == 200:
51 | file_name = url.split("/")[-1]
52 | file_path = os.path.join(output_dir, file_name)
53 |
54 | with open(file_path, "wb") as f:
55 | f.write(response.content)
56 | return file_path
57 | else:
58 | print(f"[*] Download error: {url} {response.status_code}")
59 |
60 | print(f"[*] Download failed after {retries} retries: {url}")
61 | return None
62 | except requests.exceptions.RequestException:
63 |         print(f"[x] Request failed (connection or DNS error): {url}")
64 | return None
65 |
66 | def create_wordlists(file_path):
67 | wordlists = set()
68 |     with open(file_path, 'r', errors='ignore') as file:  # downloaded JS may not be valid UTF-8
69 | content = file.read()
70 | # Extract words from the file using regular expressions
71 | words = re.findall(r'\b\w+\b', content)
72 | wordlists.update(words)
73 | return wordlists
74 |
75 | def extract_js(domain, debug, download_files, output_dir, create_lists, retries):
76 | result = []
77 |
78 | try:
79 | # start the timer
80 | start_time = time.time()
81 |
82 | # Print extracting message
83 | print(f"Extracting JS from: {domain}")
84 |
85 | # initialize subprocesses
86 | processes = {
87 | "Waybackurls": subprocess.Popen(['waybackurls', domain], stdout=subprocess.PIPE, stderr=subprocess.PIPE),
88 | "Gauplus": subprocess.Popen(['gauplus', domain], stdout=subprocess.PIPE, stderr=subprocess.PIPE),
89 | "Subjs": subprocess.Popen(['subjs'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE),
90 | }
91 |
92 | outputs = {}
93 |
94 | for name, process in processes.items():
95 | # print debug info
96 | if debug:
97 | print(f"Extracting JS With: {name}")
98 |
99 | # run subprocess and get its output and error
100 | if name == "Subjs":
101 | output, error = process.communicate(input=domain.encode('utf-8'))
102 | else:
103 | output, error = process.communicate()
104 |
105 | output = output.decode('utf-8').splitlines()
106 |
107 | # filter output to include only .js files
108 | if name in ["Waybackurls", "Gauplus"]:
109 | output = [url for url in output if re.search(r"\.js$", url)]
110 |
111 | # print debug info from subprocess
112 | if debug and error:
113 | print(f"{name} error: {error.decode('utf-8')}")
114 |
115 | outputs[name] = output
116 |
117 | # stop the timer
118 | end_time = time.time()
119 |
120 | # print debug info
121 | if debug:
122 | print(f"Extraction completed in: {end_time - start_time} seconds")
123 |
124 | # store the extracted output
125 |         result.append(f"Extracted domain: {domain}")
126 |         result.append("JS URL results:")
127 |
128 | # combine the results from waybackurls, gauplus, and subjs
129 | for output in set(outputs["Waybackurls"]).union(set(outputs["Gauplus"])).union(set(outputs["Subjs"])):
130 | result.append(f"- {output}")
131 |
132 | if download_files and output_dir:
133 | file_path = download_file(output, output_dir, retries)
134 | if file_path:
135 | result.append(f" - File downloaded: {file_path}")
136 | if create_lists:
137 | wordlists = create_wordlists(file_path)
138 | wordlists_file_path = f"{file_path}.wordlists"
139 | with open(wordlists_file_path, 'w') as wordlists_file:
140 | wordlists_file.write("\n".join(wordlists))
141 | result.append(f" - Wordlists created: {wordlists_file_path}")
142 | else:
143 |                     result.append(" - Failed to download file")
144 |
145 |     except Exception as e:
146 |         # record the error instead of exiting so remaining domains are still processed
147 |         result.append(f"Error occurred: {e}")
148 |
149 | return result
150 |
151 | def main():
152 | # Initialize colorama
153 | init()
154 |
155 | # Check tools requirement
156 | check_dependencies()
157 |
158 | # Print banner with codename and version
159 | codename = "JS Finding"
160 | version = "1.002"
161 | banner = pyfiglet.Figlet(font="slant").renderText(codename)
162 | banner += f"{version.center(len(codename))}\n"
163 | print(Fore.GREEN + banner + Style.RESET_ALL)
164 |
165 | # Setup argparse
166 | parser = argparse.ArgumentParser(description='Extract JS files from given domains.')
167 | group = parser.add_mutually_exclusive_group(required=False)
168 | group.add_argument('-u', '--url', metavar='url', type=str, help='Single domain URL')
169 | group.add_argument('-l', '--list', metavar='file', type=str, help='A file containing a list of domains')
170 | parser.add_argument('-o', '--output', metavar='output', type=str, help='The output file to store the results')
171 | parser.add_argument('-d', '--debug', action='store_true', help='Enable debug output')
172 | parser.add_argument('-dl', '--download', action='store_true', help='Enable file download')
173 | parser.add_argument('-r', '--retries', metavar='int', type=int, default=3, help='Number of retries for download attempts')
174 | parser.add_argument('-od', '--output-dir', metavar='dir', type=str, help='The directory to store downloaded files')
175 | parser.add_argument('-w', '--create-wordlists', action='store_true', help='Enable wordlists creation')
176 | parser.add_argument('-p', '--proxy', metavar='proxy', type=str, help='Use a proxy server for requests')
177 |
178 | args = parser.parse_args()
179 |
180 | results = []
181 |
182 | # Set proxy if provided
183 | if args.proxy:
184 | parsed_proxy = urlparse(args.proxy)
185 | proxy_type = parsed_proxy.scheme.lower()
186 | proxy_host = parsed_proxy.hostname
187 | proxy_port = parsed_proxy.port
188 | proxy_username = parsed_proxy.username
189 | proxy_password = parsed_proxy.password
190 |
191 |         if proxy_type in ('http', 'https'):
192 |             # requests honors these environment variables, so download_file
193 |             # uses the proxy without any extra plumbing
194 |             os.environ['HTTP_PROXY'] = args.proxy
195 |             os.environ['HTTPS_PROXY'] = args.proxy
196 |         elif proxy_type == 'socks4':
197 |             socks.set_default_proxy(socks.SOCKS4, proxy_host, proxy_port)
198 |             socket.socket = socks.socksocket
199 |         elif proxy_type == 'socks5':
200 |             # hand any credentials straight to PySocks
201 |             socks.set_default_proxy(socks.SOCKS5, proxy_host, proxy_port,
202 |                                     username=proxy_username, password=proxy_password)
203 |             # patching socket routes all new connections, including those made by requests, through the proxy
204 |             socket.socket = socks.socksocket
205 |         else:
206 |             raise ValueError('Unsupported proxy type')
209 |
210 | # Read piped input if available
211 | if not sys.stdin.isatty():
212 | input_lines = sys.stdin.readlines()
213 | for line in input_lines:
214 | line = line.strip()
215 | extracted = extract_js(line, args.debug, args.download, args.output_dir, args.create_wordlists, args.retries)
216 | results += extracted
217 | results.append("") # Add an empty line for readability
218 | print("\n".join(extracted))
219 | print()
220 |
221 | # Process URL or list of domains if provided
222 | if args.url:
223 | extracted = extract_js(args.url, args.debug, args.download, args.output_dir, args.create_wordlists, args.retries)
224 | results += extracted
225 | results.append("") # Add an empty line for readability
226 | print("\n".join(extracted))
227 | print()
228 |
229 | if args.list:
230 | with open(args.list, 'r') as f:
231 | domains = f.read().splitlines()
232 | for domain in domains:
233 | extracted = extract_js(domain, args.debug, args.download, args.output_dir, args.create_wordlists, args.retries)
234 | results += extracted
235 | results.append("") # Add an empty line for readability
236 | print("\n".join(extracted))
237 | print()
238 |
239 | # Write the results to the output file
240 | if args.output:
241 | output_content = "\n".join(results)
242 | with open(args.output, 'w') as f:
243 | f.write(output_content)
244 | print(Fore.CYAN + f"Results written to {args.output}" + Style.RESET_ALL)
245 |
246 | # Create output directory if specified
247 | if args.output_dir:
248 | output_dir = os.path.abspath(args.output_dir)
249 | os.makedirs(output_dir, exist_ok=True)
250 | print(Fore.CYAN + f"Output directory created: {output_dir}" + Style.RESET_ALL)
251 |
252 | if __name__ == "__main__":
253 | main()
254 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | requests
2 | pyfiglet
3 | colorama
4 | PySocks
5 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | setup(
4 | name='js-finding',
5 | version='1.002',
6 | description='A tool to extract JS files from given domains',
7 | author='pikpikcu',
8 | author_email='N/A',
9 |     url='https://github.com/pikpikcu/js-finding',
10 | packages=find_packages(),
11 | install_requires=[
12 | 'requests',
13 | 'pyfiglet',
14 | 'colorama',
15 | 'PySocks',
16 | ],
17 | entry_points={
18 | 'console_scripts': [
19 | 'jsfind=core.main:main',
20 | ],
21 | },
22 | classifiers=[
23 | 'Intended Audience :: Developers',
24 | 'License :: OSI Approved :: MIT License',
25 | 'Programming Language :: Python',
26 | 'Programming Language :: Python :: 3',
27 | 'Programming Language :: Python :: 3.6',
28 | 'Programming Language :: Python :: 3.7',
29 | 'Programming Language :: Python :: 3.8',
30 | 'Programming Language :: Python :: 3.9',
31 | ],
32 | )
33 |
--------------------------------------------------------------------------------