├── README.md ├── drag_and_drop.py ├── requirements.txt └── zs-dl.py /README.md: -------------------------------------------------------------------------------- 1 | # ZS-DL 2 | CLI Zippyshare downloader written in Python. JS execution and BSoup-**FREE**. 3 | [Windows binaries](https://github.com/Sorrow446/ZS-DL/releases) 4 | Upload to Zippyshare with [ZS-UL](https://github.com/Sorrow446/ZS-UL). 5 | **People have been seen selling my tools. DO NOT buy them. My tools are free and always will be.** 6 | ![](https://i.imgur.com/maoyc07.png) 7 | 8 | # Usage 9 | Download two files to default directory: 10 | `ZS-DL.py -u https://www1.zippyshare.com/v/00000000/file.html https://www1.zippyshare.com/v/00000000/file.html` 11 | 12 | Download from text file to "G:\ZS-DL downloads" with HTTPS proxy: 13 | `ZS-DL.py -u G:\links.txt -o "G:\ZS-DL downloads" -p 0.0.0.0:8080` 14 | 15 | Download from DLC container and a single regular URL to default directory: 16 | `ZS-DL.py -u https://www1.zippyshare.com/v/00000000/file.html G:\1.dlc` 17 | ``` 18 | _____ _____ ____ __ 19 | |__ | __|___| \| | 20 | | __|__ |___| | | |__ 21 | |_____|_____| |____/|_____| 22 | 23 | usage: zs-dl.py [-h] -u URLS [URLS ...] [-o OUTPUT_PATH] [-ov] [-p PROXY] 24 | 25 | optional arguments: 26 | -h, --help show this help message and exit 27 | -u URLS [URLS ...], --urls URLS [URLS ...] 28 | URLs separated by a space or an abs path to a txt 29 | file. 30 | -o OUTPUT_PATH, --output-path OUTPUT_PATH 31 | Abs output directory. 32 | -ov, --overwrite Overwrite file if already exists. 33 | -p PROXY, --proxy PROXY 34 | HTTPS only. :. 35 | ``` 36 | -------------------------------------------------------------------------------- /drag_and_drop.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import subprocess 3 | 4 | # Append other args here. 5 | call = ['python', 'zs-dl.py'] 6 | call.append('-u') 7 | call.extend(sys.argv[1:]) 8 | subprocess.Popen(call) 9 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | tqdm 2 | requests 3 | -------------------------------------------------------------------------------- /zs-dl.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import re 5 | import sys 6 | import math 7 | import json 8 | import time 9 | import argparse 10 | import traceback 11 | try: 12 | from urllib.parse import unquote 13 | except ImportError: 14 | from urllib import unquote 15 | 16 | import requests 17 | from tqdm import tqdm 18 | 19 | 20 | 21 | def read_txt(abs): 22 | with open(abs) as f: 23 | return [u.strip() for u in f.readlines()] 24 | 25 | def decrypt_dlc(abs): 26 | # Thank you, dcrypt owner(s). 27 | url = "http://dcrypt.it/decrypt/paste" 28 | r = s.post(url, data={ 29 | 'content': open(abs) 30 | } 31 | ) 32 | r.raise_for_status() 33 | j = json.loads(r.text) 34 | if not j.get('success'): 35 | raise Exception(j) 36 | return j['success']['links'] 37 | 38 | def parse_prefs(): 39 | parser = argparse.ArgumentParser() 40 | parser.add_argument( 41 | '-u', '--urls', 42 | nargs='+', required=True, 43 | help='URLs separated by a space or an abs path to a txt file.' 44 | ) 45 | parser.add_argument( 46 | '-o', '--output-path', 47 | default='ZS-DL downloads', 48 | help='Abs output directory.' 49 | ) 50 | parser.add_argument( 51 | '-ov', '--overwrite', 52 | action='store_true', 53 | help='Overwrite file if already exists.' 54 | ) 55 | parser.add_argument( 56 | '-p', '--proxy', 57 | help='HTTPS only. :.' 58 | ) 59 | args = parser.parse_args() 60 | if args.urls[0].endswith('.txt'): 61 | args.urls = read_txt(args.urls[0]) 62 | for url in args.urls: 63 | if url.endswith('.dlc'): 64 | print("Processing DLC container: " + url) 65 | try: 66 | args.urls = args.urls + decrypt_dlc(url) 67 | except Exception as e: 68 | err("Failed to decrypt DLC container: " + url) 69 | args.urls.remove(url) 70 | time.sleep(1) 71 | return args 72 | 73 | def dir_setup(): 74 | if not os.path.isdir(cfg.output_path): 75 | os.makedirs(cfg.output_path) 76 | 77 | def err(txt): 78 | print(txt) 79 | traceback.print_exc() 80 | 81 | def set_proxy(): 82 | s.proxies.update({'https': 'https://' + cfg.proxy}) 83 | 84 | def check_url(url): 85 | regex = r'https://www(\d{1,3}).zippyshare.com/v/([a-zA-Z\d]{8})/file.html' 86 | match = re.match(regex, url) 87 | if match: 88 | return match.group(1), match.group(2) 89 | raise ValueError("Invalid URL: " + str(url)) 90 | 91 | def extract(url, server, _id): 92 | regex = ( 93 | r'document.getElementById\(\'dlbutton\'\).href = "/d/[a-zA-Z\d]{8}/" \+ ' 94 | r'\((\d{6}) % 51245 \+ (\d{6}) % 913\) \+ "/([\w%-.]+)";' 95 | ) 96 | for _ in range(3): 97 | r = s.get(url) 98 | if r.status_code != 500: 99 | break 100 | time.sleep(1) 101 | r.raise_for_status() 102 | meta = re.search(regex, r.text, re.DOTALL) 103 | if not meta: 104 | raise Exception('Failed to get file URL. File down or pattern changed.') 105 | num_1 = int(meta.group(1)) 106 | num_2 = int(meta.group(2)) 107 | final_num = num_1 % 51245 + num_2 % 913 108 | enc_fname = meta.group(3) 109 | file_url = "https://www{}.zippyshare.com/d/{}/{}/{}".format(server, _id, final_num, enc_fname) 110 | return file_url, unquote(enc_fname) 111 | 112 | def get_file(ref, url): 113 | s.headers.update({ 114 | 'Range': "bytes=0-", 115 | 'Referer': ref 116 | }) 117 | r = s.get(url, stream=True) 118 | del s.headers['Range'] 119 | del s.headers['Referer'] 120 | r.raise_for_status() 121 | length = int(r.headers['Content-Length']) 122 | return r, length 123 | 124 | def download(ref, url, fname): 125 | print(fname) 126 | abs = os.path.join(cfg.output_path, fname) 127 | if os.path.isfile(abs): 128 | if cfg.overwrite: 129 | print("File already exists locally. Will overwrite.") 130 | else: 131 | print("File already exists locally.") 132 | return 133 | r, size = get_file(ref, url) 134 | with open(abs, 'wb') as f: 135 | with tqdm(total=size, unit='B', 136 | unit_scale=True, unit_divisor=1024, 137 | initial=0, miniters=1) as bar: 138 | for chunk in r.iter_content(32*1024): 139 | if chunk: 140 | f.write(chunk) 141 | bar.update(len(chunk)) 142 | 143 | def main(url): 144 | server, _id = check_url(url) 145 | file_url, fname = extract(url, server, _id) 146 | download(url, file_url, fname) 147 | 148 | if __name__ == '__main__': 149 | try: 150 | if hasattr(sys, 'frozen'): 151 | os.chdir(os.path.dirname(sys.executable)) 152 | else: 153 | os.chdir(os.path.dirname(__file__)) 154 | except OSError: 155 | pass 156 | 157 | s = requests.Session() 158 | s.headers.update({ 159 | 'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) " 160 | "AppleWebKit/537.36 (KHTML, like Gecko) Chrome" 161 | "/75.0.3770.100 Safari/537.36" 162 | }) 163 | 164 | print(""" 165 | _____ _____ ____ __ 166 | |__ | __|___| \| | 167 | | __|__ |___| | | |__ 168 | |_____|_____| |____/|_____| 169 | """) 170 | cfg = parse_prefs() 171 | dir_setup() 172 | if cfg.proxy: 173 | set_proxy() 174 | total = len(cfg.urls) 175 | for num, url in enumerate(cfg.urls, 1): 176 | print("\nURL {} of {}:".format(num, total)) 177 | try: 178 | main(url) 179 | except Exception as e: 180 | err('URL failed.') 181 | --------------------------------------------------------------------------------