├── requirements.txt
├── .gitignore
├── getsnippet.py
├── addsnippet.py
├── auth.py
├── batchdiff.py
├── backup.py
├── checked.py
├── README.md
├── append.html
├── instantview-frame.js
├── spider.py
├── delayed_service.py
├── ivdiff.py
└── delayed_userscript.js

/requirements.txt:
--------------------------------------------------------------------------------
lxml
scrapy
requests
readchar
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
download
backup
issues.json
issues_all.json
snippets.json
gen
cookies.txt
ivdiff.log
--------------------------------------------------------------------------------
/getsnippet.py:
--------------------------------------------------------------------------------
import json


snippet = input()
try:
    # Load the snippet store; fall back to an empty dict if it is missing or invalid.
    with open("snippets.json", "r") as file:
        ls = json.loads(file.read())
except Exception:
    ls = {}

if snippet in ls:
    print(ls[snippet])
else:
    print("no such snippet: {}".format(snippet))
--------------------------------------------------------------------------------
/addsnippet.py:
--------------------------------------------------------------------------------
import json


try:
    # Load the existing snippet store; start fresh if it is missing or invalid.
    with open("snippets.json", "r") as file:
        ls = json.loads(file.read())
except Exception:
    ls = {}
print(ls)

print("name?")
name = input()

print("code? (press Ctrl+C to finish)")
lines = []
while True:
    try:
        line = input()
        lines.append(line)
    except (KeyboardInterrupt, EOFError):
        print("end")
        break
text = '\n'.join(lines)

ls[name] = text

with open("snippets.json", "w") as file:
    file.write(json.dumps(ls))
--------------------------------------------------------------------------------
/auth.py:
--------------------------------------------------------------------------------
import requests
import argparse
import ivdiff
import time


def auth(phone, file):
    d = "https://instantview.telegram.org/"
    r = requests.get(d)
    cookies = r.cookies
    r = requests.post(d + "auth/request", cookies=cookies, data={"phone": phone}, headers={
        "X-Requested-With": "XMLHttpRequest",
        # stay under the radar ( ͡° ͜ʖ ͡°)
        "User-Agent": "Mozilla/5.0 (PlayStation 4 2.50) AppleWebKit/537.73 (KHTML, like Gecko)"
    })
    try:
        temp_session = r.json()["temp_session"]
    except Exception:
        print("Error: {}".format(r.content.decode("utf-8")))
        return

    print("waiting for request approval...")
    while r.content != b"true":
        r = requests.post(d + "auth/login", cookies=cookies, data={"temp_session": temp_session}, headers={"X-Requested-With": "XMLHttpRequest"})
        # Keep merging into the CookieJar; converting to a dict inside the loop
        # would make merge_cookies fail on the next iteration.
        cookies = requests.cookies.merge_cookies(cookies, r.cookies)
        time.sleep(1)
    cookies = cookies.get_dict()
    print("success! getting stel_ivs...")

    stel_ivs = None
    while stel_ivs is None:
        print("getting stel_ivs...")
        # The domain and URL can actually be anything; just make sure the domain exists in the contest.
        r = ivdiff.getHtml("dotekomanie.cz", cookies, "dotekomanie.cz", 1)
        if r is not None and "stel_ivs" in r[3]:
            stel_ivs = r[3]
        else:
            time.sleep(10)
    print(f"success! {stel_ivs}")
    cookies = stel_ivs

    with open(file, 'wb') as f:
        for i, v in cookies.items():
            f.write("{}={}; ".format(i, v).encode("utf-8"))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Auth to IV and get cookies.txt file')
    parser.add_argument('phone', type=str, help='phone number')
    parser.add_argument('--cookies', '-c', help='path to file to write cookies to (default is cookies.txt)', nargs='?', default="cookies.txt")

    args = parser.parse_args()
    auth(args.phone, args.cookies)
--------------------------------------------------------------------------------
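Note: `auth.py` serializes cookies as a single line of `name=value; ` pairs. The `ivdiff.parseCookies` helper that the other scripts use to read this file back is not included in this dump; a minimal reader consistent with that format might look like the following sketch (an assumption, not the actual implementation):

```python
def parse_cookies(path="cookies.txt"):
    # Hypothetical reader for the "name=value; " pairs auth.py writes;
    # returns a dict usable as the cookies= argument of requests calls.
    cookies = {}
    with open(path, "r", encoding="utf-8") as f:
        for pair in f.read().split(";"):
            name, sep, value = pair.strip().partition("=")
            if sep:
                cookies[name] = value
    return cookies
```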
{stel_ivs}") 38 | cookies = stel_ivs 39 | 40 | with open(file, 'wb') as f: 41 | for i, v in cookies.items(): 42 | f.write("{}={}; ".format(i, v).encode("utf-8")) 43 | 44 | 45 | if __name__ == "__main__": 46 | parser = argparse.ArgumentParser(description='Auth to IV and get cookies.txt file') 47 | parser.add_argument('phone', type=str, help='phone number') 48 | parser.add_argument('--cookies', '-c', help='path to file to write cookies to (default is cookies.txt)', nargs='?', default="cookies.txt") 49 | 50 | args = parser.parse_args() 51 | auth(args.phone, args.cookies) 52 | -------------------------------------------------------------------------------- /batchdiff.py: -------------------------------------------------------------------------------- 1 | import ivdiff 2 | from multiprocessing import Pool 3 | from multiprocessing import Event 4 | import argparse 5 | import json 6 | import ctypes 7 | import readchar 8 | 9 | 10 | parsed = 0 11 | crawled = 0 12 | have_diff = 0 13 | olds = 0 14 | 15 | 16 | def callback(roflan): 17 | global parsed, have_diff 18 | parsed += 1 19 | if roflan is None or roflan[1] == -1: 20 | print(f"an error occured for url {roflan[0]}") 21 | return 22 | if roflan[1] == 1: 23 | have_diff += 1 24 | ctypes.windll.kernel32.SetConsoleTitleW(f"{(parsed / crawled * 100):.2f}% [{parsed} / crawled {crawled}] (w/ diff {have_diff}, old {olds}) | diffed {roflan[0]}") 25 | 26 | 27 | if __name__ == '__main__': 28 | parser = argparse.ArgumentParser(description='Diff the whole file full of links D:') 29 | parser.add_argument('t1', metavar='first_template', type=str, help='first template number OR template file path') 30 | parser.add_argument('t2', metavar='second_template', type=str, help='second template number OR template file path') 31 | parser.add_argument('file', type=str, help='file with links to diff') 32 | parser.add_argument('--cookies', '-c', help='path to file with cookies (default is cookies.txt)', nargs='?', default="cookies.txt") 33 | parser.add_argument('--poolsize', '-p', help='concurrent connections count(default=5)', type=int, nargs='?', default=5) 34 | parser.add_argument('--nobrowser', '-n', help='do not open browser when diff is found', action='store_true') 35 | parser.add_argument('--browser', '-b', help='browser or path to program to open diff', nargs='?', default="") 36 | 37 | args = parser.parse_args() 38 | 39 | event = Event() 40 | p = Pool(args.poolsize, ivdiff.setup, (event,)) 41 | event.set() 42 | cookies = ivdiff.parseCookies(args.cookies) 43 | 44 | f = list(json.loads(open(args.file, "r").read()).values())[::-1] 45 | print("Total: {}".format(len(f))) 46 | crawled = len(f) 47 | z = 0 48 | for i in f: 49 | p.apply_async(ivdiff.checkDiff, [args.nobrowser, cookies, i, args.t1, args.t2, args.browser], callback=callback) 50 | pause = False 51 | while True: 52 | e = readchar.readchar() 53 | if e == b'q': 54 | print("quit") 55 | break 56 | 57 | if e == b'k': 58 | p.terminate() 59 | print("Killed pool") 60 | 61 | if e == b' ': 62 | pause = not pause 63 | if pause: 64 | event.clear() 65 | else: 66 | event.set() 67 | print(f"pause = {pause}") 68 | -------------------------------------------------------------------------------- /backup.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import ivdiff 3 | import re 4 | from lxml import etree 5 | import argparse 6 | import urllib3 7 | import json 8 | from lxml import html 9 | import os 10 | 11 | htmlparser = etree.HTMLParser(remove_blank_text=True) 12 | verify 
/checked.py:
--------------------------------------------------------------------------------
import requests
import ivdiff
import re
from lxml import etree
from io import StringIO
import argparse
import urllib3
from functools import partial
import json


htmlparser = etree.HTMLParser(remove_blank_text=True)
verify = True
if not verify:
    urllib3.disable_warnings()

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Language": "en-US,en;q=0.5",
    "Accept-Encoding": "gzip, deflate, br",
    "Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
    "Cache-Control": "max-age=0"
}


def getHashAndRules(domain, cookies):
    d = "https://instantview.telegram.org/my/{}".format(domain)

    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
        "Accept-Encoding": "gzip, deflate, br",
        "Referer": d,
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1",
        "Cache-Control": "max-age=0"
    }

    r = requests.get(d, headers=headers, verify=verify, cookies=cookies, params=dict(url=domain))
    # Scrape the workspace hash and the template rules out of the editor page.
    hash = re.search("my\\?hash=(.*?)\",", str(r.content)).group(1)
    rules = json.loads(re.search("initWorkspace\\(\".*?\",(.*?)\\);", str(r.content.decode("utf8"))).group(1))
    return (rules["rules"], hash)


def checkAll(domain, cookies):
    rules, hash = getHashAndRules(domain, cookies)
    r = requests.post("https://instantview.telegram.org/api/my", headers=headers, verify=verify, cookies=cookies, params=dict(hash=hash), data=dict(url=domain, section=domain, method="processByRules", rules_id="", rules=rules, random_id=""))
    rid = r.json()["random_id"]
    r = requests.post("https://instantview.telegram.org/api/my", headers=headers, verify=verify, cookies=cookies, params=dict(hash=hash), data=dict(url=domain, section=domain, method="getSectionData"))
    tree = etree.parse(StringIO(r.json()["items"]), htmlparser)
    list(map(partial(check, domain, cookies, rid, hash), tree.xpath("//h4/text()")))


def check(domain, cookies, rid, hash, url):
    print(url)
    headers["X-Requested-With"] = "XMLHttpRequest"
    headers["Accept"] = "application/json, text/javascript, */*; q=0.01"
    r = {}
    # Retry until the API response confirms the url was marked (contains contest_ready).
    while not r or "contest_ready" not in r:
        r = requests.post("https://instantview.telegram.org/api/my", headers=headers, verify=verify, cookies=cookies, params=dict(hash=hash), data=dict(random_id=rid, url=url, section=domain, method="markUrlAsChecked")).json()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Mark all the pages of an IV template as checked.')
    parser.add_argument('domain', metavar='domain', type=str, help='domain where script should check all the pages')
    parser.add_argument('--cookies', '-c', help='path to file with cookies (default is cookies.txt)', nargs='?', default="cookies.txt")

    args = parser.parse_args()
    cookies = ivdiff.parseCookies(args.cookies)
    checkAll(args.domain, cookies)
--------------------------------------------------------------------------------
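Example invocation of `checked.py`, assuming a valid `cookies.txt` produced by `auth.py` (the domain is illustrative):

```
py checked.py dotekomanie.cz -c cookies.txt
```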
/README.md:
--------------------------------------------------------------------------------
# ivdiff

Scripts to get the difference between two [Instant View](https://instantview.telegram.org) templates.

## Installing

Install [python3](https://www.python.org/downloads/) and [pip](https://pypi.org/project/pip/).
Then run `git clone https://github.com/undrfined/ivdiff; cd ivdiff; pip install -r requirements.txt`.

## Auth

To authenticate, run the `auth.py` script:

```
py auth.py +38093******6
```

...and you're ready to go.

## ivdiff.py

Gets the diff for one specific page.

Usage:

```
py ivdiff.py