├── requirements.txt
├── .gitignore
├── getsnippet.py
├── addsnippet.py
├── auth.py
├── batchdiff.py
├── backup.py
├── checked.py
├── README.md
├── append.html
├── instantview-frame.js
├── spider.py
├── delayed_service.py
├── ivdiff.py
└── delayed_userscript.js
/requirements.txt:
--------------------------------------------------------------------------------
1 | lxml
2 | scrapy
3 | requests
4 | readchar
5 | colorama
6 | flask
7 | scrapy-querycleaner
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | download
2 | backup
3 | issues.json
4 | issues_all.json
5 | snippets.json
6 | gen
7 | cookies.txt
8 | ivdiff.log
9 |
--------------------------------------------------------------------------------
/getsnippet.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 |
4 | snippet = input()
5 | try:
6 |     with open("snippets.json", "r") as file:
7 |         ls = json.loads(file.read())
8 | except Exception:
9 |     ls = {}
10 | print(ls.get(snippet, "snippet not found"))
11 |
--------------------------------------------------------------------------------
/addsnippet.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 |
4 | try:
5 |     # read existing snippets if the file is present
6 |     with open("snippets.json", "r") as file:
7 |         ls = json.loads(file.read())
8 | except Exception:
9 |     ls = {}
10 | print(ls)
11 |
12 | print("name?")
13 | name = input()
14 |
15 | print("code?")
16 | lines = []
17 | while True:
18 | try:
19 | line = input()
20 | lines.append(line)
21 |     except (KeyboardInterrupt, EOFError):
22 | print("end")
23 | break
24 | text = '\n'.join(lines)
25 |
26 | ls[name] = text
27 |
28 | file = open("snippets.json", "w")
29 | file.write(json.dumps(ls))
30 | file.close()
31 |
--------------------------------------------------------------------------------
/auth.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import argparse
3 | import ivdiff
4 | import time
5 |
6 |
7 | def auth(phone, file):
8 | d = "https://instantview.telegram.org/"
9 | r = requests.get(d)
10 | cookies = r.cookies
11 | r = requests.post(d + "auth/request", cookies=cookies, data={"phone": phone}, headers={
12 | "X-Requested-With": "XMLHttpRequest",
13 |         # stay under the radar ( ͡° ͜ʖ ͡°)
14 | "User-Agent": "Mozilla/5.0 (PlayStation 4 2.50) AppleWebKit/537.73 (KHTML, like Gecko)"
15 | })
16 | try:
17 | temp_session = r.json()["temp_session"]
18 | except Exception:
19 | print("Error: {}".format(r.content.decode("utf-8")))
20 | return
21 |
22 | print("waiting for request approval...")
23 | while r.content != b"true":
24 | r = requests.post(d + "auth/login", cookies=cookies, data={"temp_session": temp_session}, headers={"X-Requested-With": "XMLHttpRequest"})
25 | cookies = requests.cookies.merge_cookies(cookies, r.cookies).get_dict()
26 | print("success! getting stel_ivs...")
27 |
28 | stel_ivs = None
29 | while stel_ivs is None:
30 | print("getting stel_ivs...")
31 |         # Domain and URL can actually be anything, just make sure the domain exists in the contest
32 | r = ivdiff.getHtml("dotekomanie.cz", cookies, "dotekomanie.cz", 1)
33 | if r is not None and "stel_ivs" in r[3]:
34 | stel_ivs = r[3]
35 | else:
36 | time.sleep(10)
37 | print(f"success! {stel_ivs}")
38 | cookies = stel_ivs
39 |
40 | with open(file, 'wb') as f:
41 | for i, v in cookies.items():
42 | f.write("{}={}; ".format(i, v).encode("utf-8"))
43 |
44 |
45 | if __name__ == "__main__":
46 | parser = argparse.ArgumentParser(description='Auth to IV and get cookies.txt file')
47 | parser.add_argument('phone', type=str, help='phone number')
48 | parser.add_argument('--cookies', '-c', help='path to file to write cookies to (default is cookies.txt)', nargs='?', default="cookies.txt")
49 |
50 | args = parser.parse_args()
51 | auth(args.phone, args.cookies)
52 |
--------------------------------------------------------------------------------
/batchdiff.py:
--------------------------------------------------------------------------------
1 | import ivdiff
2 | from multiprocessing import Pool
3 | from multiprocessing import Event
4 | import argparse
5 | import json
6 | import ctypes
7 | import readchar
8 |
9 |
10 | parsed = 0
11 | crawled = 0
12 | have_diff = 0
13 | olds = 0
14 |
15 |
16 | def callback(roflan):
17 | global parsed, have_diff
18 | parsed += 1
19 |     if roflan is None or roflan[1] == -1:
20 |         print("an error occurred" if roflan is None else f"an error occurred for url {roflan[0]}")
21 | return
22 | if roflan[1] == 1:
23 | have_diff += 1
24 | ctypes.windll.kernel32.SetConsoleTitleW(f"{(parsed / crawled * 100):.2f}% [{parsed} / crawled {crawled}] (w/ diff {have_diff}, old {olds}) | diffed {roflan[0]}")
25 |
26 |
27 | if __name__ == '__main__':
28 | parser = argparse.ArgumentParser(description='Diff the whole file full of links D:')
29 | parser.add_argument('t1', metavar='first_template', type=str, help='first template number OR template file path')
30 | parser.add_argument('t2', metavar='second_template', type=str, help='second template number OR template file path')
31 | parser.add_argument('file', type=str, help='file with links to diff')
32 | parser.add_argument('--cookies', '-c', help='path to file with cookies (default is cookies.txt)', nargs='?', default="cookies.txt")
33 | parser.add_argument('--poolsize', '-p', help='concurrent connections count(default=5)', type=int, nargs='?', default=5)
34 | parser.add_argument('--nobrowser', '-n', help='do not open browser when diff is found', action='store_true')
35 | parser.add_argument('--browser', '-b', help='browser or path to program to open diff', nargs='?', default="")
36 |
37 | args = parser.parse_args()
38 |
39 | event = Event()
40 | p = Pool(args.poolsize, ivdiff.setup, (event,))
41 | event.set()
42 | cookies = ivdiff.parseCookies(args.cookies)
43 |
44 | f = list(json.loads(open(args.file, "r").read()).values())[::-1]
45 | print("Total: {}".format(len(f)))
46 | crawled = len(f)
47 | z = 0
48 | for i in f:
49 | p.apply_async(ivdiff.checkDiff, [args.nobrowser, cookies, i, args.t1, args.t2, args.browser], callback=callback)
50 | pause = False
51 | while True:
52 | e = readchar.readchar()
53 | if e == b'q':
54 | print("quit")
55 | break
56 |
57 | if e == b'k':
58 | p.terminate()
59 | print("Killed pool")
60 |
61 | if e == b' ':
62 | pause = not pause
63 | if pause:
64 | event.clear()
65 | else:
66 | event.set()
67 | print(f"pause = {pause}")
68 |
--------------------------------------------------------------------------------
/backup.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import ivdiff
3 | import re
4 | from lxml import etree
5 | import argparse
6 | import urllib3
7 | import json
8 | from lxml import html
9 | import os
10 |
11 | htmlparser = etree.HTMLParser(remove_blank_text=True)
12 | verify = True
13 | if not verify:
14 | urllib3.disable_warnings()
15 |
16 | headers = {
17 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0",
18 | "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
19 | "Accept-Language": "en-US,en;q=0.5",
20 | "Accept-Encoding": "gzip, deflate, br",
21 | "Connection": "keep-alive",
22 | "Upgrade-Insecure-Requests": "1",
23 | "Cache-Control": "max-age=0"
24 | }
25 |
26 |
27 | def getHashAndRules(domain, url, cookies):
28 | d = "https://instantview.telegram.org/my/{}".format(domain)
29 |
30 | headers = {
31 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0",
32 | "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
33 | "Accept-Language": "en-US,en;q=0.5",
34 | "Accept-Encoding": "gzip, deflate, br",
35 | "Referer": d,
36 | "Connection": "keep-alive",
37 | "Upgrade-Insecure-Requests": "1",
38 | "Cache-Control": "max-age=0"
39 | }
40 |
41 |     while True:
42 |         r = requests.get(d, headers=headers, verify=verify, cookies=cookies, params=dict(url=url))
43 |         cookies = dict(list(cookies.items()) + list(r.cookies.get_dict().items()))
44 |
45 |         try:
46 |             hash = re.search("my\\?hash=(.*?)\",", str(r.content)).group(1)
47 |             tree = html.fromstring(r.content.decode("utf8"))
48 |
49 |             rules = json.loads(re.search("initWorkspace\\(\".*?\",(.*)\\);", tree.xpath("//script[last()]/text()")[0]).group(1))
50 |
51 |             return (rules["rules"], hash, cookies)
52 |         except Exception:
53 |             # the page sometimes comes back without the expected script block, so retry
54 |             print("retry")
55 |
56 |
57 | def getAll(cookies):
58 | r = requests.get("https://instantview.telegram.org/my", headers=headers, cookies=cookies)
59 | h = html.fromstring(r.content)
60 | fn = "backup/"
61 | try:
62 | os.makedirs(os.path.dirname(fn))
63 | except Exception:
64 | pass
65 | for i in h.xpath("//h3/a[@class=\"section-header\"]/text()"):
66 | print(i)
67 | rules, hash, cc = getHashAndRules(i, i, cookies)
68 | f = open(fn + i + ".xpath", "w", encoding='utf8')
69 | f.write(rules)
70 | f.close()
71 |
72 |
73 | if __name__ == "__main__":
74 | parser = argparse.ArgumentParser(description='Backup all templates')
75 | parser.add_argument('--cookies', '-c', help='path to file with cookies (default is cookies.txt)', nargs='?', default="cookies.txt")
76 |
77 | args = parser.parse_args()
78 | cookies = ivdiff.parseCookies(args.cookies)
79 | getAll(cookies)
80 |
--------------------------------------------------------------------------------
/checked.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import ivdiff
3 | import re
4 | from lxml import etree
5 | from io import StringIO
6 | import argparse
7 | import urllib3
8 | from functools import partial
9 | import json
10 |
11 |
12 | htmlparser = etree.HTMLParser(remove_blank_text=True)
13 | verify = True
14 | if not verify:
15 | urllib3.disable_warnings()
16 |
17 | headers = {
18 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0",
19 | "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
20 | "Accept-Language": "en-US,en;q=0.5",
21 | "Accept-Encoding": "gzip, deflate, br",
22 | "Connection": "keep-alive",
23 | "Upgrade-Insecure-Requests": "1",
24 | "Cache-Control": "max-age=0"
25 | }
26 |
27 |
28 | def getHashAndRules(domain, cookies):
29 | d = "https://instantview.telegram.org/my/{}".format(domain)
30 |
31 | headers = {
32 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0",
33 | "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
34 | "Accept-Language": "en-US,en;q=0.5",
35 | "Accept-Encoding": "gzip, deflate, br",
36 | "Referer": d,
37 | "Connection": "keep-alive",
38 | "Upgrade-Insecure-Requests": "1",
39 | "Cache-Control": "max-age=0"
40 | }
41 |
42 | r = requests.get(d, headers=headers, verify=verify, cookies=cookies, params=dict(url=domain))
43 | hash = re.search("my\\?hash=(.*?)\",", str(r.content)).group(1)
44 | rules = json.loads(re.search("initWorkspace\\(\".*?\",(.*?)\\);", str(r.content.decode("utf8"))).group(1))
45 | return (rules["rules"], hash)
46 |
47 |
48 | def checkAll(domain, cookies):
49 | rules, hash = getHashAndRules(domain, cookies)
50 | r = requests.post("https://instantview.telegram.org/api/my", headers=headers, verify=verify, cookies=cookies, params=dict(hash=hash), data=dict(url=domain, section=domain, method="processByRules", rules_id="", rules=rules, random_id=""))
51 | rid = r.json()["random_id"]
52 | r = requests.post("https://instantview.telegram.org/api/my", headers=headers, verify=verify, cookies=cookies, params=dict(hash=hash), data=dict(url=domain, section=domain, method="getSectionData"))
53 | tree = etree.parse(StringIO(r.json()["items"]), htmlparser)
54 | list(map(partial(check, domain, cookies, rid, hash), tree.xpath("//h4/text()")))
55 |
56 |
57 | def check(domain, cookies, rid, hash, url):
58 | print(url)
59 | headers["X-Requested-With"] = "XMLHttpRequest"
60 | headers["Accept"] = "application/json, text/javascript, */*; q=0.01"
61 |     r = {}
62 |     while "contest_ready" not in r:
63 | r = requests.post("https://instantview.telegram.org/api/my", headers=headers, verify=verify, cookies=cookies, params=dict(hash=hash), data=dict(random_id=rid, url=url, section=domain, method="markUrlAsChecked")).json()
64 |
65 |
66 | if __name__ == "__main__":
67 | parser = argparse.ArgumentParser(description='Mark all the IV templates as checked.')
68 | parser.add_argument('domain', metavar='domain', type=str, help='domain where script should check all the pages')
69 | parser.add_argument('--cookies', '-c', help='path to file with cookies (default is cookies.txt)', nargs='?', default="cookies.txt")
70 |
71 | args = parser.parse_args()
72 | cookies = ivdiff.parseCookies(args.cookies)
73 | checkAll(args.domain, cookies)
74 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ivdiff
2 |
3 | scripts to get the difference between two [Instant View](https://instantview.telegram.org) templates
4 |
5 | ## Installing
6 |
7 | Install [python3](https://www.python.org/downloads/) and [pip](https://pypi.org/project/pip/).
8 | Then run `git clone https://github.com/undrfined/ivdiff; cd ivdiff; pip install -r requirements.txt`.
9 |
10 | ## Auth
11 |
12 | To authenticate, run the `auth.py` script:
13 |
14 | ```
15 | py auth.py +38093******6
16 | ```
17 | ...and you're ready to go.
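
The script writes your session cookies to `cookies.txt` by default, and the other scripts read that same file by default (or whatever you point them to with `-c`). For example, to keep a second account in its own file:

```
py auth.py +38093******6 -c cookies2.txt
```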
18 |
19 | ## ivdiff.py
20 |
21 | get diff for one specific page
22 |
23 | usage:
24 |
25 | ```
26 | py ivdiff.py <first_template> <second_template> <url> [-c <cookies>] [-b <browser>]
27 | ```
28 |
29 | ...where each `<template>` is a template number if it was submitted to the contest (for example `45`) or a filename with the template code (for example `file.xpath`). You can also use `~` to download the current code from the My Templates section.
30 |
31 | **Please do a backup of your code before using filename or `~` as one of the templates**
32 |
33 | `<url>` is the URL to diff.
34 |
35 | `<browser>` is a browser name (according to [docs](https://docs.python.org/3/library/webbrowser.html)) or a path to a program to open the diff file with
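
For example, to diff contest template `45` against your current My Templates code (`~`) on a single page and open the result in Firefox (the URL below is just a placeholder, the argument order follows the usage above):

```
py ivdiff.py 45 ~ https://example.com/some-article -b firefox
```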
36 |
37 | ## batchdiff.py
38 |
39 | get diff for a lot of pages from file
40 |
41 | usage:
42 |
43 | ```
44 | py batchdiff.py <first_template> <second_template> <file> [-c <cookies>] [-p <poolsize>]
45 | ```
46 |
47 | ...where `<file>` is a file with the list of all the URLs you want to diff and `<poolsize>` is the number of concurrent connections to use (default 5)
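
The links file is read with `json.loads(...).values()`, so it should be a JSON object whose values are the URLs to diff (the keys can be anything), for example:

```
{"1": "https://example.com/article-1", "2": "https://example.com/article-2"}
```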
48 |
49 | ## spider.py
50 |
51 | collect all the URLs automatically and get diff for all of them
52 |
53 | usage:
54 |
55 | ```
56 | py spider.py <first_template> <second_template> <domain> [-c <cookies>] [-p <poolsize>] [-b <browser>] [-w <whitelist>] [-r <restrict_xpaths>]
57 | ```
58 |
59 | ...where `<domain>` is a domain name (for example `5minutes.rtl.lu`)
60 |
61 | `<browser>` is a browser name (according to [docs](https://docs.python.org/3/library/webbrowser.html)) or a path to a program to open the diff file with
62 |
63 | `<whitelist>` is a list of XPath expressions; a page is diffed only if it matches at least one of them.
64 |
65 | `<restrict_xpaths>` you guessed it, is the opposite: pages matching any of these XPath expressions are skipped.
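
For example, to crawl `5minutes.rtl.lu` with 10 connections, diffing template `45` against your current code and only diffing pages that contain an `article` element (the whitelist XPath is just an illustration):

```
py spider.py 45 ~ 5minutes.rtl.lu -p 10 -w //article
```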
66 |
67 | ## checked.py
68 |
69 | press "Mark as checked" for all the links in the domain.
70 |
71 | usage:
72 |
73 | ```
74 | py checked.py <domain> [-c <cookies>]
75 | ```
76 | **Please do a backup of your code before using this**
77 |
78 | ## backup.py
79 |
80 | downloads all the templates
81 |
82 | usage:
83 |
84 | ```
85 | py backup.py [-c <cookies>]
86 | ```
87 |
88 | ## cdo, do and other awesome macros for diff
89 |
90 | You can use macros inside of your IV template code to diff more easily. There are three types of macros:
91 |
92 | - `##do [(alias OR template number) list separated with space OR nothing]`, means "do this block of code for **d**iff **o**nly"
93 | - `##cdo [(alias OR template number) list separated with space OR nothing]`, means "**c**omment out this block of code for **d**iff **o**nly"
94 | - `##s [alias] [template number]`, means "**s**et alias to template number"
95 |
96 | Example usage:
97 |
98 | ```
99 | ##s undrfined 10 (set alias "undrfined" to template number 10. don't forget to update it tho!)
100 |
101 | ##do undrfined Vlad 111 (use this code block only when diffing with undrfined or Vlad or template#111)
102 | @datetime: //body/time
103 | published_date: $@
104 | ##? (means else, do this block for every other diff)
105 | published_date: //meta[@property="date"]/@content
106 | ##
107 | ```
108 |
109 | ivdiff will automatically comment the block out when you start diffing against any other template.
110 |
111 | ## ivdiff.py#compare()
112 |
113 | You can use this method to remove elements that exist in one template but are missing in the other, or to convert them so that they match the other contestant's template.
114 |
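A minimal sketch of such an edit inside `compare()` (the XPath and the tag conversion below are just examples, adjust them to the templates you are diffing):

```
def compare(f, s):
    # drop "related articles" blocks that only one of the templates outputs
    for tree in (f, s):
        for bad in tree.xpath("//section[contains(@class, 'related')]"):
            bad.getparent().remove(bad)
    # or convert elements so both sides match, e.g. treat subheaders as plain headers
    for bad in f.xpath("//h4[@data-block='Subheader']") + s.xpath("//h4[@data-block='Subheader']"):
        bad.attrib["data-block"] = "Header"
        bad.tag = "h3"
```
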
115 | ## Linux & Mac OS
116 |
117 | ...are not supported because I didn't test anything there 🤷♂️
118 |
119 | # Delayed [issues] service
120 |
121 | Sends the issue at the very last second. Yeah, shame on me!
122 | Install `delayed_userscript.js` in Tampermonkey, then run the service itself: `py delayed_service.py`.
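
The service listens on port 5000 and reports successes and failures to a Telegram webhook, so point the `webhook` variable at the top of `delayed_service.py` to your own hook before running it:

```
webhook = "https://integram.org/webhook/yourwebhook"  # t.me/bullhorn_bot
```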
123 |
124 | ## It's not only about evil
125 |
126 | The delayed service actually has a lot more useful features. I don't remember any of them though, so they'll be a surprise for you :)
127 |
128 |
--------------------------------------------------------------------------------
/append.html:
--------------------------------------------------------------------------------
1 |
2 |
9 |
158 |
--------------------------------------------------------------------------------
/instantview-frame.js:
--------------------------------------------------------------------------------
1 | var IV = {
2 | sendPostMessage: function(data) {
3 | try {
4 | window.parent.postMessage(JSON.stringify(data), window.parentOrigin);
5 | } catch(e) {}
6 | },
7 | frameClickHandler: function(e) {
8 | var target = e.target, href;
9 | do {
10 | if (target.tagName == 'SUMMARY') return;
11 | if (target.tagName == 'DETAILS') return;
12 | if (target.tagName == 'LABEL') return;
13 | if (target.tagName == 'AUDIO') return;
14 | if (target.tagName == 'A') break;
15 | } while (target = target.parentNode);
16 | if (target && target.hasAttribute('href')) {
17 | var base_loc = document.createElement('A');
18 | base_loc.href = window.currentUrl;
19 | if (base_loc.origin != target.origin ||
20 | base_loc.pathname != target.pathname ||
21 | base_loc.search != target.search) {
22 | IV.sendPostMessage({event: 'link_click', url: target.href});
23 | }
24 | }
25 | //e.preventDefault();
26 | },
27 | frameHoverHandler: function(e) {
28 | var target = e.target, href;
29 | if (e.type == 'mouseover') {
30 | target.classList.add('--tg-instantview-region-hovered');
31 | } else if (e.type == 'mouseout') {
32 | target.classList.remove('--tg-instantview-region-hovered');
33 | } else {
34 | if (e.metaKey || e.ctrlKey) {
35 | return IV.frameClickHandler(e);
36 | }
37 | target.classList.toggle('--tg-instantview-region-selected');
38 | IV.sendPostMessage({event: 'regions_change', regions: IV.getFrameRegions()});
39 | e.preventDefault();
40 | }
41 | },
42 | getFrameRegions: function(destDocument) {
43 | var regions = [];
44 | var elements = document.getElementsByClassName('--tg-instantview-region-selected');
45 | for (var j = 0; j < elements.length; j++) {
46 | var el = elements[j], tag = el.tagName;
47 | var tags = document.getElementsByTagName(tag);
48 | for (var i = 0; i < tags.length; i++) {
49 | if (tags[i] === el) {
50 | regions.push(el.tagName + '#' + i);
51 | break;
52 | }
53 | }
54 | }
55 | return regions.join(',');
56 | },
57 | initFrameRegions: function(no_edit, regions) {
58 | var styleEl = document.createElement('style');
59 | styleEl.innerHTML = '*.--tg-instantview-region-hovered{outline:2px dotted red !important;}*.--tg-instantview-region-selected{outline:2px solid red !important;}';
60 | document.body.appendChild(styleEl);
61 | if (no_edit) {
62 | regions = regions || '';
63 | if (regions) {
64 | regions = regions.split(',');
65 | var scrolled = false;
66 | for (var j = 0; j < regions.length; j++) {
67 | var region = regions[j].split('#');
68 | var tag = region[0], i = region[1], el;
69 | if (i.length) {
70 | if (el = document.getElementsByTagName(tag)[i]) {
71 | el.classList.add('--tg-instantview-region-selected');
72 | if (!scrolled) {
73 | scrolled = true;
74 | setTimeout(function() {
75 | if (el.scrollIntoViewIfNeeded) {
76 | el.scrollIntoViewIfNeeded();
77 | } else if (el.scrollIntoView) {
78 | el.scrollIntoView();
79 | }
80 | }, 100);
81 | }
82 | }
83 | }
84 | }
85 | }
86 | document.onclick = IV.frameClickHandler;
87 | } else {
88 | document.onclick = document.onmouseover = document.onmouseout = IV.frameHoverHandler;
89 | }
90 | },
91 | postMessageHandler: function(event) {
92 | if (event.source !== window.parent ||
93 | event.origin != window.parentOrigin) {
94 | return;
95 | }
96 | try {
97 | var data = JSON.parse(event.data);
98 | } catch(e) {
99 | var data = {};
100 | }
101 | if (data.event == 'init_regions') {
102 | IV.initFrameRegions(data.no_edit, data.regions);
103 | }
104 | },
105 | slideshowSlide: function(el, next) {
106 | var dir = window.getComputedStyle(el, null).direction || 'ltr';
107 | var marginProp = dir == 'rtl' ? 'marginRight' : 'marginLeft';
108 | if (next) {
109 | var s = el.previousSibling.s;
110 | s.value = (+s.value + 1 == s.length) ? 0 : +s.value + 1;
111 | s.forEach(function(el){ el.checked && el.parentNode.scrollIntoView && el.parentNode.scrollIntoView({behavior: 'smooth', block: 'center', inline: 'center'}); });
112 | el.firstChild.style[marginProp] = (-100 * s.value) + '%';
113 | } else {
114 | el.form.nextSibling.firstChild.style[marginProp] = (-100 * el.value) + '%';
115 | }
116 | return false;
117 | },
118 | initPreBlocks: function() {
119 | if (!hljs) return;
120 | var pres = document.getElementsByTagName('pre');
121 | for (var i = 0; i < pres.length; i++) {
122 | if (pres[i].hasAttribute('data-language')) {
123 | hljs.highlightBlock(pres[i]);
124 | }
125 | }
126 | },
127 | initEmbedBlocks: function() {
128 | var iframes = document.getElementsByTagName('iframe');
129 | for (var i = 0; i < iframes.length; i++) {
130 | (function(iframe) {
131 | window.addEventListener('message', function(event) {
132 | if (event.source !== iframe.contentWindow ||
133 | event.origin != window.origin) {
134 | return;
135 | }
136 | try {
137 | var data = JSON.parse(event.data);
138 | } catch(e) {
139 | var data = {};
140 | }
141 | if (data.eventType == 'resize_frame') {
142 | if (data.eventData.height) {
143 | iframe.style.height = data.eventData.height + 'px';
144 | }
145 | }
146 | }, false);
147 | })(iframes[i]);
148 | }
149 | }
150 | };
151 |
152 | document.onclick = IV.frameClickHandler;
153 | window.onmessage = IV.postMessageHandler;
154 |
--------------------------------------------------------------------------------
/spider.py:
--------------------------------------------------------------------------------
1 | import scrapy
2 | import ivdiff
3 | from urllib.parse import urlparse
4 | from multiprocessing import Pool, Event, Process
5 | from scrapy.crawler import CrawlerProcess, CrawlerRunner
6 | import argparse
7 | from scrapy.utils.project import get_project_settings
8 | import os
9 | import json
10 | from scrapy.spiders import Rule
11 | from scrapy.linkextractors import LinkExtractor
12 | import ctypes
13 | from twisted.internet import reactor
14 | from threading import Thread
15 | import readchar
16 | from lxml import etree
17 | import re
18 |
19 |
20 | class IvSpider(scrapy.spiders.CrawlSpider):
21 | name = 'IvSpider'
22 | dupl = []
23 | url_list = {}
24 | crawled = 0
25 | parsed = 0
26 | olds = 0
27 | have_diff = 0
28 |
29 | def __init__(self, pool, event=None, whitelist=[], ignore="", restrict_xpaths="", nobrowser=False, browser="", domain="", cookies="cookies.txt", poolsize=5, **kwargs):
30 | if not domain.startswith("http"):
31 | domain = "http://" + domain
32 | self.start_urls = [domain]
33 |
34 | d = urlparse(domain).netloc
35 |         if d.startswith("www."):
36 |             d = d[4:]
37 |
38 | self.allowed_domains = [d, d + ":443"]
39 | self.browser = browser
40 | self.whitelist = whitelist
41 | self.nobrowser = nobrowser
42 | self.ignore = ignore
43 |
44 | self.restrict_xpaths = restrict_xpaths
45 | self.pool = pool
46 | self.rules = [Rule(LinkExtractor(allow=(), allow_domains=self.allowed_domains, deny=(ignore), deny_domains=("reklama.oblast45.ru")), callback='parse_item', follow=True)]
47 |
48 | print(cookies)
49 | self.cookies = ivdiff.parseCookies(cookies)
50 | fn = "gen/{}/url_list.json".format(d)
51 | try:
52 | os.makedirs(os.path.dirname(fn))
53 | except Exception:
54 | pass
55 |
56 | self.file = open(fn, "a+", buffering=1)
57 | self.file.seek(0)
58 | try:
59 | self.url_list = json.loads(self.file.read())
60 | except Exception as ex:
61 | print(ex)
62 | pass
63 | self.file.seek(0)
64 |
65 | super().__init__(**kwargs)
66 |
67 | def callback(self, roflan):
68 | self.parsed += 1
69 |         if roflan is None or roflan[1] == -1:
70 |             print("an error occurred" if roflan is None else f"an error occurred for url {roflan[0]}")
71 | return
72 | if roflan[1] == 1:
73 | self.have_diff += 1
74 | ctypes.windll.kernel32.SetConsoleTitleW(f"{(self.parsed / self.crawled * 100):.2f}% [{self.parsed} / crawled {self.crawled}] (w/ diff {self.have_diff}, old {self.olds}) | diffed {roflan[0]}")
75 | # print("{0:.2f}% [{1} / crawled {2}] (w/ diff {3}) {4}".format(self.parsed / self.crawled * 100, self.parsed, self.crawled, self.have_diff, roflan[0]))
76 | self.url_list[roflan[0]] = True
77 | self.file.seek(0)
78 | self.file.truncate(0)
79 | self.file.seek(0)
80 | self.file.write(json.dumps(self.url_list))
81 | self.file.seek(0)
82 |
83 | def addToPool(self, url):
84 | if url in self.url_list:
85 | if not self.url_list[url]:
86 |                 # re-queue URLs that were added earlier but never finished diffing
87 | self.pool.apply_async(ivdiff.checkDiff, [self.nobrowser, self.cookies, url, self.t1, self.t2, self.browser], callback=self.callback)
88 | return
89 | self.crawled += 1
90 | self.url_list[url] = False
91 | ctypes.windll.kernel32.SetConsoleTitleW(f"{(self.parsed / self.crawled * 100):.2f}% [{self.parsed} / crawled {self.crawled}] (w/ diff {self.have_diff}, old {self.olds}) | added {url} to pool")
92 | self.pool.apply_async(ivdiff.checkDiff, [self.nobrowser, self.cookies, url, self.t1, self.t2, self.browser], callback=self.callback)
93 |
94 | def parse_item(self, response):
95 | for i in self.restrict_xpaths:
96 | if len(response.xpath(i)) > 0:
97 | # print(f"restrict {etree.tostring(response.xpath(i)[0], pretty_print=True)}")
98 | self.olds += 1
99 | return
100 | for i in self.whitelist:
101 | if len(response.xpath(i)) > 0:
102 | self.addToPool(response.url)
103 | if len(self.whitelist) == 0:
104 | self.addToPool(response.url)
105 |
106 |
107 | if __name__ == '__main__':
108 | parser = argparse.ArgumentParser(description='Crawl whole website and diff it')
109 | parser.add_argument('t1', metavar='first_template', type=str, help='first template number OR template file path')
110 | parser.add_argument('t2', metavar='second_template', type=str, help='second template number OR template file path')
111 | parser.add_argument('domain', metavar='domain', type=str, help='domain to crawl')
112 | parser.add_argument('--cookies', '-c', help='path to file with cookies (default is cookies.txt)', nargs='?', default="cookies.txt")
113 | parser.add_argument('--poolsize', '-p', help='concurrent connections count(default=5)', type=int, nargs='?', default=5)
114 | parser.add_argument('--nobrowser', '-n', help='do not open browser when diff is found', action='store_true')
115 | parser.add_argument('--browser', '-b', help='browser or path to program to open diff', nargs='?', default="")
116 | parser.add_argument('--ignore', '-i', help='regex with links to ignore (file or string)', nargs='+')
117 | parser.add_argument('--restrict_xpaths', '-r', help='xpath to ignore', nargs='+')
118 | parser.add_argument('--whitelist', '-w', help='xpath of what pages should be crawled', nargs='+')
119 |
120 | args = parser.parse_args()
121 |
122 | settings = get_project_settings()
123 | settings['LOG_LEVEL'] = 'INFO'
124 | settings["QUERYCLEANER_REMOVE"] = ".*"
125 | settings['SPIDER_MIDDLEWARES'] = {
126 | 'scrapy_querycleaner.QueryCleanerMiddleware': 100
127 | }
128 | settings["USER_AGENT"] = "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36"
129 |
130 | # process = CrawlerProcess(settings)
131 | event = Event()
132 | pool = Pool(args.poolsize, ivdiff.setup, (event,))
133 | event.set()
134 |
135 | ignore = ""
136 | try:
137 | c = open(args.ignore[0], "r")
138 | ignore = c.read()
139 | ignore = ignore.split("\n")
140 | c.close()
141 | except Exception as ex:
142 | ignore = args.ignore
143 | print(args.cookies)
144 | print(f"ignore: {ignore}")
145 |
146 | # spider = IvSpider(ignore=ignore, cookies=args.cookies, nobrowser=args.nobrowser, browser=args.browser, domain=args.domain, t1=args.t1, t2=args.t2, poolsize=args.poolsize)
147 | # crawler = CrawlerProcess(get_project_settings())
148 | # process = Process(target=crawler.start, stop_after_crawl=False)
149 |
150 | # crawler = CrawlerScript(spider)
151 | print(args.restrict_xpaths)
152 | if args.restrict_xpaths is None:
153 | args.restrict_xpaths = []
154 | if args.whitelist is None:
155 | args.whitelist = []
156 | whitelist = [re.sub(r"has-class\((\".*?\")\)", "contains(concat(' ', normalize-space(@class), ' '), \\1)", i) for i in args.whitelist]
157 | restrict_xpaths = [re.sub(r"has-class\((\".*?\")\)", "contains(concat(' ', normalize-space(@class), ' '), \\1)", i) for i in args.restrict_xpaths]
158 |
159 | crawler = CrawlerProcess(settings)
160 |     d = crawler.crawl(IvSpider, pool=pool, event=event, restrict_xpaths=restrict_xpaths, whitelist=whitelist, ignore=ignore, cookies=args.cookies, nobrowser=args.nobrowser, browser=args.browser, domain=args.domain, t1=args.t1, t2=args.t2, poolsize=args.poolsize)
161 | d.addBoth(lambda _: reactor.stop())
162 | pause = False
163 | Thread(target=reactor.run, args=(False,)).start()
164 | crawl_paused = False
165 |
166 | while True:
167 | e = readchar.readchar()
168 | if e == b'r':
169 | reactor.stop()
170 | print("Killed reactor")
171 |
172 | if e == b'q':
173 | print("quit")
174 | break
175 |
176 | if e == b'k':
177 | pool.terminate()
178 | print("Killed pool")
179 |
180 | if e == b' ':
181 | pause = not pause
182 | if pause:
183 | event.clear()
184 | else:
185 | event.set()
186 | print(f"pause = {pause}")
187 |
188 | # process.crawl(IvSpider, event=event, ignore=ignore, cookies=args.cookies, nobrowser=args.nobrowser, browser=args.browser, domain=args.domain, t1=args.t1, t2=args.t2, poolsize=args.poolsize)
189 | # process.start()
190 |
191 | # while True:
192 | # i = input()
193 | # print(i)
194 |
--------------------------------------------------------------------------------
/delayed_service.py:
--------------------------------------------------------------------------------
1 | from flask import Flask
2 | from flask import jsonify
3 | from flask import current_app
4 | from flask import request
5 | import threading
6 | import requests
7 | from lxml import html
8 | import datetime
9 | import ivdiff
10 | import re
11 | import json as js
12 | import webbrowser
13 | import time as tt
14 | from hashlib import md5
15 | import os
16 | from time import sleep
17 |
18 | app = Flask(__name__)
19 | verify = True
20 | webhook = "https://integram.org/webhook/yourwebhook" # t.me/bullhorn_bot
21 | fail_retry = 5
22 | fail_wait_time = 20
23 |
24 |
25 | def notify(message):
26 | requests.post(webhook, json={"text": message})
27 |
28 |
29 | @app.route("/download", methods=["POST"])
30 | def download():
31 | try:
32 | json = request.get_json()
33 | r = requests.get(json["url"])
34 | md = md5()
35 | md.update(json["url"].encode("utf8"))
36 |
37 | fn = "download/{}.html".format(str(md.hexdigest()))
38 |
39 | file = open(fn, "w", errors="ignore")
40 | file.write(r.text)
41 | file.close()
42 | path = os.path.dirname(os.path.realpath(__file__))
43 | os.system("\"E:\\Sublime Text 3\\subl.exe\" {}/{}".format(path, fn))
44 | sleep(1)
45 | os.system("\"E:\\Sublime Text 3\\subl.exe\" --command htmlprettify")
46 | sleep(1)
47 | os.system("\"E:\\Sublime Text 3\\subl.exe\" --command save")
48 | return jsonify({
49 | "status": "ok"
50 | })
51 | except Exception as ex:
52 | return jsonify({
53 | "status": "not ok",
54 | "error": str(ex)
55 | })
56 |
57 |
58 | @app.route("/remove", methods=["POST"])
59 | def remove():
60 | json = request.get_json()
61 | with app.app_context():
62 | if json["section"] not in current_app.delayed or str(json["rules_id"]) not in current_app.delayed[json["section"]] or json not in current_app.delayed[json["section"]][str(json["rules_id"])]:
63 | return jsonify({
64 | "status": "not ok",
65 | "error": "not found"
66 | })
67 | current_app.delayed[json["section"]][str(json["rules_id"])].remove(json)
68 | file = open("issues.json", "w")
69 | file.write(js.dumps(current_app.delayed))
70 | file.close()
71 | return jsonify({
72 | "status": "ok"
73 | })
74 |
75 |
76 | @app.route("/remove_all", methods=["POST"])
77 | def remove_all():
78 | json = request.get_json()
79 | with app.app_context():
80 | if json["section"] not in current_app.all or str(json["rules_id"]) not in current_app.all[json["section"]] or json not in current_app.all[json["section"]][str(json["rules_id"])]:
81 | return jsonify({
82 | "status": "not ok",
83 | "error": "not found"
84 | })
85 | current_app.all[json["section"]][str(json["rules_id"])].remove(json)
86 | file = open("issues_all.json", "w")
87 | file.write(js.dumps(current_app.all))
88 | file.close()
89 | return jsonify({
90 | "status": "ok"
91 | })
92 |
93 |
94 | @app.route("/post_now", methods=["POST"])
95 | def post_now():
96 | json = request.get_json()
97 | with app.app_context():
98 | if json["section"] not in current_app.delayed or str(json["rules_id"]) not in current_app.delayed[json["section"]] or json not in current_app.delayed[json["section"]][str(json["rules_id"])]:
99 | return jsonify({
100 | "status": "not ok",
101 | "error": "not found"
102 | })
103 | if not send_issue(json):
104 | return jsonify({
105 | "status": "not ok",
106 | "error": "issue failed to send"
107 | })
108 | return jsonify({
109 | "status": "ok"
110 | })
111 |
112 |
113 | @app.route("/diff", methods=["POST"])
114 | def diff():
115 | cookies = ivdiff.parseCookies("cookies.txt")
116 | json = request.get_json()
117 | print(json)
118 | result = ivdiff.checkDiff(False, cookies, json["url"], json["rules_id"], "~")
119 | if result is None or result[1] < 0:
120 | return jsonify({
121 | "status": "not ok",
122 | "error": result[1]
123 | })
124 | return jsonify({
125 | "status": "ok",
126 | "has_diff": result[1] == 1
127 | })
128 |
129 |
130 | @app.route("/undiff", methods=["POST"])
131 | def undiff():
132 | cookies = ivdiff.parseCookies("cookies.txt")
133 | json = request.get_json()
134 | print(json)
135 | if ivdiff.getHtml(json["section"], cookies, json["section"], "~", "~") is None:
136 | return jsonify({
137 | "status": "not ok"
138 | })
139 | return jsonify({
140 | "status": "ok"
141 | })
142 |
143 |
144 | @app.route("/snippets", methods=["GET"])
145 | def snippets():
146 | file = open("snippets.json", "r")
147 | ls = js.loads(file.read())
148 | file.close()
149 | return jsonify({
150 | "status": "ok",
151 | "snippets": ls
152 | })
153 |
154 |
155 | @app.route("/list", methods=["GET"])
156 | def list():
157 | return jsonify({
158 | "status": "ok",
159 | "list": current_app.delayed
160 | })
161 |
162 |
163 | @app.route("/get_issues", methods=["POST"])
164 | def get_issues():
165 | json = request.get_json()
166 | section = json["section"]
167 | rules_id = json["rules"]
168 |
169 | if section not in current_app.all:
170 | return jsonify({
171 | "status": "not ok",
172 | "error": "no such section " + section
173 | })
174 | ok = []
175 | for i in rules_id:
176 | if str(i) in current_app.all[section]:
177 | for j in current_app.all[section][str(i)]:
178 | ok.append(j)
179 | if len(ok) == 0:
180 | return jsonify({
181 | "status": "not ok",
182 | "error": "no issues for candidate"
183 | })
184 |
185 | return jsonify({
186 | "status": "ok",
187 | "list": ok
188 | })
189 |
190 |
191 | def report_to_string(json):
192 | return f"{json['comment']}"
193 |
194 |
195 | def send_issue(json):
196 | error = ""
197 | data = "DATA"
198 | report_url = f"https://instantview.telegram.org/contest/{json['section']}/template{json['rules_id']}/?url={json['url']}"
199 | try:
200 | with app.app_context():
201 | if json["section"] not in current_app.delayed or str(json["rules_id"]) not in current_app.delayed[json["section"]] or json not in current_app.delayed[json["section"]][str(json["rules_id"])]:
202 | print(f"issue {json} not found")
203 | return
204 | print("Posting issue", json)
205 | cookies = ivdiff.parseCookies("cookies.txt")
206 |
207 | d = "https://instantview.telegram.org/contest/{}/template{}".format(json["section"], json["rules_id"])
208 |
209 | headers = {
210 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0",
211 | "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
212 | "Accept-Language": "en-US,en;q=0.5",
213 | "Accept-Encoding": "gzip, deflate, br",
214 | "Referer": d,
215 | "Connection": "keep-alive",
216 | "Upgrade-Insecure-Requests": "1",
217 | "Cache-Control": "max-age=0"
218 | }
219 | random_id = ""
220 | for retry in range(0, fail_retry):
221 | try:
222 | r = requests.get(d, headers=headers, verify=verify, cookies=cookies, params=dict(url=json["url"]))
223 |
224 | hash = re.search("contest\\?hash=(.*?)\",", str(r.content)).group(1)
225 | print(hash)
226 |
227 | jj = json.copy()
228 | del jj["random_id"]
229 |
230 | headers["X-Requested-With"] = "XMLHttpRequest"
231 | headers["Accept"] = "application/json, text/javascript, */*; q=0.01"
232 |
233 | # r = requests.post("https://instantview.telegram.org/api/contest", headers=headers, verify=verify, cookies=cookies, params=dict(hash=hash), data=data)
234 | final = ""
235 | fail = tt.time()
236 | lastTry = False
237 | # total_fail = 0
238 | while "result_doc_url" not in final:
239 | data = {**jj, "method": "processByRules", "rules": "", "random_id": random_id}
240 | r = requests.post("https://instantview.telegram.org/api/contest", headers=headers, verify=verify, cookies=cookies, params=dict(hash=hash), data=data)
241 | final = r.json()
242 | try:
243 | random_id = final["random_id"]
244 |
245 | # if "status" not in final:
246 | if "result_doc_url" not in final:
247 | # print(time.time() - fail)
248 | if tt.time() - fail >= fail_wait_time:
249 | if lastTry:
250 | print(f"struggling on page for more than {fail_wait_time * 2} seconds, trying from start")
251 | return None
252 |
253 | print(f"struggling on page for more than {fail_wait_time} seconds, trying without random_id")
254 | random_id = ""
255 | lastTry = True
256 | fail = tt.time()
257 |
258 | except Exception as ex:
259 | print(f"{ex} {final}")
260 | # random_id = r.json()["random_id"]
261 |
262 | data = {**jj, "method": "sendIssue", "random_id": random_id}
263 | data["type"] = int(data["type"])
264 | r = requests.post("https://instantview.telegram.org/api/contest", headers=headers, verify=verify, cookies=cookies, params=dict(hash=hash), data=data)
265 | error = r.content
266 |
267 | reply = r.json()
268 | if "error" in reply:
269 | print(f"Telegram-side error for {report_url}\n{report_to_string(json)}\n`{reply['error']}`")
270 | notify(f"Telegram-side error for {report_url}\n{report_to_string(json)}\n`{reply['error']}`")
271 | return False
272 | issue_url = "https://instantview.telegram.org/" + reply["redirect_to"]
273 | webbrowser.open_new_tab(issue_url)
274 |
275 | with app.app_context():
276 | current_app.delayed[json["section"]][str(json["rules_id"])].remove(json)
277 | file = open("issues.json", "w")
278 | file.write(js.dumps(current_app.delayed))
279 | file.close()
280 | notify(f"Issue successfully sent {issue_url}")
281 | return True
282 | except Exception as ex:
283 | print(f"ERROR while posting issue {data} {hash}, retry {retry}, {ex}")
284 | notify(f"Failed to send issue `{report_to_string(json)}`\n{error}\nReport it NOW!\n{report_url}")
285 | return False
286 | except Exception as ex:
287 | print(ex)
288 | error = str(ex)
289 | notify(f"Failed to send issue `{json}`\n{error}\nReport it NOW!\n{report_url}\n\n{json['comment']}")
290 | return False
291 |
292 |
293 | @app.route("/report", methods=["POST"])
294 | def report():
295 | json = request.get_json()
296 |
297 | r = requests.get(f"https://instantview.telegram.org/contest/{json['section']}/template{json['rules_id']}/")
298 | tree = html.fromstring(r.content).xpath("//p[contains(@class, \"about-text\")]/text()")
299 |     date = datetime.datetime.strptime(tree[1], " at %I:%M %p, %b %d.").replace(year=datetime.datetime.utcnow().year)
300 | reportTime = date + datetime.timedelta(days=3) - datetime.timedelta(minutes=25)
301 | time = reportTime - datetime.datetime.utcnow()
302 |
303 | print("Delaying issue", json)
304 | t = threading.Timer(time.total_seconds(), send_issue, [json])
305 | t.start()
306 |
307 | json["reportTime"] = str(reportTime)
308 |
309 | if json["section"] not in current_app.delayed:
310 | current_app.delayed[json["section"]] = {}
311 | if str(json["rules_id"]) not in current_app.delayed[json["section"]]:
312 | current_app.delayed[json["section"]][str(json["rules_id"])] = []
313 | current_app.delayed[json["section"]][str(json["rules_id"])].append(json)
314 | file = open("issues.json", "w")
315 | file.write(js.dumps(current_app.delayed))
316 | file.close()
317 |
318 | return jsonify({
319 | "status": "ok",
320 | "date": str(time)
321 | })
322 |
323 |
324 | @app.route("/add_issue", methods=["POST"])
325 | def add_issue():
326 | json = request.get_json()
327 |
328 | # r = requests.get(f"https://instantview.telegram.org/contest/{json['section']}/candidate{json['rules_id']}/")
329 |
330 | print("Adding issue", json)
331 |
332 | if json["section"] not in current_app.all:
333 | current_app.all[json["section"]] = {}
334 | if str(json["rules_id"]) not in current_app.all[json["section"]]:
335 | current_app.all[json["section"]][str(json["rules_id"])] = []
336 | current_app.all[json["section"]][str(json["rules_id"])].append(json)
337 | file = open("issues_all.json", "w")
338 | file.write(js.dumps(current_app.all))
339 | file.close()
340 |
341 | return jsonify({
342 | "status": "ok"
343 | })
344 |
345 |
346 | if __name__ == "__main__":
347 | with app.app_context():
348 | try:
349 | file = open("issues.json", "r")
350 | current_app.delayed = js.loads(file.read())
351 | for domain in current_app.delayed:
352 | data = current_app.delayed[domain]
353 | for rule in data:
354 | issues = data[rule]
355 | for issue in issues:
356 | reportTime = datetime.datetime.strptime(issue["reportTime"], "%Y-%m-%d %H:%M:%S")
357 | time = reportTime - datetime.datetime.utcnow()
358 |
359 | print("Delaying issue", issue)
360 | t = threading.Timer(time.total_seconds(), send_issue, [issue])
361 | t.start()
362 | except Exception as ex:
363 | current_app.delayed = {}
364 |
365 | try:
366 | file = open("issues_all.json", "r")
367 | current_app.all = js.loads(file.read())
368 | except Exception as ex:
369 | current_app.all = {}
370 | app.run(host="0.0.0.0", port=5000)
371 |
--------------------------------------------------------------------------------
/ivdiff.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import requests
3 | import re
4 | from lxml import etree
5 | # from lxml.html.clean import Cleaner
6 | from io import StringIO
7 | import difflib
8 | import webbrowser
9 | import os
10 | import argparse
11 | from urllib.parse import urlparse
12 | from http.cookies import SimpleCookie
13 | from hashlib import md5
14 | import time
15 | import copy
16 | import json
17 | import urllib3
18 | from lxml import html
19 | from colorama import Back, Style
20 | import colorama
21 |
22 | colorama.init()
23 | logging.basicConfig(filename="ivdiff.log", level=logging.INFO)
24 | htmlparser = etree.HTMLParser(remove_blank_text=True)
25 | # cleaner = Cleaner(style=True)
26 | verify = False
27 | if not verify:
28 | urllib3.disable_warnings()
29 |
30 |
31 | global unpaused
32 | unpaused = None
33 |
34 |
35 | def diffonlyParse(rules, template, reverse=False):
36 | r = rules.splitlines()
37 | domode = 0
38 | ff = []
39 | lines = ""
40 | variables = {}
41 | for j in r:
42 | i = j.strip()
43 | if i.startswith("##"):
44 | args = i[2:].split(" ")
45 | ff = args[1:]
46 | for i in variables:
47 | v = variables[i]
48 | if i in ff:
49 | ff.append(v)
50 |
51 | if args[0] == "do":
52 | # print("diffonly start")
53 | domode = 1
54 | if reverse or (len(ff) > 0 and str(template) not in ff):
55 | # print("REVERSE")
56 | domode = -domode
57 | elif args[0] == "?":
58 | # print("else")
59 | domode = -domode
60 | elif args[0] == "cdo":
61 | # print("comment diffonly start")
62 | domode = -1
63 | if reverse or (len(ff) > 0 and str(template) not in ff):
64 | # print(ff)
65 | # print(template)
66 | # print("REVERSE")
67 | domode = -domode
68 | elif args[0] == "" and len(args) == 1:
69 | # print("end")
70 | domode = 0
71 | elif args[0] == "s":
72 | variables[args[1]] = args[2]
73 | # print(variables)
74 | else:
75 | if domode == 1:
76 | # print(f"uncomment line {j}")
77 | j = re.sub(r"^(\s*)#+(.*)$", "\\1\\2", j)
78 | elif domode == -1:
79 | # print(f"comment line {j}")
80 | if re.match(r"^(\s*)#+(.*)$", j) is None:
81 | j = "#" + j
82 | lines += j + "\n"
83 | # print(lines)
84 | return lines
85 |
86 |
87 | def getHtml(domain, cookies, url, template, c_with=None):
88 | rules = ""
89 | if template != "~":
90 | try:
91 | templNumber = str(int(template))
92 | contest = "contest"
93 | except ValueError:
94 | la = open(template, "r", encoding='utf8')
95 | rules = str(la.read())
96 | la.close()
97 | contest = "my"
98 | templNumber = ""
99 | else:
100 | contest = "my"
101 | templNumber = ""
102 |
103 | if contest == "my":
104 | d = "https://instantview.telegram.org/{}/{}".format(contest, domain)
105 | else:
106 | d = "https://instantview.telegram.org/{}/{}/template{}".format(contest, domain, templNumber)
107 |
108 | headers = {
109 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0",
110 | "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
111 | "Accept-Language": "en-US,en;q=0.5",
112 | "Accept-Encoding": "gzip, deflate, br",
113 | "Referer": d,
114 | "Connection": "keep-alive",
115 | "Upgrade-Insecure-Requests": "1",
116 | "Cache-Control": "max-age=0"
117 | }
118 | logging.info("-- Getting html for {} --".format(url.encode("ascii")))
119 | # print("-- Getting html for {} --".format(url.encode("ascii")))
120 |
121 | if contest == "my" and template == "~":
122 | all = getHashAndRules(domain, url, cookies)
123 | rules = diffonlyParse(all[0], c_with, c_with == template)
124 | hash = all[1]
125 | cookies = all[2]
126 | if len(rules) < 10:
127 | print(f"CRITICAL ERROR RULES EMPTY! {rules}")
128 | return None
129 | else:
130 | r = requests.get(d, headers=headers, verify=verify, cookies=cookies, params=dict(url=url))
131 | cookies = dict(list(cookies.items()) + list(r.cookies.get_dict().items()))
132 |
133 | hash = re.search("{}\\?hash=(.*?)\",".format(contest), str(r.content)).group(1)
134 | # logging.info("hash={}".format(hash))
135 | # print(f"got hash {hash}")
136 |
137 | rules = rules.encode('utf-8')
138 | headers["X-Requested-With"] = "XMLHttpRequest"
139 | headers["Accept"] = "application/json, text/javascript, */*; q=0.01"
140 | r = requests.post("https://instantview.telegram.org/api/{}".format(contest), headers=headers, verify=verify, cookies=cookies, params=dict(hash=hash), data=dict(url=url, section=domain, method="processByRules", rules_id=templNumber, rules=rules, random_id=""))
141 | random_id = r.json()["random_id"]
142 | # logging.info("random_id={}".format(random_id))
143 | # print(f"got random id {random_id}")
144 |
145 | final = ""
146 | fail = time.time()
147 | lastTry = False
148 | # total_fail = 0
149 | while "result_doc_url" not in final:
150 | r = requests.post("https://instantview.telegram.org/api/{}".format(contest), headers=headers, verify=verify, cookies=cookies, params=dict(hash=hash), data=dict(url=url, section=domain, method="processByRules", rules_id=templNumber, rules=rules, random_id=random_id))
151 | final = r.json()
152 | try:
153 | random_id = final["random_id"]
154 |
155 | # if "status" not in final:
156 | if "result_doc_url" not in final:
157 | # print(time.time() - fail)
158 | if time.time() - fail >= 5:
159 | if lastTry:
160 | # print(f"struggling on page for more than 10 seconds, trying from start in 45s {url}")
161 | print(Back.LIGHTRED_EX + "5" + Style.RESET_ALL, end="")
162 | return None
163 |
164 | # print(f"struggling on page for more than 5 seconds, trying without random_id {url}")
165 | random_id = ""
166 | lastTry = True
167 | fail = time.time()
168 |
169 | except Exception as ex:
170 | print(f"{ex} {final}")
171 |
172 | random_id = final["random_id"]
173 | u = final["result_doc_url"]
174 | preview_html = final["preview_html"]
175 |
176 | logging.info("loading page {}".format(u))
177 | r = requests.get(u, verify=verify, cookies=cookies)
178 | if r.status_code != 200:
179 | print(f"{r.status_code}, trying again {url}")
180 | return None
181 | error = ""
182 |
183 | if "NESTED_ELEMENT_NOT_SUPPORTED" in str(r.content):
184 | # print(Back.LIGHTRED_EX + "N" + Style.RESET_ALL, end="")
185 | error = Back.LIGHTRED_EX + "N" + Style.RESET_ALL
186 | print(error, end="")
187 | logging.error("NESTED_ELEMENT_NOT_SUPPORTED in {}".format(url.encode("ascii")))
188 | if "PAGE_NOT_FETCHED" in str(r.content):
189 | error = Back.RED + "P" + Style.RESET_ALL
190 | print(error, end="")
191 | logging.error("PAGE_NOT_FETCHED in {}".format(url.encode("ascii")))
192 | b = r.content.decode("utf-8").replace(u"\xa0", " ")
193 | b = re.sub(' +', ' ', b)
194 | tree = etree.parse(StringIO(b), htmlparser)
195 | if preview_html is not False:
196 | preview_html_tree = etree.parse(StringIO(preview_html), htmlparser)
197 | else:
198 | preview_html_tree = None
199 | # remove nbsp and bullshit
200 | # tree = cleaner.clean_html(tree)
201 |
202 | logging.info("-- FINISHED --")
203 | return (d + "?url=" + url, tree, preview_html_tree, cookies)
204 |
205 |
206 | def compare(f, s):
207 | # You can remove elements before diff if you want to
208 |
209 | # for bad in s.xpath("//h6[@data-block=\"Kicker\"]"):
210 | # bad.getparent().remove(bad)
211 | # for bad in f.xpath("//footer[last()]"):
212 | # bad.getparent().remove(bad)
213 | # for bad in f.xpath("//*[contains(@class, \"related\")]"):
214 | # # del bad.attrib["href"]
215 | # # del bad.attrib["target"]
216 | # # del bad.attrib["onclick"]
217 | # bad.getparent().remove(bad)
218 | # for bad in s.xpath("//*[contains(@class, \"related\")]"):
219 | # # del bad.attrib["href"]
220 | # # del bad.attrib["target"]
221 | # # del bad.attrib["onclick"]
222 | # bad.getparent().remove(bad)
223 | for bad in f.xpath("//article/address//a[@rel=\"author\"]"):
224 | try:
225 | del bad.attrib["href"]
226 | del bad.attrib["target"]
227 | del bad.attrib["onclick"]
228 | except Exception:
229 | pass
230 |
231 | # for bad in f.xpath("//article/address/figure"):
232 | # bad.getparent().remove(bad)
233 |
234 | # for bad in s.xpath("//article/address/figure"):
235 | # bad.getparent().remove(bad)
236 |
237 | for bad in f.xpath("//h4[@data-block=\"Subheader\"]"):
238 | bad.attrib["data-block"] = "Header"
239 | bad.tag = "h3"
240 |
241 | for bad in s.xpath("//h4[@data-block=\"Subheader\"]"):
242 | bad.attrib["data-block"] = "Header"
243 | bad.tag = "h3"
244 |
245 | for bad in f.xpath("//article/address//a[@onclick]"):
246 | try:
247 | del bad.attrib["onclick"]
248 | except Exception:
249 | pass
250 |
251 | for bad in s.xpath("//article/address//a[@rel=\"author\"]"):
252 | try:
253 | del bad.attrib["href"]
254 | del bad.attrib["target"]
255 | del bad.attrib["onclick"]
256 | except Exception:
257 | pass
258 |
259 | for bad in f.xpath("//p[string-length(normalize-space(.)) = 0]"):
260 | bad.getparent().remove(bad)
261 |
262 | for bad in s.xpath("//p[string-length(normalize-space(.)) = 0]"):
263 | bad.getparent().remove(bad)
264 |
265 | # for bad in f.xpath("//section[@class=\"related\"]"):
266 | # bad.getparent().remove(bad)
267 |
268 | # for bad in s.xpath("//section[@class=\"related\"]"):
269 | # bad.getparent().remove(bad)
270 |
271 | for bad in f.xpath("//div[@class=\"iframe-wrap\"]"):
272 | try:
273 | del bad.attrib["style"]
274 | for bad_i in bad.xpath(".//*"):
275 | if "width" in bad_i.attrib:
276 | del bad_i.attrib["width"]
277 | del bad_i.attrib["height"]
278 | if "style" in bad_i.attrib:
279 | del bad_i.attrib["style"]
280 | except Exception:
281 | pass
282 |
283 | for bad in s.xpath("//div[@class=\"iframe-wrap\"]"):
284 | try:
285 | del bad.attrib["style"]
286 | for bad_i in bad.xpath(".//*"):
287 | if "width" in bad_i.attrib:
288 | del bad_i.attrib["width"]
289 | del bad_i.attrib["height"]
290 | if "style" in bad_i.attrib:
291 | del bad_i.attrib["style"]
292 | except Exception:
293 | pass
294 |
295 | for bad in f.xpath("//p"):
296 | if bad.text is not None:
297 | bad.text = re.sub(r"^\s*(.*?)\s*$", "\\1", bad.text)
298 |
299 | for bad in s.xpath("//p"):
300 | if bad.text is not None:
301 | bad.text = re.sub(r"^\s*(.*?)\s*$", "\\1", bad.text)
302 |
303 | # for bad in s.xpath("//figure[@data-block=\"Slideshow\"]"):
304 | # slideshow = bad.xpath(".//figure[@class=\"slideshow\"]/*")
305 | # for i in slideshow[::-1]:
306 | # fc = i.xpath("./figcaption")[0]
307 | # span = fc.xpath("./span")[0]
308 | # for j in span.xpath("./*"):
309 | # span.addprevious(j)
310 | # if span.text is not None:
311 | # fc.text = span.text
312 | # fc.remove(span)
313 | # bad.addnext(i)
314 | # bad.getparent().remove(bad)
315 |
316 | # for bad in f.xpath("//figure[@data-block=\"Slideshow\"]"):
317 | # slideshow = bad.xpath(".//figure[@class=\"slideshow\"]/*")
318 | # for i in slideshow[::-1]:
319 | # fc = i.xpath("./figcaption")[0]
320 | # span = fc.xpath("./span")[0]
321 | # for j in span.xpath("./*"):
322 | # span.addprevious(j)
323 | # if span.text is not None:
324 | # fc.text = span.text
325 | # fc.remove(span)
326 | # bad.addnext(i)
327 | # bad.getparent().remove(bad)
328 |
329 | for bad in s.xpath("//article/address//a[@onclick]"):
330 | try:
331 | del bad.attrib["onclick"]
332 | except Exception:
333 | pass
334 |
335 | # for bad in s.xpath("//article//footer"):
336 | # bad.getparent().remove(bad)
337 |
338 | # for bad in f.xpath("//article//footer"):
339 | # bad.getparent().remove(bad)
340 |
341 | # for bad in s.xpath("//article/address"):
342 | # # del bad.attrib["href"]
343 | # # del bad.attrib["target"]
344 | # # del bad.attrib["onclick"]
345 | # bad.getparent().remove(bad)
346 |
347 | # for bad in s.xpath("//article/address/time"):
348 | # bad.getparent().remove(bad)
349 |
350 | # for bad in f.xpath("//article/address/time"):
351 | # bad.getparent().remove(bad)
352 | # # # for bad in s.xpath("//article/address"):
353 | # # # # del bad.attrib["href"]
354 | # # # # del bad.attrib["target"]
355 | # # # # del bad.attrib["onclick"]
356 | # # # bad.getparent().remove(bad)
357 |
358 | for img in f.xpath("//img"):
359 | del img.attrib["alt"]
360 | del img.attrib["title"]
361 | for img in s.xpath("//img"):
362 | del img.attrib["alt"]
363 | del img.attrib["title"]
364 |
365 | for img in f.xpath("//video"):
366 | del img.attrib["alt"]
367 | del img.attrib["title"]
368 | for img in s.xpath("//video"):
369 | del img.attrib["alt"]
370 | del img.attrib["title"]
371 |
372 | pass
373 |
374 |
375 | def setup(event):
376 | global unpaused
377 | unpaused = event
378 |
379 |
380 | def getHashAndRules(domain, url, cookies):
381 | d = "https://instantview.telegram.org/my/{}".format(domain)
382 |
383 | headers = {
384 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0",
385 | "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
386 | "Accept-Language": "en-US,en;q=0.5",
387 | "Accept-Encoding": "gzip, deflate, br",
388 | "Referer": d,
389 | "Connection": "keep-alive",
390 | "Upgrade-Insecure-Requests": "1",
391 | "Cache-Control": "max-age=0"
392 | }
393 | r = requests.get(d, headers=headers, verify=verify, cookies=cookies, params=dict(url=url))
394 | cookies = dict(list(cookies.items()) + list(r.cookies.get_dict().items()))
395 |
396 | hash = re.search("my\\?hash=(.*?)\",", str(r.content)).group(1)
397 | tree = html.fromstring(r.content.decode("utf8"))
398 |
399 | rules = json.loads(re.search("initWorkspace\\(\".*?\",(.*)\\);", tree.xpath("//script[last()]/text()")[0]).group(1))
400 | return (rules["rules"], hash, cookies)
401 |
402 |
403 | def checkDiff(nobrowser, cookies, url, t1, t2, browser=""):
404 | try:
405 | if unpaused is not None:
406 | unpaused.wait()
407 | print(Back.LIGHTYELLOW_EX + " " + Style.RESET_ALL, end="")
408 | if not url.startswith("http"):
409 | url = "http://" + url
410 |
411 | domain = urlparse(url).hostname
412 | if domain.startswith("www."):
413 | domain = domain[4:]
414 |
415 | f1 = None
416 | s1 = None
417 |
418 | # TODO switch to next cookie if this fails too much
419 | # For now just start over with the same cookie
420 | cookies = [cookies]
421 | cookie = -1
422 | while f1 is None:
423 | cookie += 1
424 | if cookie >= len(cookies):
425 | cookie = 0
426 | time.sleep(45)
427 | f1 = getHtml(domain, cookies[cookie], url, t1, t2)
428 |
429 | if unpaused is not None:
430 | unpaused.wait()
431 | cookie = -1
432 | while s1 is None:
433 | cookie += 1
434 | if cookie >= len(cookies):
435 | cookie = 0
436 | time.sleep(45)
437 | s1 = getHtml(domain, cookies[cookie], url, t2, t1)
438 | if unpaused is not None:
439 | unpaused.wait()
440 | if f1 is None or s1 is None:
441 | return (url, -1, cookies)
442 | f = f1[1]
443 | s = s1[1]
444 | preview_html_first = f1[2]
445 | preview_html_second = s1[2]
446 |
447 | compare(f, s)
448 |
449 | a1 = f.xpath("//article")
450 | showDiff = True
451 |
452 | if len(a1) == 0:
453 | a1 = f.xpath("//section[@class=\"message\"]")
454 | copy1 = copy.deepcopy(a1)
455 | preview_html_first = None
456 | showDiff = False
457 | else:
458 | copy1 = copy.deepcopy(a1)
459 | for img in copy1[0].xpath("//img"):
460 | del img.attrib["src"]
461 | # if preview_html_first is not None:
462 | # copy1[0].append(copy.deepcopy(preview_html_first).xpath("//div[@class='page-preview']")[0])
463 |
464 | a2 = s.xpath("//article")
465 | if len(a2) == 0:
466 | a2 = s.xpath("//section[@class=\"message\"]")
467 | copy2 = copy.deepcopy(a2)
468 | preview_html_second = None
469 | if not showDiff:
470 | showDiff = False
471 | else:
472 | if not showDiff:
473 | showDiff = True
474 | copy2 = copy.deepcopy(a2)
475 | for img in copy2[0].xpath("//img"):
476 | del img.attrib["src"]
477 | # if preview_html_second is not None:
478 | # copy2[0].append(copy.deepcopy(preview_html_second).xpath("//div[@class='page-preview']")[0])
479 |
480 | first_gen = etree.tostring(copy1[0], pretty_print=True, encoding='UTF-8').decode("utf-8")
481 | second_gen = etree.tostring(copy2[0], pretty_print=True, encoding='UTF-8').decode("utf-8")
482 |
483 | # first_gen = re.sub(r"\s*?()", "\\1", first_gen)
484 | # first_gen = re.sub(r">\s*<", "><", first_gen, flags=re.S)
485 | # second_gen = re.sub(r"\s*?()", "\\1", second_gen)
486 | # second_gen = re.sub(r">\s*<", "><", second_gen, flags=re.S)
487 |
488 | diff = difflib.HtmlDiff(wrapcolumn=90).make_file(first_gen.split("\n"), second_gen.split("\n"))
489 | htmlparser = etree.HTMLParser(remove_blank_text=True)
490 | tree = etree.parse(StringIO(str(diff)), htmlparser)
491 |
492 | frame1_link = f.xpath("//head/link")
493 | frame1_link[0].attrib["href"] = "https://ivwebcontent.telegram.org{}".format(frame1_link[0].attrib["href"])
494 | frame1_script = f.xpath("//head/script[@src]")
495 | frame1_script[0].attrib["src"] = "../../instantview-frame.js"
496 |
497 | tree.xpath("//head")[0].append(frame1_link[0])
498 | tree.xpath("//head")[0].append(frame1_script[0])
499 |
500 | htmlparser = etree.HTMLParser(remove_blank_text=True, encoding='utf-8')
501 | append = etree.parse(open("append.html", "r", encoding='utf8'), htmlparser)
502 |
503 | frames = append.xpath("//div[contains(@id, 'frame')]")
504 | frames[0].append(a1[0])
505 | frames[1].append(a2[0])
506 |
507 | previews = append.xpath("//div[contains(@id, 'preview')]")
508 | if preview_html_first is not None:
509 | previews[0].append(preview_html_first.xpath("//div[@class='page-preview']")[0])
510 | if preview_html_second is not None:
511 | previews[1].append(preview_html_second.xpath("//div[@class='page-preview']")[0])
512 |
513 | first_link = append.xpath("//a[@id='first_template']")[0]
514 | first_link.attrib["href"] = f1[0]
515 | first_link.text = "Template {}\n".format(t1)
516 |
517 | second_link = append.xpath("//a[@id='second_template']")[0]
518 | second_link.attrib["href"] = s1[0]
519 | second_link.text = "Template {}\n".format(t2)
520 |
521 | append.xpath("//input")[0].attrib["value"] = url
522 |
523 | tree.xpath("//body//table")[0].addprevious(append.xpath("//main/div[1]")[0])
524 | tree.xpath("//body//table")[0].addnext(append.xpath("//main/div[1]")[0])
525 |
526 | for bad in tree.xpath("//table[@summary='Legends']"):
527 | bad.getparent().remove(bad)
528 | final = etree.tostring(tree, pretty_print=True).decode("utf-8")
529 | if unpaused is not None:
530 | unpaused.wait()
531 |
532 | # still can't be bothered to think this through
533 | # whoever laughed just became Roflan Ebalo
534 | if showDiff and ("class=\"diff_add\"" in final or "class=\"diff_chg\"" in final or "class=\"diff_sub\"" in final):
535 | print(Back.LIGHTGREEN_EX + "D" + Style.RESET_ALL, end="")
536 | md = md5()
537 | md.update(url.encode('utf-8'))
538 |
539 | fn = "gen/{}/{}_{}_{}.html".format(domain, "t1", "t2", str(md.hexdigest()))
540 | try:
541 | os.makedirs(os.path.dirname(fn))
542 | except Exception:
543 | pass
544 | file = open(fn, "w", encoding="utf-8")  # the diff embeds the article text, so force UTF-8
545 | file.write(final)
546 | file.close()
547 | if not nobrowser:
548 | browser = webbrowser if browser == "" else webbrowser.get(browser)
549 | browser.open_new_tab("file:///{}/{}".format(os.getcwd(), fn))
550 | return (url, 1, cookies)
551 | else:
552 | print(Back.LIGHTGREEN_EX + " " + Style.RESET_ALL, end="")
553 | return (url, 0, cookies)
554 | except Exception as ex:
555 | # report the failure to the caller instead of re-raising and killing the whole run
556 | print(ex)
557 | return (url, -2, cookies)
558 |
559 |
560 | def parseCookies(cookiesFile):  # load a "key=value; " cookie string from a file into a dict
561 | c = open(cookiesFile, "r")
562 | cl = c.read()
563 | c.close()
564 |
565 | cookie = SimpleCookie()
566 | cookie.load(cl)
567 |
568 | cookies = {}
569 | for key, morsel in cookie.items():
570 | cookies[key] = morsel.value
571 | return cookies
572 |
573 |
574 | if __name__ == "__main__":
575 | parser = argparse.ArgumentParser(description='Get pretty HTML diff between two IV templates.')
576 | parser.add_argument('t1', metavar='first_template', type=str, help='first template number OR template file path')
577 | parser.add_argument('t2', metavar='second_template', type=str, help='second template number OR template file path')
578 | parser.add_argument('url', metavar='url', nargs='+', type=str, help='original page url to diff')
579 | parser.add_argument('--cookies', '-c', help='path to file with cookies (default is cookies.txt)', nargs='?', default="cookies.txt")
580 | parser.add_argument('--nobrowser', '-n', help='do not open browser when diff is found', action='store_true')
581 | parser.add_argument('--browser', '-b', help='browser or path to program to open diff', nargs='?', default="")
582 |
583 | args = parser.parse_args()
584 | for i in args.url:
585 | cookies = parseCookies(args.cookies)
586 |
587 | checkDiff(args.nobrowser, cookies, i, args.t1, args.t2, args.browser)
588 |
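589 | 
590 | # Example invocation (illustrative values only, not taken from the repo):
591 | #   python ivdiff.py 12 13 https://example.com/some-article -c cookies.txt -b firefox
592 | # compares how templates 12 and 13 render the page; when the generated articles
593 | # differ, the HTML diff is written under gen/<domain>/ and opened in the browser.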
--------------------------------------------------------------------------------
/delayed_userscript.js:
--------------------------------------------------------------------------------
1 | // ==UserScript==
2 | // @name delayed issues
3 | // @namespace http://tampermonkey.net/
4 | // @version 0.1
5 | // @description schedule and manage delayed Instant View contest issue reports via a local helper service
6 | // @author You
7 | // @match *://instantview.telegram.org/*
8 | // @match *://instantview.telegram.org/contest/*/template*?url=*
9 | // @match *://instantview.telegram.org/contest/*/template*
10 | // @grant unsafeWindow
11 | // @grant GM_xmlhttpRequest
12 | // @grant GM_addStyle
13 |
14 | // ==/UserScript==
15 |
16 |
17 | (function() {
18 | 'use strict';
19 | const url = "http://127.0.0.1:5000";
20 |
21 | GM_addStyle (`
22 | .rainbow{font-family:Pacifico,cursive;text-shadow:1px 1px 1px #000;font-size:25px;animation:rainbow 7s infinite}@-webkit-keyframes rainbow{0%{color:orange}10%{color:purple}20%{color:red}30%{color:#5f9ea0}40%{color:#ff0}50%{color:coral}60%{color:green}70%{color:#0ff}80%{color:#ff1493}90%{color:#1e90ff}100%{color:orange}}@-ms-keyframes rainbow{0%{color:orange}10%{color:purple}20%{color:red}30%{color:#5f9ea0}40%{color:#ff0}50%{color:coral}60%{color:green}70%{color:#0ff}80%{color:#ff1493}90%{color:#1e90ff}100%{color:orange}}@keyframes rainbow{0%{color:orange}10%{color:purple}20%{color:red}30%{color:#5f9ea0}40%{color:#ff0}50%{color:coral}60%{color:green}70%{color:#0ff}80%{color:#ff1493}90%{color:#1e90ff}100%{color:orange}}
23 | .iv-logo .iv-icon{ background-image: url(https://yt3.ggpht.com/a-/AAuE7mB0JS7Kt0BpqFZ_TSAIn4VF8mlThOWjIDUF=s900-mo-c-c0xffffffff-rj-k-no) !important;
24 | background-size: 20px 20px;
25 | background-repeat: no-repeat; background-position: 0px 0px !important; }
26 | .wrapper{height:100%;width:100%;left:0;right:0;top:0;bottom:0;position:absolute;background:linear-gradient(124deg,#ff2400,#e81d1d,#e8b71d,#e3e81d,#1de840,#1ddde8,#2b1de8,#dd00f3,#dd00f3);background-size:1800% 1800%;-webkit-animation:rainbow2 18s ease infinite;-z-animation:rainbow2 18s ease infinite;-o-animation:rainbow2 18s ease infinite;animation:rainbow2 18s ease infinite}@-webkit-keyframes rainbow2{0%{background-position:0 82%}50%{background-position:100% 19%}100%{background-position:0 82%}}@-moz-keyframes rainbow2{0%{background-position:0 82%}50%{background-position:100% 19%}100%{background-position:0 82%}}@-o-keyframes rainbow2{0%{background-position:0 82%}50%{background-position:100% 19%}100%{background-position:0 82%}}@keyframes rainbow2{0%{background-position:0 82%}50%{background-position:100% 19%}100%{background-position:0 82%}}
27 | `);
28 |
29 |
30 |
31 | function error(e) {
32 | showAlert("Failed to contact delay server. Is it really enabled?");
33 | };
34 | var editor = $("#rules-field");
35 | if(editor.length > 0) {
36 | GM_xmlhttpRequest({
37 | method: "GET",
38 | url: url + "/snippets",
39 | onload: function(e) {
40 | try {
41 | var json = JSON.parse(e.response);
42 | if(json["status"] != "ok") {
43 | showAlert("Error! " + JSON.stringify(json));
44 | return;
45 | }
46 | var snippets = json["snippets"];
47 | console.log(snippets);
48 |
49 |
50 | var value = App.editor.getValue();
51 | App.editor.setValue(value.replace(/(?=^## \n).*?## \n/gms, "@snippet: \"$1\"\n"));
52 |
53 | var _oldProcessRules = processRules;
54 | unsafeWindow.onbeforeunload = function(){};
55 | unsafeWindow.processRules = function processRules(rules) {
56 | if(rules.includes("## \n).*?## \n/gms, "@snippet: \"$1\"\n");
59 | App.editor.setValue(rules);
60 | App.editor.setCursor(c);
61 | }
62 | rules = rules.replace(/^@snippet\s*?:\s*?(".*?")$/gms, "## \n{code}\n## ");
63 | console.log("old = " + rules);
64 | if(rules.length > 5) {
65 | var lines = rules.split('\n');
66 | var newRules = "";
67 | var nextSnippet = "";
68 | for(var i = 0; i < lines.length; i++){
69 | if(lines[i].startsWith("## $/.exec(lines[i])[1]];
74 | if(nextSnippet == undefined) {
75 | nextSnippet = "@ERROR_NO_SUCH_SNIPPET";
76 | }
77 | console.log("nextsnippet = " + nextSnippet);
78 | }
79 | } else if(nextSnippet != "") {
80 | if(lines[i] == "{code}") {
81 | newRules += nextSnippet + "\n";
82 | continue;
83 | }
84 | }
85 | newRules += lines[i] + (i == lines.length - 1 ? "" : "\n");
86 | }
87 | console.log(newRules);
88 | return _oldProcessRules(newRules);
89 | }
90 | }
91 | processRules(App.editor.getValue());
92 | } catch(e) {
93 | showAlert("Error while parsing json: " + e);
94 | }
95 | },
96 | onerror: error,
97 | ontimeout: error,
98 | onabort: error
99 | });
100 | }
101 |
102 | var logo = $(".dev_side_image img");
103 | if(logo.length > 0){
104 | logo.attr("src", "https://i.imgur.com/5TGMjSl.png");
105 | }
106 | var header = $(".header-wrap");
107 | var urlform = $("#url-form");
108 | if(header.length > 0 && urlform.length > 0 && urlform.attr("action").includes("/contest/")) {
109 | header.append(``);
110 | header.find("button#diff").click(function() {
111 | var request = {
112 | section: App.state.section,
113 | url: App.state.result_url,
114 | rules_id: App.state.rules_id || 0
115 | };
116 | GM_xmlhttpRequest({
117 | method: "POST",
118 | url: url + "/diff",
119 | data: JSON.stringify(request),
120 | headers: {
121 | "Content-Type": "application/json"
122 | },
123 | onload: function(e) {
124 | try {
125 | var json = JSON.parse(e.response);
126 | if(json["status"] != "ok") {
127 | showAlert("Error! " + JSON.stringify(json));
128 | return;
129 | }
130 | showAlert("Success! has diff = " + json["has_diff"]);
131 | } catch(e) {
132 | showAlert("Error while parsing json: " + e);
133 | }
134 | },
135 | onerror: error,
136 | ontimeout: error,
137 | onabort: error
138 | });
139 | });
140 | }
141 | if(header.length > 0 && $("#url-mark-btn").length > 0) {
142 | var url_contest = document.URL.replace("/my/", "/contest/");
143 | header.append(`Contest`);
144 | header.append(``);
145 | header.append(``);
146 | header.find("button#undiff").click(function() {
147 | var request = {
148 | section: App.state.section
149 | };
150 | GM_xmlhttpRequest({
151 | method: "POST",
152 | url: url + "/undiff",
153 | data: JSON.stringify(request),
154 | headers: {
155 | "Content-Type": "application/json"
156 | },
157 | onload: function(e) {
158 | try {
159 | var json = JSON.parse(e.response);
160 | if(json["status"] != "ok") {
161 | showAlert("Error! " + JSON.stringify(json));
162 | return;
163 | }
164 | showAlert("ok");
165 | location.reload();
166 | } catch(e) {
167 | showAlert("Error while parsing json: " + e);
168 | }
169 | },
170 | onerror: error,
171 | ontimeout: error,
172 | onabort: error
173 | });
174 | });
175 | header.find("button#download").click(function() {
176 | var request = {
177 | url: App.state.result_url
178 | };
179 | GM_xmlhttpRequest({
180 | method: "POST",
181 | url: url + "/download",
182 | data: JSON.stringify(request),
183 | headers: {
184 | "Content-Type": "application/json"
185 | },
186 | onload: function(e) {
187 | try {
188 | var json = JSON.parse(e.response);
189 | if(json["status"] != "ok") {
190 | showAlert("Error! " + JSON.stringify(json));
191 | return;
192 | }
193 | showAlert("ok");
194 | } catch(e) {
195 | showAlert("Error while parsing json: " + e + "\n" + e.response);
196 | }
197 | },
198 | onerror: error,
199 | ontimeout: error,
200 | onabort: error
201 | });
202 | });
203 | }
204 | var username = $(".logged-name");
205 | if(username.length > 0) {
206 | var realName = username.text();
207 | username.text("durov");
208 | username.addClass("rainbow");
209 | var winner = $("div.contest-winner-row");
210 | if(winner.length > 0) {
211 | $("header").css("background-color", "transparent");
212 | if(winner.find(".contest-item-author a").text() == realName) {
213 | $("body").addClass("wrapper");
214 | $("a.status-winner").css("font-size", "1.5em").addClass("rainbow");
215 | // stupid me decided to put the song on the hosting that removes it after 14 days
216 | $("body").append(``);
217 | $(".list-group-contest-item").each(function() {
218 | if($(this).find(".contest-item-author a").text() != realName) {
219 | $(this).find(".contest-item-author a").text("SUCKER!");
220 | } else {
221 | $(this).find(".contest-item-author a").text("The greatest " + realName + "!");
222 | }
223 | });
224 | $("body").append(``);
225 | } else {
226 | if($(".list-group-contest-item").filter(function() {
227 | return $(this).find(".contest-item-author a").text() == realName;
228 | }).length > 0) {
229 | $("body").append(``);
230 | $("head").append(``);
231 | $("body").css("background-color", "#000");
232 | $(".about-text").append(`GAME OVER
You cannot give up just yet...
press X to appeal
`);
233 | $("body").css("font-family", "Determination Mono");
234 | $(document).keypress(function(e) {
235 | console.log(e.which);
236 | if(e.which == 120 || e.which == 1095 || e.which == 1063 || e.which == 88) {
237 | // X pressed
238 | // sorry Ricky xD
239 | window.open("https://t.me/d_Rickyy_b");
240 | }
241 | });
242 | }
243 | }
244 | }
245 | }
246 |
247 |
248 |
249 | var section = $(".contest-section");
250 | var button = $("div.report-issue-block").find(".issue-send-button");
251 |
252 | // DELAYED LIST
253 | if(button.length == 0 && $(".section-url-field-wrap").length > 0) {
254 | section.append(`Delayed issues
`);
255 | section.append(``);
256 | var issues = $("#delayed");
257 | var rule = /#(\d+)/.exec(section.find("h3").first().text())[1];
258 | var sectionId = /contest\/(.*?)\//.exec(document.URL)[1];
259 | GM_xmlhttpRequest({
260 | method: "GET",
261 | url: url + "/list",
262 | onload: function(e) {
263 | try {
264 | var json = JSON.parse(e.response);
265 | if(json["status"] != "ok") {
266 | showAlert("Error! " + JSON.stringify(json));
267 | return;
268 | }
269 | if(json["list"][sectionId] != undefined && json["list"][sectionId][rule] != undefined) {
270 | $("#delayedCount").text(json["list"][sectionId][rule].length);
271 | json["list"][sectionId][rule].forEach(function(i, index) {
272 | var appended = $(`
273 |
276 |
${i["reportTime"]}
277 |
278 |
279 |
280 |
`).appendTo(issues);
281 | //i.key = index;
282 | appended.find("button").attr("data-json", JSON.stringify(i));
283 | appended.find(".issue-btn").click(function() {
284 | console.log($(this).data("json"));
285 | GM_xmlhttpRequest({
286 | method: "POST",
287 | url: url + ($(this).hasClass("remove-btn") ? "/remove" : "/post_now"),
288 | data: JSON.stringify($(this).data("json")),
289 | headers: {
290 | "Content-Type": "application/json"
291 | },
292 | onload: function(e) {
293 | console.log(e.response);
294 | try {
295 | var json = JSON.parse(e.response);
296 | if(json["status"] != "ok") {
297 | showAlert("Error! " + JSON.stringify(json));
298 | return;
299 | }
300 | showAlert("Success!");
301 | } catch(e) {
302 | showAlert("Error while parsing json: " + e);
303 | }
304 | },
305 | onerror: error,
306 | ontimeout: error,
307 | onabort: error
308 | });
309 | });
310 | });
311 | }
312 | } catch(e) {
313 | showAlert("Error while parsing json: " + e);
314 | }
315 | },
316 | onerror: error,
317 | ontimeout: error,
318 | onabort: error
319 | });
320 | }
321 |
322 | // LIST ALL ISSUES
323 | if(/candidate(\d+)/.exec(document.URL) != null) {
324 | section.first().append(`Waiting issues
`);
325 | section.first().append(``);
326 | var issuesWaiting = $("#waiting");
327 | var candidateId = /candidate(\d+)/.exec(document.URL)[1];
328 | var sect = /contest\/(.*?)\//.exec(document.URL)[1];
329 | var listNumbers = $.makeArray($(".contest-item-num").map(function() {
330 | return /template(\d+)/.exec($(this).find("a").attr("href"))[1]
331 | }));
332 | var request = {
333 | section: sect,
334 | rules: listNumbers
335 | };
336 | console.log(request);
337 | GM_xmlhttpRequest({
338 | method: "POST",
339 | data: JSON.stringify(request),
340 | headers: {
341 | "Content-Type": "application/json"
342 | },
343 | url: url + "/get_issues",
344 | onload: function(e) {
345 | try {
346 | var json = JSON.parse(e.response);
347 | if(json["status"] != "ok") {
348 | showAlert("Error! " + JSON.stringify(json));
349 | return;
350 | }
351 |
352 | if(json["list"] != undefined && json["list"].length > 0) {
353 | var list = json["list"];
354 | $("#waitingCount").text(list.length);
355 | list.forEach(function(i, index) {
356 | var appended = $(`
357 |
360 |
361 |
362 |
363 |
`).appendTo(issuesWaiting);
364 | //i.key = index;
365 | appended.find("button").attr("data-json", JSON.stringify(i));
366 | appended.find(".issue-btn").click(function() {
367 | console.log($(this).data("json"));
368 | GM_xmlhttpRequest({
369 | method: "POST",
370 | url: url + ($(this).hasClass("remove-btn") ? "/remove_all" : "/post_now"),
371 | data: JSON.stringify($(this).data("json")),
372 | headers: {
373 | "Content-Type": "application/json"
374 | },
375 | onload: function(e) {
376 | console.log(e.response);
377 | try {
378 | var json = JSON.parse(e.response);
379 | if(json["status"] != "ok") {
380 | showAlert("Error! " + JSON.stringify(json));
381 | return;
382 | }
383 | showAlert("Success!");
384 | } catch(e) {
385 | showAlert("Error while parsing json: " + e);
386 | }
387 | },
388 | onerror: error,
389 | ontimeout: error,
390 | onabort: error
391 | });
392 | });
393 | });
394 | }
395 | } catch(e) {
396 | showAlert("Error while parsing json: " + e);
397 | }
398 | },
399 | onerror: error,
400 | ontimeout: error,
401 | onabort: error
402 | });
403 | }
404 |
405 | if(button.length > 0) {
406 | $(document).on('iv:result:updated', function() {
407 | var button = $("div.report-issue-block:not(.hide)").find(".issue-send-button");
408 |
409 | button.text("Send issue now");
410 | button.parent().append(``);
411 | button.parent().append(``);
412 | var delay = $("#delay");
413 | var add_issue = $("#add_issue");
414 |
415 | delay.click(function() {
416 | var form = button.closest("form");
417 |
418 | var comment = form.find('textarea[name="comment"]').val();
419 | var type = form.find('input[name="type"]').val();
420 |
421 | var request = {
422 | url: App.state.result_url,
423 | section: App.state.section,
424 | rules_id: App.state.rules_id || 0,
425 | random_id: App.state.random_id || '',
426 | issue_id: App.state.issue_id || 0,
427 | regions: App.state.originalRegions + ';' + App.state.resultRegions,
428 | type: type,
429 | comment: comment
430 | };
431 |
432 | GM_xmlhttpRequest({
433 | method: "POST",
434 | url: url + "/report",
435 | data: JSON.stringify(request),
436 | headers: {
437 | "Content-Type": "application/json"
438 | },
439 | onload: function(e) {
440 | try {
441 | var json = JSON.parse(e.response);
442 | if(json["status"] != "ok") {
443 | showAlert("Error! " + JSON.stringify(json));
444 | return;
445 | }
446 | showAlert("Report successfully scheduled, will be posted in " + json["date"]);
447 | } catch(e) {
448 | showAlert("Error while parsing json: " + e);
449 | }
450 | },
451 | onerror: error,
452 | ontimeout: error,
453 | onabort: error
454 | });
455 | });
456 |
457 | add_issue.click(function() {
458 | var form = button.closest("form");
459 |
460 | var comment = form.find('textarea[name="comment"]').val();
461 | var type = form.find('input[name="type"]').val();
462 |
463 | var request = {
464 | url: App.state.result_url,
465 | section: App.state.section,
466 | rules_id: App.state.rules_id || 0,
467 | random_id: App.state.random_id || '',
468 | issue_id: App.state.issue_id || 0,
469 | regions: App.state.originalRegions + ';' + App.state.resultRegions,
470 | type: type,
471 | comment: comment
472 | };
473 |
474 | GM_xmlhttpRequest({
475 | method: "POST",
476 | url: url + "/add_issue",
477 | data: JSON.stringify(request),
478 | headers: {
479 | "Content-Type": "application/json"
480 | },
481 | onload: function(e) {
482 | try {
483 | var json = JSON.parse(e.response);
484 | if(json["status"] != "ok") {
485 | showAlert("Error! " + JSON.stringify(json));
486 | return;
487 | }
488 | showAlert("Report successfully added");
489 | } catch(e) {
490 | showAlert("Error while parsing json: " + e);
491 | }
492 | },
493 | onerror: error,
494 | ontimeout: error,
495 | onabort: error
496 | });
497 | });
498 | });
499 | }
500 | })();
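501 | 
502 | // Note: everything above talks to a local helper service (presumably delayed_service.py)
503 | // listening on http://127.0.0.1:5000 and exposing JSON endpoints /snippets, /list,
504 | // /get_issues, /diff, /undiff, /download, /report, /add_issue, /post_now, /remove
505 | // and /remove_all, each replying with {"status": "ok", ...}.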
--------------------------------------------------------------------------------