├── img
│   ├── f12.png
│   ├── login-new.png
│   └── login-old.png
├── config.py.bak
├── .gitignore
├── somersa.py
├── hitwlan.py
├── tice.py
├── README.md
├── today.py
├── login.py
├── jwc.py
└── some
    └── index.php
/img/f12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tmn07/hit-spider/HEAD/img/f12.png
--------------------------------------------------------------------------------
/config.py.bak:
--------------------------------------------------------------------------------
1 | # write your uid&pwd here
2 | username = "xxx"
3 | password = "xxx"
--------------------------------------------------------------------------------
/img/login-new.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tmn07/hit-spider/HEAD/img/login-new.png
--------------------------------------------------------------------------------
/img/login-old.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tmn07/hit-spider/HEAD/img/login-old.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | pic/*
2 |
3 | .idea/*
4 |
5 | __pycache__/*
6 |
7 | *.pyc
8 |
9 | photo.jpg
10 |
11 | *.html
12 | *.txt
13 |
14 | photo.jpg
15 | config.py
--------------------------------------------------------------------------------
/somersa.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 |
3 | def modpow(b, e, m):  # square-and-multiply modular exponentiation: b**e % m
4 |     result = 1
5 |     while (e > 0):
6 |         if e & 1:
7 |             result = (result * b) % m
8 |         e = e >> 1
9 |         b = (b * b) % m
10 |     return result
11 |
12 | def str_to_int(string):  # pack the string's characters into one big-endian integer
13 |     n = 0
14 |     for i in range(len(string)):
15 |         n = n << 8
16 |         n += ord(string[i])
17 |     return n
18 |
19 |
20 | def rsa(data, n):  # textbook RSA encryption with public exponent 0x10001 (Python 2: uses long)
21 |     e = '10001'
22 |     result = modpow(str_to_int(data), long(e, 16), long(n, 16))
23 |     return hex(result)[2:-1]
24 |
25 | # n = '98289d260169a74317dd3ad91b831623e5589a344848b0ccceb74542212fc2390a13d8f15b037c56eabf2a4ef7b1e06c32c9f6280288373ee23efc87d350056b'
26 | # print rsa("xxx", n)
--------------------------------------------------------------------------------
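The `rsa` helper above is Python 2 only (it relies on `long`, and the commented usage is a print statement). A minimal Python 3 sketch of the same computation, using the built-in three-argument `pow`, is shown below; the modulus is just the sample value from the comment above, and `rsa_py3` is a name of my own, not part of the repo.

```python
# Python 3 sketch of somersa.rsa (assumption: same textbook-RSA scheme as above)
def rsa_py3(data, n_hex):
    e = 0x10001
    m = int.from_bytes(data.encode(), "big")       # same packing as str_to_int for ASCII input
    return format(pow(m, e, int(n_hex, 16)), "x")  # hex string without the '0x' prefix

# sample modulus taken from the comment in somersa.py
n = "98289d260169a74317dd3ad91b831623e5589a344848b0ccceb74542212fc2390a13d8f15b037c56eabf2a4ef7b1e06c32c9f6280288373ee23efc87d350056b"
print(rsa_py3("xxx", n))
```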
/hitwlan.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | import requests
3 | from bs4 import BeautifulSoup
4 | from config import *
5 |
6 | s = requests.Session()
7 |
8 | url = 'http://192.168.52.11/srun_portal_pc.php?ac_id=1&'
9 |
10 | header = {
11 |     'Host': '192.168.52.11',
12 |     'Origin': 'http://192.168.52.11',
13 |     'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/51.0.2704.79 Chrome/51.0.2704.79 Safari/537.36',
14 |     'Referer': 'http://192.168.52.11/srun_portal_pc.php?url=&ac_id=1',
15 | }
16 |
17 | # username: your student ID goes in front of '@sam'; password: your password
18 | post_data = {
19 |     'action': 'login',
20 |     'username': username,
21 |     'password': password,
22 |     'ac_id': '1',
23 |     'user_ip':'',
24 |     'nas_ip':'',
25 |     'user_mac':'',
26 |     'url':'',
27 |     'save_me':'1'
28 | }
29 |
30 | r = s.post(url, headers=header, data=post_data)
31 | try:
32 |     soup = BeautifulSoup(r.text,"lxml")
33 |     fs = soup.find("fieldset")
34 |     print fs.find("p").text  # the portal's error message, if any
35 | except Exception, e:
36 |     print "login ok"  # no error fieldset on the page means the login went through
37 |
38 |
39 | # print(r.text)
40 |
41 | # input()
42 |
--------------------------------------------------------------------------------
/tice.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from bs4 import BeautifulSoup
3 |
4 | s = requests.Session()
5 |
6 |
7 | baseurl = "http://210.46.72.143/"
8 | url = "http://210.46.72.143/servlet/adminservlet"
9 |
10 | headers = {
11 |     'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
12 | }
13 |
14 | r = s.get(baseurl, headers=headers)
15 |
16 | soup = BeautifulSoup(r.text,'lxml')
17 | random = soup.find_all("input",attrs={"name":"random_form"})[0]['value']
18 |
19 | data = {
20 |     "displayName":'',
21 |     "displayPasswd":'',
22 |     "submit.x":'53',
23 |     "submit.y":'13',
24 |     "operType":'911',
25 |     "random_form":random,
26 |     'select':'2',
27 |     'userName':'1140340101',
28 |     'passwd':'xxxxxx'
29 | }
30 |
31 | r = s.post(url, headers=headers ,data=data)
32 |
33 |
34 | def main(sid):
35 |     sid = str(sid)
36 |     print "sid:",sid
37 |     # url1 = "http://210.46.72.143/student/studentInfo.jsp"
38 |     # r = s.get(url1, data={"userName":sid,"passwd":sid})
39 |     r = s.get("http://210.46.72.143/student/studentInfo.jsp?userName="+sid+"&passwd="+sid)
40 |     soup = BeautifulSoup(r.text,'lxml')
41 |     print "id:",
42 |     print soup.find_all("td")[15].text[1:]
43 |
44 |
45 |     r2 = s.get("http://210.46.72.143/student/queryHealthInfo.jsp")
46 |     soup = BeautifulSoup(r2.text,'lxml')
47 |
48 |     tice = soup.find_all("td")
49 |     height = tice[16].text
50 |     weight = tice[24].text
51 |
52 |     print "height:", height
53 |     print "weight:", weight
54 |
55 |
56 | main(1140340101)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # HIT-spider
2 |
3 | ## Directory overview
4 |
5 | - hitwlan.py — logs in to HIT-WLAN (needs your own account and password, not the test account)
6 | - jwc.py — scripts for poking around the Academic Affairs site (jwts)
7 |   - fetch grades
8 |   - fetch student photos
9 |   - select and drop courses
10 | - login.py — login via the new unified authentication
11 | - today.py — vote booster for the 今日哈工大 news site (today.hit.edu.cn)
12 | - tice.py — crawler for the physical fitness test site (yields ID-card number, height, weight, etc.)
13 | - somersa.py — an RSA encryption routine found on Zhihu
14 |
15 | ## Notes
16 | Rename config.py.bak in the repository root to config.py and fill in your student ID and password; nothing will run properly otherwise.
17 |
18 | The university's sites can change at any time, so everything here has a limited shelf life.
19 |
20 | The first login attempt may fail for reasons unknown; running it again works. Also, the cultural-quality course category is newly added and untested — contact me if you're willing to test it. QQ: 519043202
21 |
22 | Feel free to open issues with suggestions, e.g. scraping a whole class's grades or scraping timetables.
23 |
24 | A while back the author got an official visit and handed the course-selection bug over to the Academic Affairs Office, so it may get patched at some point....
25 |
26 | More recently the author got another visit. Please take password protection seriously and never post HIT-related personal passwords online. I have since graduated from HIT and cannot keep maintaining this; my ability is limited and the code quality is questionable, so use it responsibly.
27 |
28 | ### Course selection
29 |
30 | Call
31 |
32 | ```python
33 | c.xuanke("2016-2017-2-13SD28002200-001","xx",'100')
34 | ```
35 |
36 | The first argument is the course number and the second is the course-type code — see the table below (humanities electives are the only odd case). The optional third argument is the weight to bid.
37 |
38 | There are two kinds of courses: ones you simply click to select, and ones that take a weight bid (weights aren't really worth fighting over, and I no longer have a way to test them, so...).
39 |
40 | | Course type | Code |
41 | | ------ | ---- |
42 | | Restricted elective | xx |
43 | | Innovation experiment | cxsy |
44 | | Physical education | ty |
45 | | English | yy |
46 | | Humanities restricted elective | tsk |
47 | | Innovation seminar | cxyx |
48 | | Quality elective | szxx |
49 | | Quality core | szhx |
50 |
51 |
52 | There are a few special tricks that involve slightly shady things, so they are not documented here; message me privately if interested. For example, ~~selecting courses outside the selection window~~.
53 |
54 | When dropping a course you need to set the semester — the 'pageXnxq' line in the snippet below: '2016-20172' means the second semester of the 2016-2017 academic year, and so on.
55 |
56 | ```python
57 | # the data dict in jwc.py's tuike() method
58 | data = {
59 |     'rwh' : cid,
60 |     'pageXklb' : lb,
61 |     'pageXnxq':'2016-20172',
62 |     'pageNj': '',
63 |     'pageYxdm': '',
64 |     'pageZydm': '',
65 |     'pageKcmc': '',
66 | }
67 | ```
68 |
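For dropping a course, jwc.py provides `tuike`. A hedged usage sketch follows — the course number is the same illustrative one used above, `c` is a `hit_jwts` instance as in jwc.py's `__main__`, and `pageXnxq` in jwc.py must already point at the right semester as described above:

```python
# illustrative only; the course number is a placeholder, and pageXnxq in jwc.py
# must already be set to the right semester (e.g. '2016-20172')
c = hit_jwts(username, password)                  # log in (username/password from config.py)
c.xuanke("2016-2017-2-13SD28002200-001", "xx")    # select a restricted elective
c.tuike("2016-2017-2-13SD28002200-001", "xx")     # drop it again
```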
69 | ## Vote boosting
70 |
71 | The first argument to shua is the news-page URL; the second is how many votes to attempt (see the example below).
72 |
73 | The number of votes that actually land varies, but overall it works well.
74 |
75 | I have never tried large-scale vote-stuffing — use some restraint.
76 |
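A hedged example, mirroring the call at the bottom of today.py (the URL is the sample news page used there; the vote count of 10 is arbitrary):

```python
page_url = "http://today.hit.edu.cn/news/2017/06-05/4762308160RL1.htm"  # sample news page
print get_recommend_count(page_url)   # recommendation count before
shua(page_url, 10)                    # attempt 10 votes
print get_recommend_count(page_url)   # recommendation count after
```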
77 | ## Fitness test site crawler
78 |
79 | It currently logs in with the 1140340101 account; that student never actually enrolled at HIT, but the account is still there. Please don't go changing its password, thanks.
80 |
81 | Only accounts that still use the default password can be crawled, and there is no error handling yet (lazy).
82 |
83 | Enter your account and password on lines 27 and 28 of tice.py (or keep using the 1140340101 account I provide — just don't change it).
84 |
85 | Call main(xxxxxx), where xxxxxx is the student ID you want to look up. The output looks like this:
86 |
87 | ```
88 | sid: 1140340116
89 | id: 35018xxxxxxx8195X
90 | height: 173.6
91 | weight: 66.0
92 | ```
93 |
94 |
95 | ## More details
96 |
97 | See my blog: [still being written..](http://tmn07.com/hexo)
--------------------------------------------------------------------------------
/today.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Fri Dec 25 19:45:39 2015
4 | @author: Tmn07
5 | """
6 |
7 | import random
8 | import time
9 | import requests
10 | from fake_useragent import UserAgent
11 | from bs4 import BeautifulSoup
12 |
13 | def get_recommend_url(page_url):
14 |     '''
15 |     Get the URL behind the "recommend this article" button
16 |     '''
17 |     page = requests.get(page_url).content  # fetch the page with requests.get
18 |     soup = BeautifulSoup(page, 'html.parser')  # parse it with BeautifulSoup
19 |     iframe_url = soup.find_all('center')[1].iframe['src']  # grab the iframe's src
20 |     recommend_url = 'http://today.hit.edu.cn' + iframe_url.replace('0.htm','1.htm')
21 |     return recommend_url
22 |
23 | def get_recommend_count(page_url):
24 |     '''
25 |     Get the article's recommendation count
26 |     '''
27 |     page = requests.get(page_url).content
28 |     soup = BeautifulSoup(page, 'html.parser')
29 |     iframe_url = 'http://today.hit.edu.cn' + soup.find_all('center')[1].iframe['src']
30 |     iframe_code = requests.get(iframe_url).content
31 |     iframe_soup = BeautifulSoup(iframe_code, 'html.parser')
32 |     recommend_count = iframe_soup.find_all('div',{'class':'topBox'})[0].text
33 |     recommend_count = recommend_count.replace('\t','').replace('\r','').replace('\n','')
34 |     return int(recommend_count)
35 |
36 | def ip_generator():
37 |     '''
38 |     Generate a random IP address (used to spoof X-Forwarded-For)
39 |     '''
40 |     a = random.randint(1,255)
41 |     b = random.randint(0,255)
42 |     c = random.randint(0,255)
43 |     d = random.randint(0,255)
44 |     ipAddress = "%d.%d.%d.%d" % (a,b,c,d)
45 |     print ipAddress
46 |     return ipAddress
47 |
48 | def shua(page_url,k):
49 |     url = get_recommend_url(page_url)  # URL behind "recommend this article"
50 |     for i in range(k):
51 |         headers = { "User-Agent": UserAgent().random, "X-Forwarded-For": ip_generator() }
52 |         try:
53 |             request = requests.get(url, headers = headers)
54 |         except requests.exceptions.RequestException, e:
55 |             print e
56 |
57 |         time.sleep(random.random())
58 |         print i+1
59 |
60 |
61 | page_url = "http://today.hit.edu.cn/news/2017/06-05/4762308160RL1.htm"  # URL of the news page
62 | print get_recommend_count(page_url)  # recommendation count before boosting
63 | shua(page_url,3)
64 | print get_recommend_count(page_url)  # recommendation count after boosting
65 |
--------------------------------------------------------------------------------
/login.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | import re
3 | import requests
4 | from bs4 import BeautifulSoup
5 |
6 | from config import *
7 | from somersa import rsa
8 |
9 | def write_down(data, filename='test.html'):
10 |     fp = open(filename, 'w')
11 |     fp.write(data)
12 |     fp.close()
13 |     print('write down ok')
14 |
15 |
16 | def login(uid, pwd):
17 |     # go to the unified-authentication (CAS) login page
18 |     url = "https://ids.hit.edu.cn/authserver/login"
19 |
20 |     s = requests.Session()
21 |
22 |     r = s.get(url, timeout=None)
23 |
24 |     soup = BeautifulSoup(r.text, 'lxml')
25 |
26 |     login_form = soup.find('form', id="casLoginForm")
27 |
28 |     # print(login_form)
29 |
30 |     # collect the hidden form fields the login requires
31 |     hidden_inputs = login_form.find_all('input', type='hidden')
32 |
33 |     hidden_data = {}
34 |
35 |     for i in hidden_inputs:
36 |         hidden_data[i['name']] = i['value']
37 |
38 |     # write_down(r.content)
39 |
40 |     # add student ID and password
41 |     hidden_data['username'] = uid
42 |     hidden_data['password'] = pwd
43 |
44 |     # build the request headers
45 |     header = {
46 |         'Referer': 'https://ids.hit.edu.cn/authserver/login',
47 |         'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36'
48 |     }
49 |     # header = {
50 |     #     'Accept':'*/*',
51 |     #     'Accept-Encoding':'gzip, deflate, sdch',
52 |     #     'Accept-Language':'zh-CN,zh;q=0.8',
53 |     #     'Connection':'keep-alive',
54 |     #     'Host':'jwts.hit.edu.cn',
55 |     #     'Cookie':'JSESSIONID=xkQLYT6Khvy59vfp1fyx3dXh1htt9v7h959sxLmhLYh0bcJxvGcn!-1677291570; clwz_blc_pst=16781484.23323; name=value',
56 |     #     'Referer':'http://jwts.hit.edu.cn/',
57 |     #     'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
58 |     # }
59 |     # submit the login form
60 |     r = s.post('https://ids.hit.edu.cn/authserver/login', headers=header, data=hidden_data)
61 |
62 |     try:
63 |         soup = BeautifulSoup(r.text, 'lxml')
64 |         err = soup.find(id='msg')
65 |         if err:
66 |             print(err.text)
67 |         else:
68 |             print('login ok')
69 |     except Exception, e:
70 |         pass
71 |
72 |     return s
73 |
74 | def old_login(uid, pwd):
75 |     f=open("test.html")
76 |     data=f.read()
77 |     n = eval(re.findall("KeyPair(.*?);", data)[0])[2]  # RSA modulus embedded in the old login page
78 |     post_data = {}
79 |     post_data['username'] = rsa(uid, n)
80 |     post_data['password'] = rsa(pwd, n)
81 |     print post_data
82 |
83 | def get_login_style():
84 |     # visit the page to see which login flow the jwts site currently uses
85 |     url = "http://jwts.hit.edu.cn/"
86 |     data = {
87 |         'Accept':'*/*',
88 |         'Accept-Encoding':'gzip, deflate, sdch',
89 |         'Accept-Language':'zh-CN,zh;q=0.8',
90 |         'Connection':'keep-alive',
91 |         'Host':'jwts.hit.edu.cn',
92 |         'Referer':'http://jwts.hit.edu.cn/',
93 |         'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
94 |     }
95 |     r = requests.get(url,headers=data)
96 |     soup = BeautifulSoup(r.text, 'lxml')
97 |
98 |     login_url = soup.find('a', id='dl')['href']
99 |     # http://jwts.hit.edu.cn/loginCAS
100 |     if login_url == '/loginLdapQian':
101 |         # old-style jwts login
102 |         print "old"
103 |         return 0
104 |     else:
105 |         # new unified-authentication login?
106 |         print "new"
107 |         return 1
108 |
109 |
110 | if __name__ == '__main__':
111 |     # first argument: student ID, second argument: password
112 |     # s = login(username, password)
113 |
114 |     # visit another site to verify the login
115 |     # test_url = "https://cms.hit.edu.cn/my/"
116 |     # r = s.get(test_url, verify=False)
117 |
118 |     # write_down(r.content)
119 |
120 |     # print (get_login_style())
121 |     old_login(username, password)
122 |
--------------------------------------------------------------------------------
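jwc.py consumes `login()` by fetching `/loginCAS` with the returned session so the CAS login carries over to the jwts site. A hedged sketch of standalone use, mirroring the commented-out lines in `__main__` above (the output filename is just an example):

```python
# hedged sketch; username/password come from config.py as in the rest of the repo
s = login(username, password)
r = s.get("http://jwts.hit.edu.cn/loginCAS")  # carry the CAS session over to jwts
write_down(r.content, "jwts.html")            # dump the page to check the login worked
```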
/jwc.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | import requests
3 | from bs4 import BeautifulSoup
4 | from login import *
5 | import time
6 |
7 | from config import *
8 |
9 |
10 | class hit_jwts(object):
11 |     """ Main class of the program
12 |     """
13 |
14 |     def __init__(self, uid, pwd):
15 |         """Constructor: picks a login flow and keeps the session
16 |         """
17 |         url = "http://jwts.hit.edu.cn/loginLdap"
18 |
19 |         # can't be 100% sure....
20 |         ltype = get_login_style()
21 |
22 |         if ltype == 0:
23 |             s = self.login(uid, pwd)
24 |         else:
25 |             s = login(uid, pwd)
26 |             test_url = "http://jwts.hit.edu.cn/loginCAS"
27 |             s.get(test_url)
28 |             print("test ok")
29 |         self.s = s
30 |
31 |     def login(self, uid, pwd):
32 |         s = requests.Session()
33 |         url = 'http://jwts.hit.edu.cn/loginLdap'
34 |         header = {
35 |             'Host': 'jwts.hit.edu.cn',
36 |             'Origin': 'http://jwts.hit.edu.cn',
37 |             'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/51.0.2704.79 Chrome/51.0.2704.79 Safari/537.36',
38 |             'Referer': 'http://jwts.hit.edu.cn/loginLdapQian'
39 |         }
40 |         post_data = {
41 |             'usercode': uid,
42 |             'password': pwd
43 |         }
44 |         r = s.post(url, headers=header, data=post_data)
45 |
46 |         try:
47 |             if len(r.headers['Set-Cookie']) > 20:
48 |                 print('login ok')
49 |             else:
50 |                 print('login fail')
51 |                 exit()
52 |         except Exception, e:
53 |             pass
54 |
55 |         return s
56 |
57 |     def write_down(self, data, filename, mode='w'):
58 |         with open(filename, mode) as f:
59 |             f.write(data)
60 |         print(filename + ' write down ok')
61 |
62 |     def score(self):
63 |         # post_data = {
64 |         #     'pageNo': 1,
65 |         #     'pageSize': 250,
66 |         #     'pageCount': 1
67 |         # }
68 |         post_data = {
69 |             'pageXnxq': "2015-20161",
70 |         }
71 |         r = self.s.post('http://jwts.hit.edu.cn/cjcx/queryQmcj', data=post_data)
72 |         # r = self.s.post('http://jwts.hit.edu.cn/cjcx/queryQmcj', headers=header, data=post_data)
73 |         if r.status_code == 200:
74 |             print('get score ok')
75 |             self.write_down(r.content, 'score.html')
76 |         else:
77 |             print('get score fail')
78 |             exit()
79 |
80 |     def getPhoto(self, uid):
81 |         url = "http://jwts.hit.edu.cn/xswhxx/showPhoto?xh=" + str(uid)
82 |         r = self.s.get(url)
83 |         self.write_down(r.content, 'photo.jpg', 'wb')
84 |
85 |     def xuanke(self,cid,lb,qz=""):
86 |         url = "http://jwts.hit.edu.cn/xsxk/saveXsxk"
87 |         data = {
88 |             'rwh' : cid,
89 |             'pageXklb': lb,  # course-type code passed by the caller (xx, ty, yy, ...)
90 |             'qz': qz,
91 |         }
92 |         r = self.s.post(url,data)
93 |         return r
94 |
95 |     def cjxxview(self):
96 |         filep = open("cjxx.txt","w")
97 |         # 6666636
98 |         start_id = 6669600
99 |         baseurl = "http://jwts.hit.edu.cn/cjcx/queryCjxxView?id="
100 |         f = 0
101 |         while f<30:
102 |             url = baseurl+str(start_id)
103 |             r = self.s.get(url)
104 |             if r.headers.get('Content-Length',0) == "1842":
105 |                 # 2472
106 |                 print start_id,"null"
107 |                 f+=1
108 |                 start_id += 1
109 |                 time.sleep(2)
110 |                 filep.write(str(start_id)+"\tnull\n")
111 |                 continue
112 |             soup = BeautifulSoup(r.content,"lxml")
113 |             data = soup.find_all('td')
114 |             print start_id,data[0],data[3],data[7]
115 |             filep.write(str(start_id)+str(data[0])+str(data[3])+str(data[7])+"\n")
116 |             start_id += 1
117 |             time.sleep(2)
118 |
119 |
120 |     def gay(self):
121 |         import MySQLdb
122 |         db = MySQLdb.connect("127.0.0.1","root","root","jwts")
123 |
124 |         # get a cursor with the cursor() method
125 |         cursor = db.cursor()
126 |
127 |         # 6666620 6666794
128 |         # 6666294 6670647
129 |         start_id = 6670634
130 |         # baseurl = "http://jwts.hit.edu.cn/cjcx/queryCjxxView?rwh=2016-2017-1-13GN12000300-017&id="
131 |
132 |         baseurl = "https://vpn.hit.edu.cn/cjcx/,DanaInfo=jwts.hit.edu.cn+queryCjxxView?rwh=2016-2017-1-13GN12000300-017&id=667063"
133 |         f = 0
134 |         # i = 0
135 |         while f<30:
136 |             url = baseurl+str(start_id)
137 |             r = self.s.get(url)
138 |             # 13GN12000300
139 |             if r.headers.get('Content-Length',0) == "2472":
140 |                 # 2472
141 |                 print start_id,"null"
142 |                 f+=1
143 |                 start_id += 1
144 |                 time.sleep(0.5)
145 |                 # filep.write(str(start_id)+"\tnull\n")
146 |                 continue
147 |             soup = BeautifulSoup(r.content,"lxml")
148 |             data = soup.find_all('td')
149 |             # print data[2]
150 |             if data[2].string=="13GN12000300":
151 |                 try:
152 |                     sql = "INSERT INTO `jwts`.`gay` (`id`, `rwh`, `score`, `item1`, `item2`, `item3`) VALUES('%d', '%s', '%f', '%f', '%f', '%f');" % (start_id , data[0].string[-3:], float(data[7].string),float(data[9].string),float(data[11].string),float(data[13].string))
153 |                     # print float(data[7].string)
154 |                 except Exception, e:
155 |                     # raise e
156 |                     if data[7].string == u"缓考":  # deferred exam
157 |                         status = 1
158 |                     else:
159 |                         status = 2
160 |                     sql = "INSERT INTO `jwts`.`gay` (`id`, `rwh`, `status`) VALUES('%d', '%s', '%d');" % (start_id , data[0].string[-3:], status)
161 |                 print sql
162 |                 cursor.execute(sql)
163 |
164 |             print start_id,data[0],data[3],data[7]
165 |             # filep.write(str(start_id)+str(data[0])+str(data[3])+str(data[7])+"\n")
166 |             start_id += 1
167 |             # i+=1
168 |             time.sleep(0.5)
169 |
170 |         db.close()
171 |
172 |     def tuike(self,cid="",lb=""):
173 |         url = "http://jwts.hit.edu.cn/xsxk/saveXstk"
174 |         header = {
175 |             'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
176 |             'Accept-Encoding':'gzip, deflate',
177 |             'Accept-Language':'zh-CN,zh;q=0.8',
178 |             'Cache-Control':'max-age=0',
179 |             'Connection':'keep-alive',
180 |             'Content-Length':'102',
181 |             'Content-Type':'application/x-www-form-urlencoded',
182 |             'Host':'jwts.hit.edu.cn',
183 |             'Origin':'http://jwts.hit.edu.cn',
184 |             'Referer':'http://jwts.hit.edu.cn/xsxk/queryYxkc',
185 |             'Upgrade-Insecure-Requests':'1',
186 |             'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36'
187 |         }
188 |         data = {
189 |             'rwh' : cid,
190 |             'pageXklb' : lb,
191 |             'pageXnxq':'2016-20172',
192 |             'pageNj': '',
193 |             'pageYxdm': '',
194 |             'pageZydm': '',
195 |             'pageKcmc': '',
196 |         }
197 |         r = self.s.post(url,headers=header, data=data)
198 |         return r
199 |
200 |
201 | if __name__ == '__main__':
202 |     # first argument: student ID, second argument: password
203 |     c = hit_jwts('xxxx', 'xxxx')
204 |     c.score()
205 |     c.getPhoto('1140340116')
206 |
207 |
208 |     # c.xuanke("2016-2017-2-13SE28001200-001","xx")
209 |     # c.xuanke("2016-2017-1-GO00300400-001","qxrx")
210 |     # c.tuike("2016-2017-1-GO00300400-001","qxrx")
211 |
--------------------------------------------------------------------------------
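`score()` above posts `pageXnxq` hard-coded to '2015-20161'. A hedged sketch of a small helper that takes the semester as a parameter, using the same '2016-20172'-style format described in the README (`score_for` is my own name, not part of the repo):

```python
# hypothetical helper, not in the repo: same request as hit_jwts.score(),
# but with the semester passed in instead of hard-coded
def score_for(c, xnxq):
    r = c.s.post('http://jwts.hit.edu.cn/cjcx/queryQmcj', data={'pageXnxq': xnxq})
    if r.status_code == 200:
        c.write_down(r.content, 'score-' + xnxq + '.html')  # e.g. score-2016-20172.html
    return r

# usage: score_for(c, '2016-20172') after c = hit_jwts(username, password)
```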
/some/index.php:
--------------------------------------------------------------------------------
[The PHP/HTML markup of this file did not survive the dump; only its visible labels remain. The page renders grade statistics under the heading "Other (taken from the current … records)", with rows for: overall average, homework average, essay average, final-exam average, fail rate, number of exam no-shows, and number of deferred exams.]