├── README.md
├── Wallpaper
│   ├── Picture_16px_1171715_easyicon.net.ico
│   ├── Wallpaper.py
│   └── 壁纸下载工具.exe
├── emoji
│   ├── emoji.py
│   └── emoji_bak.py
├── gif
│   ├── 006WkX52gy1fw5l2ipx9cg309q0e61l2.gif
│   ├── GIF图片.exe
│   └── GIF图片.py
├── meizi
│   ├── meizi.png
│   ├── meizi.py
│   └── page.py
├── netbian
│   ├── 4k.exe
│   ├── 4k.py
│   ├── netbian.png
│   └── pictures_48px_1204563_easyicon.net.ico
├── qqmpz
│   ├── qq.exe
│   ├── qq.png
│   ├── qq.py
│   └── qq_32px_1164466_easyicon.net.ico
└── translate
    ├── translate.exe
    ├── translate.ico
    ├── translate.png
    ├── translate.py
    └── translate_48px_1210171_easyicon.net.ico

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
### Scrap code written while teaching myself Python
### It's rough — you've been warned

#### GIF downloader
([view it here](./gif "view it here")), downloads animated GIFs from a forum.
Screenshot:
![GIF](https://github.com/lovebai/Python/blob/master/gif/006WkX52gy1fw5l2ipx9cg309q0e61l2.gif?raw=true "GIF")

#### Translation mini-app
([view it here](./translate "view it here")), comes in a tkinter version and a terminal version, both built on the Youdao translation API.
Screenshot:
![Window](https://github.com/lovebai/Python/blob/master/translate/translate.png?raw=true "Window")

#### Netbian wallpaper downloader
([view it here](./netbian "view it here")), downloads random wallpapers from http://pic.netbian.com/ into a folder named 4K created next to the program. Still crude — once I've learned more I'll go scrape Pixiv.
Screenshot:
![Netbian](https://github.com/lovebai/Python/blob/master/netbian/netbian.png?raw=true "Netbian")

#### QQ profile-card likes mini-app
([view it here](./qqmpz "view it here")), an early learning exercise; the code is hard to look at.
Screenshot:
![Likes](https://github.com/lovebai/Python/blob/master/qqmpz/qq.png?raw=true "Likes")

#### Meme sticker downloader
([view it here](./emoji "view it here")), scrapes meme stickers.
Screenshot:
![Memes](https://www.xiaobaibk.com/content/uploadfile/201910/77ca1572011308.png "Memes")

#### Girl-picture downloader (http://cos.top15.cn/)
([view it here](./meizi "view it here")), scrapes portrait-orientation wallpapers.
Screenshot:
![Image](https://github.com/lovebai/Python/blob/master/meizi/meizi.png?raw=true "Image")

#### Wallpaper downloader, improved edition
([view it here](./Wallpaper "view it here")), a reworked version of the wallpaper downloader.

--------------------------------------------------------------------------------
/Wallpaper/Picture_16px_1171715_easyicon.net.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lovebai/Python/23232b94f5360aedbb3df7e6b73a38a60220e6c3/Wallpaper/Picture_16px_1171715_easyicon.net.ico

--------------------------------------------------------------------------------
/Wallpaper/Wallpaper.py:
--------------------------------------------------------------------------------
'''
@Description: 4K wallpaper downloader
@version: 1.0.0
@Author: Xiaobai
@Date: 2019-12-14 11:36:27
@LastEditors : Xiaobai
@LastEditTime : 2019-12-18 23:05:33
'''
import urllib.request
import parsel
import os

## Category selection
def link_url(site):
    print("请小主人选择一下分类\n")
    pictureclass = ['/4kfengjing/','/4kmeinv/','/4kyouxi/','/4kdongman/','/4kyingshi/','/4kmingxing/','/4kqiche/','/4kdongwu/','/4krenwu/','/4kmeishi/','/4kzongjiao/','/4kbeijing/']
    classname = ['0:风景','1:美女','2:游戏','3:动漫','4:影视','5:明星','6:汽车','7:动物','8:人物','9:美食','10:宗教','11:背景\n']
    for each in classname:  # list the available categories
        print(each)
    t = int(input('请输入分类序号:'))  # prompt for a category number
    if t == 1:  # a little easter egg for one category
        print("哦哟哟,小心营养跟不上哦!^T_T^\n\n")
    url = site + pictureclass[t]
    print('你选择了', classname[t].split(':')[1].strip())  # print the chosen category name
    return url

## Request helper
def open_url(url):  # fetch a page
    req = urllib.request.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36')
    response = urllib.request.urlopen(req)  # pass the Request object so the User-Agent header is actually sent
    html = response.read()
    return html

## Number of pages in the selected category
def page_num(html):
    html = html.decode('GBK')  # the site serves GBK-encoded pages
    sel = parsel.Selector(html)
    div = sel.css('.page a::text')[-2].extract()  # the second-to-last pager link holds the last page number
    return div

## Collect the detail-page links on one listing page
def find_img(site, urls):
    arr_url = []
    html = open_url(urls).decode('GBK')
    sels = parsel.Selector(html)
    divs = sels.css('.slist ul')
    a_href = divs.css('li a::attr(href)').getall()
    for i in a_href:
        arr_url.append(site + i)  # turn relative hrefs into absolute URLs
    return arr_url

def search_img(site, num_url):  # image download URLs on a detail page
    html = open_url(num_url).decode('GBK')
    selss = parsel.Selector(html)
    pic_url = selss.css('.photo .photo-pic img::attr(src)').getall()
    picture = []
    for k in pic_url:
        picture.append(site + k)
    return picture

def search_title(site, num_url):  # image title on a detail page
    html = open_url(num_url).decode('GBK')
    selss = parsel.Selector(html)
    pic_title = selss.css('.photo .photo-pic img::attr(title)').extract_first()
    pictitle = []
    pictitle.append(pic_title)
    return pictitle


def save_img(folder, picurl, pictitle):  # save the images
    for each in picurl:
        suffix = each.split('.')[-1]  # file extension
        for name in pictitle:
            if os.path.exists(name + '.' + suffix):  # skip files that already exist
                pass
            else:
                with open(name + '.' + suffix, 'wb') as f:
                    img = open_url(each)
                    f.write(img)
                print('正在保存', name)


def download(folder='Wallpaper'):
    if not os.path.exists(folder):  # create the target folder if it doesn't exist yet
        os.mkdir(folder)
    os.chdir(folder)  # work inside the target folder
    print('图片保存目录为:', os.getcwd())
    site = 'http://pic.netbian.com'  # target site
    url = link_url(site)  # category listing URL
    # 21 images per listing page
    html = open_url(url)
    page = int(page_num(html)) + 1

    for i in range(1, page):  # walk every listing page
        if i != 1:  # page 1 uses a different URL pattern than the rest
            urls = url + 'index_' + str(i) + '.html'
        else:
            urls = url + 'index.html'
        num_page = find_img(site, urls)  # detail-page links on this listing page
        for num_url in num_page:
            picurl = search_img(site, num_url)
            pictitle = search_title(site, num_url)
            save_img(folder, picurl, pictitle)

print("""使用方法:比如你想下载\'美食\'那么就在输入框内输入它的序号\'9\'然后按下回车即可\n如果要停止下载可以按\"CTRL+C\"或者直接关闭本窗口Linux系统也按\"CTRL+C\"""")  # usage hint

if __name__ == "__main__":
    download()
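A side note on the request pattern above: the script originally attached a User-Agent to a `Request` object but then passed the plain URL to `urlopen`, so the header never reached the server (corrected above). Below is a minimal sketch of a reusable fetch helper with a timeout and a simple retry; the helper name, retry count, and backoff are illustrative assumptions, not part of the repo:

```python
import time
import urllib.error
import urllib.request

USER_AGENT = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
              '(KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36')

def fetch(url, retries=3, timeout=10):
    """Fetch raw bytes with a browser-like User-Agent, retrying transient failures."""
    req = urllib.request.Request(url, headers={'User-Agent': USER_AGENT})
    for attempt in range(retries):
        try:
            with urllib.request.urlopen(req, timeout=timeout) as resp:
                return resp.read()
        except urllib.error.URLError:
            if attempt == retries - 1:
                raise  # give up after the last attempt
            time.sleep(1 + attempt)  # simple linear backoff between retries
```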
--------------------------------------------------------------------------------
/Wallpaper/壁纸下载工具.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lovebai/Python/23232b94f5360aedbb3df7e6b73a38a60220e6c3/Wallpaper/壁纸下载工具.exe

--------------------------------------------------------------------------------
/emoji/emoji.py:
--------------------------------------------------------------------------------
import requests
import parsel
import os


def open_url():
    page = int(input("请输入要下载的页数然后回车即可:"))
    url = "https://www.fabiaoqing.com/biaoqing/lists/page/" + str(page) + ".html"
    response = requests.get(url)
    html = response.text
    sel = parsel.Selector(html)
    divs = sel.css('.tagbqppdiv')
    urls = []
    for div in divs:
        img_url = div.css('img.ui::attr(data-original)').getall()
        img_title = div.css('img.ui::attr(title)').getall()
        urls.append((img_url, img_title))
    return urls


def save_img(folder):
    urls = open_url()
    for url in urls:
        try:
            rep = requests.get(url[0][0])
            suffix = url[0][0].split('.')[-1]
            with open(url[1][0] + '.' + suffix, mode='wb') as f:
                f.write(rep.content)
            print("正在下载:" + str(url[1][0]))
        except OSError:
            print("保存的文件名不规范,将跳过此文件")  # skip titles that are not legal filenames
    print("恭喜你!现在完成!")

def download_emoji(folder="emoji"):
    print("本程序会自动采集表情网【fabiaoqing.com】的热门分类表情包")
    print("表情包会保存到和本程序同一目录下的emoji文件夹里面")
    print("打开链接地址:https://www.fabiaoqing.com/biaoqing查看要下载页面的页码")
    if not os.path.exists(folder):  # check the folder parameter, not a hard-coded name
        os.mkdir(folder)
    os.chdir(folder)
    save_img(folder)

if __name__ == "__main__":
    download_emoji()
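emoji.py simply skips any image whose title is not a legal filename (the `OSError` branch). An alternative is to sanitize the title and keep the file; a small sketch, where the replacement character and length cap are assumptions:

```python
import re

def safe_filename(title, max_len=100):
    """Replace characters Windows forbids in filenames and cap the length."""
    cleaned = re.sub(r'[\\/:*?"<>|\r\n]', '_', title)
    return cleaned[:max_len].strip() or 'untitled'

# e.g. open(safe_filename(img_title) + '.' + suffix, 'wb') instead of skipping the file
```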
--------------------------------------------------------------------------------
/emoji/emoji_bak.py:
--------------------------------------------------------------------------------
import requests
import parsel
import os


def open_url():
    page = int(input("请输入要下载的页数然后回车即可:"))
    urls = []  # accumulate results across pages instead of resetting them each iteration
    for pages in range(1, page + 1):  # pages are 1-based on the site
        url = "https://www.fabiaoqing.com/biaoqing/lists/page/" + str(pages) + ".html"
        response = requests.get(url)
        html = response.text
        sel = parsel.Selector(html)
        divs = sel.css('.tagbqppdiv')
        for div in divs:
            img_url = div.css('img.ui::attr(data-original)').getall()
            img_title = div.css('img.ui::attr(title)').getall()
            urls.append((img_url, img_title))
    return urls  # return after all pages are collected, not inside the loop


def save_img(folder):
    urls = open_url()
    for url in urls:
        try:
            rep = requests.get(url[0][0])
            suffix = url[0][0].split('.')[-1]
            with open(url[1][0] + '.' + suffix, mode='wb') as f:
                f.write(rep.content)
            print("正在下载:" + str(url[1][0]))
        except OSError:
            print("保存的文件名不规范,将跳过此文件")  # skip titles that are not legal filenames
    print("恭喜你!现在完成!")

def download_emoji(folder="emoji"):
    print("本程序会自动采集表情网【fabiaoqing.com】的热门分类表情包")
    print("表情包会保存到和本程序同一目录下的emoji文件夹里面")
    print("距今2019/10/25此分类页只有200页所以在下面的下载页数中最多输入200\n如果之后的日期就打开网站看看")
    print("链接地址:https://www.fabiaoqing.com/biaoqing")
    if not os.path.exists(folder):  # check the folder parameter, not a hard-coded name
        os.mkdir(folder)
    os.chdir(folder)
    save_img(folder)

if __name__ == "__main__":
    download_emoji()

--------------------------------------------------------------------------------
/gif/006WkX52gy1fw5l2ipx9cg309q0e61l2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lovebai/Python/23232b94f5360aedbb3df7e6b73a38a60220e6c3/gif/006WkX52gy1fw5l2ipx9cg309q0e61l2.gif

--------------------------------------------------------------------------------
/gif/GIF图片.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lovebai/Python/23232b94f5360aedbb3df7e6b73a38a60220e6c3/gif/GIF图片.exe

--------------------------------------------------------------------------------
/gif/GIF图片.py:
--------------------------------------------------------------------------------
'''
@Description: learning project
@version: 1.0.0
@Author: Xiaobai
@Date: 2019-12-23 18:30:11
@LastEditors : Xiaobai
@LastEditTime : 2019-12-23 22:07:15
@BlogSite: https://www.xiaobaibk.com
'''

import urllib.request
import os, parsel
from bs4 import BeautifulSoup

def save_dir():  # choose and enter the save folder
    folder = str(input("[默认为当前目录下的GIF]请输入要保存的文件夹名字:"))
    if folder == '':
        folder = 'GIF'
    if not os.path.exists(folder):
        os.mkdir(folder)
    os.chdir(folder)

def open_url(url):  # fetch a page
    req = urllib.request.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36')
    response = urllib.request.urlopen(req)  # pass the Request object so the User-Agent header is sent
    html = response.read()
    return html

def get_page(url):  # find the number of listing pages
    url = url + "forum-38-1.html"
    html = open_url(url)
    soup = BeautifulSoup(html, 'lxml').find('div', class_='pg')
    page = soup.find_all('a')[-2].get_text()  # the second-to-last pager link holds the last page number
    return page

def find_url(url, link):  # collect thread links on a listing page
    html = open_url(link)
    hr = parsel.Selector(html.decode('GBK'))
    th = hr.xpath("//h3/a/@href").extract()
    urlnum = []
    for k in th:
        urlnum.append(url + k)  # turn relative hrefs into absolute URLs
    return urlnum

def open_page(j):  # open a thread and collect its image URLs
    html = open_url(j)
    div = parsel.Selector(html.decode('GBK'))
    img = div.xpath("//div[@align='center']/img/@src").extract()
    return img

def save_img(picurl):  # save the images
    for each in picurl:
        filename = each.split('/')[-1]
        if not os.path.exists(filename):  # skip files that already exist
            with open(filename, 'wb') as file:
                picture = open_url(each)
                file.write(picture)


def main():
    url = "http://www.gifcc.com/"
    save_dir()
    page = int(get_page(url)) + 1
    print("正在保存文件中,请到", os.getcwd(), '目录下查看')
    print("停止保存请按“ctrl+c”或者直接关闭窗口")
    for i in range(1, page):  # range already advances i; the original '++i' here was a no-op
        link = url + 'forum-38-' + str(i) + '.html'
        img_url = find_url(url, link)
        for j in img_url:
            picurl = open_page(j)
            save_img(picurl)


if __name__ == "__main__":
    main()
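Several scripts in this repo originally wrote `++i` inside their `for` loops. Python has no increment operator: `++i` parses as unary plus applied twice and changes nothing, and the loop variable is advanced by `range` anyway, which is why those lines were dropped above. A two-line illustration:

```python
i = 5
print(++i)  # prints 5 — unary plus applied twice, not an increment

for page in range(1, 4):  # range already advances the counter
    print(page)           # 1, 2, 3
```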
--------------------------------------------------------------------------------
/meizi/meizi.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lovebai/Python/23232b94f5360aedbb3df7e6b73a38a60220e6c3/meizi/meizi.png

--------------------------------------------------------------------------------
/meizi/meizi.py:
--------------------------------------------------------------------------------
import requests
import os
import parsel

print("本实例只下载几张图")

def url_open():
    response = requests.get("http://cos.top15.cn/")
    html = response.text
    sel = parsel.Selector(html)
    div = sel.css(".item")
    url = []
    for divs in div:
        img_url = divs.css(".a-img .pic::attr(src)").extract()
        img_title = divs.css(".a-img .pic::attr(alt)").extract()
        url.append((img_url, img_title))
    return url

def save_img():
    urls = url_open()
    for url in urls:
        response = requests.get(url[0][0])
        suffix = url[0][0].split(".")[-1]  # file extension
        with open(url[1][0] + "." + suffix, mode="wb") as f:
            f.write(response.content)


def download_meizi():
    if not os.path.exists("meizi"):  # create the save folder if needed
        os.mkdir("meizi")
    os.chdir("meizi")
    save_img()

if __name__ == "__main__":
    download_meizi()
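meizi.py indexes `url[0][0]` and `url[1][0]`, which raises `IndexError` whenever a selector matches nothing. parsel's `get()` returns a single value or `None`, which makes the guard explicit; a sketch, with the function name and guard being illustrative rather than from the repo:

```python
import parsel

def first_images(html):
    """Yield (src, alt) pairs for .item images, skipping items missing either attribute."""
    sel = parsel.Selector(html)
    for item in sel.css('.item'):
        src = item.css('.a-img .pic::attr(src)').get()  # None if absent
        alt = item.css('.a-img .pic::attr(alt)').get()
        if src and alt:
            yield src, alt
```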
--------------------------------------------------------------------------------
/meizi/page.py:
--------------------------------------------------------------------------------
import urllib.request
import json
import os

print("我已经开始下载咯^_^请稍等一下下,么么哒")
if not os.path.exists("mz"):  # create the save folder if needed
    os.mkdir("mz")
os.chdir("mz")

def img_open(url):
    response = urllib.request.urlopen(url)
    html = response.read().decode("utf-8")
    target = json.loads(html)
    for i in range(20):  # at most 20 items per API page
        try:
            result = target["data"][i]['title']
            results = target["data"][i]['pic']
            with open(result + ".jpg", 'wb') as f:
                response = urllib.request.urlopen(results)
                img = response.read()
                f.write(img)
        except IndexError:
            print('没有了')  # this page had fewer than 20 items
            break
        except OSError:
            print("已经下载完成了")  # skip items that fail to download or save

for temp in range(1, 17):  # the API has 16 pages; the original routed each one through its own identical elif branch
    url = "http://cos.top15.cn/api/listApi.php?page=" + str(temp)
    img_open(url)
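page.py hard-codes 16 pages. The same walk can also stop automatically at the first empty page; a sketch — the field names `data`, `title`, and `pic` come from page.py itself, while the assumption that an empty `data` list marks the end of the listing is mine:

```python
import json
import urllib.request

API = "http://cos.top15.cn/api/listApi.php?page={}"

def iter_items(max_pages=16):
    """Yield (title, pic) pairs page by page, stopping at the first empty page."""
    for page in range(1, max_pages + 1):
        with urllib.request.urlopen(API.format(page)) as resp:
            data = json.loads(resp.read().decode("utf-8"))
        items = data.get("data") or []
        if not items:
            break  # assume an empty "data" list means we are past the last page
        for item in items:
            yield item["title"], item["pic"]
```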
--------------------------------------------------------------------------------
/netbian/4k.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lovebai/Python/23232b94f5360aedbb3df7e6b73a38a60220e6c3/netbian/4k.exe

--------------------------------------------------------------------------------
/netbian/4k.py:
--------------------------------------------------------------------------------
import urllib.request
import os
import random


def url_open(url_num):
    req = urllib.request.Request(url_num)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36')
    response = urllib.request.urlopen(req)  # pass the Request object so the User-Agent header is sent
    html = response.read()
    return html

def url_get(load, url_num):
    try:
        html = url_open(url_num).decode('gbk')
        img_addrs = []
        a = html.find('

--------------------------------------------------------------------------------
/qqmpz/qq.py:
--------------------------------------------------------------------------------
        tkinter.messagebox.showinfo('小白提示','正在领取中请稍等哦....名片赞会在12小时内到账!')
        for each in url:
            url = each + str(txt)
            req = urllib.request.Request(url)
            req.add_header('User-Agent','Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36')
            response = urllib.request.urlopen(req)  # pass the Request object so the User-Agent header is sent
            print(response.url)
        tkinter.messagebox.showinfo('小白提示','恭喜你,领取成功啦!')
    else:
        tkinter.messagebox.showinfo('小白提示','请输入正确的QQ号!')


root = tk.Tk()
root.title('QQ名片赞领取小助手')
var = tk.StringVar()
root.geometry("450x200")
tk.Label(root, text='在文本框中输入QQ然后点领取按钮即可获得1000个QQ名片赞', font=('宋体', 12), pady=7, padx=5).grid(row=1, column=0)
tk.Entry(root, width=40, textvariable=var).grid(row=2, column=0, pady=15)
tk.Button(root, text="免 费 领 取", font=(10), command=open_url).grid(row=3, column=0, pady=10)
tk.Label(root, text='公告:输入要刷的QQ即可!每天每个账号只能领取一次哦!', font=('楷体', 12), padx=5, pady=15).grid(row=5, column=0, pady=15)
root.mainloop()

--------------------------------------------------------------------------------
/qqmpz/qq_32px_1164466_easyicon.net.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lovebai/Python/23232b94f5360aedbb3df7e6b73a38a60220e6c3/qqmpz/qq_32px_1164466_easyicon.net.ico

--------------------------------------------------------------------------------
/translate/translate.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lovebai/Python/23232b94f5360aedbb3df7e6b73a38a60220e6c3/translate/translate.exe

--------------------------------------------------------------------------------
/translate/translate.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lovebai/Python/23232b94f5360aedbb3df7e6b73a38a60220e6c3/translate/translate.ico

--------------------------------------------------------------------------------
/translate/translate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lovebai/Python/23232b94f5360aedbb3df7e6b73a38a60220e6c3/translate/translate.png

--------------------------------------------------------------------------------
/translate/translate.py:
--------------------------------------------------------------------------------
from tkinter import *
import urllib.request
import urllib.parse
import json


root = Tk()
root.title("在线翻译小程序")
root.geometry("550x300")
#root.iconbitmap('E:\\python\\fanyi\\test\\translate.ico')  # window icon (absolute path, disabled)

frame = Frame(root)
frame.pack()
var = StringVar()

Label(frame, text='请在下面的输入要翻译的内容', font=('Arial', 16), pady=10).grid(row=1, column=0)
Entry(frame, width=75, textvariable=var).grid(row=2, column=0, padx=10)

def translate():
    url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
    content = var.get()
    # form fields captured from one browser session; salt/sign/ts/bv are fixed values
    data = {}
    data['i'] = content
    data['from'] = 'AUTO'
    data['to'] = 'AUTO'
    data['smartresult'] = 'dict'
    data['client'] = 'fanyideskweb'
    data['salt'] = '15703737120406'
    data['sign'] = '441d708b3695e3e9e55bf599f1768cf5'
    data['ts'] = '1570373712040'
    data['bv'] = 'b4cf244dcaabcc8b2ae8b3c5559d3dd6'
    data['doctype'] = 'json'
    data['version'] = '2.1'
    data['keyfrom'] = 'fanyi.web'
    data['action'] = 'FY_BY_CLICKBUTTION'

    data = urllib.parse.urlencode(data).encode('utf-8')

    response = urllib.request.urlopen(url, data)
    html = response.read().decode('utf-8')

    target = json.loads(html)
    results = target['translateResult'][0][0]['tgt']
    t1.insert(INSERT, results)


def re_translation():  # clear both the result box and the input field
    t1.delete('1.0', END)
    var.set("")

frame1 = Frame(root)
frame1.pack()

Label(frame1, text='翻译结果', font=('Arial', 16), pady=10).grid(row=1, column=0)
t1 = Text(frame1, height=5, width=75)
t1.grid(row=2, column=0, padx=10)
t1.config(state=NORMAL)

frame2 = Frame(root)
frame2.pack()

Button(frame2, text="翻 译", font=(15), command=translate).grid(row=1, column=0, padx=15, pady=15)
Button(frame2, text="清 空", font=(15), command=re_translation).grid(row=1, column=1, padx=15, pady=15)

mainloop()
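translate.py sends `salt`, `sign`, `ts`, and `bv` values captured from a single browser session; Youdao normally derives these per request, so the hard-coded copies may eventually be rejected. The README mentions a terminal version alongside the GUI; a minimal command-line sketch of the same POST flow, trimmed to the fields the plain `/translate` endpoint appears to accept — whether it still accepts them is an assumption:

```python
import json
import urllib.parse
import urllib.request

URL = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'

def translate_text(content):
    """POST the form translate.py uses and return the first translated segment."""
    form = {'i': content, 'from': 'AUTO', 'to': 'AUTO', 'doctype': 'json'}
    data = urllib.parse.urlencode(form).encode('utf-8')
    with urllib.request.urlopen(URL, data) as resp:
        target = json.loads(resp.read().decode('utf-8'))
    return target['translateResult'][0][0]['tgt']

if __name__ == '__main__':
    print(translate_text(input('Text to translate: ')))
```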
--------------------------------------------------------------------------------
/translate/translate_48px_1210171_easyicon.net.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lovebai/Python/23232b94f5360aedbb3df7e6b73a38a60220e6c3/translate/translate_48px_1210171_easyicon.net.ico
--------------------------------------------------------------------------------