[^<]+
'
628 | result = GetRE(content,regexp)
629 | videolist = []
630 | for aid,title in result:
631 | videolist.append(Video(aid,title))
632 | return videolist
633 |
634 |
if __name__ == "__main__":
    # Convert <xmlid>.xml on the user's desktop into an .ass subtitle file.
    xmlid = '8581509'
    Desktop_Path = '%s/Desktop/'%(os.path.expanduser('~'))
    # Bug fix: use a context manager so the XML file handle is closed even if
    # conversion raises (the original left `fid` open for the process lifetime).
    with open('%s%s.xml'%(Desktop_Path,xmlid)) as fid:
        Danmaku2ASS(fid.read(),r'%s%s.ass'%(Desktop_Path,xmlid), 640, 360, 0, 'sans-serif', 15, 0.5, 10, False)
--------------------------------------------------------------------------------
/xml2ass/source/convert.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon May 26 23:42:03 2014
4 |
5 | @author: Vespa
6 | """
7 |
8 | from support import *
def Danmaku2ASS(input_files, output_file, stage_width, stage_height, reserve_blank=0, font_face='sans-serif', font_size=25.0, text_opacity=1.0, comment_duration=5.0, is_reduce_comments=False, progress_callback=None):
    """
    Convert danmaku (bullet-comment) XML into an ASS subtitle file.

    input_files: danmaku XML content, e.g. obtained via GetDanmuku(cid)
    output_file: path of the ASS file to write (or an already-open file,
                 judging by the `fo != output_file` close guard below)
    stage_width/stage_height: video resolution the comments are laid out for
    reserve_blank: pixels at the bottom of the stage kept free of comments
    """
    fo = None
    comments = ReadComments(input_files, font_size)
    try:
        fo = ConvertToFile(output_file, 'w')
        ProcessComments(comments, fo, stage_width, stage_height, reserve_blank, font_face, font_size, text_opacity, comment_duration, is_reduce_comments, progress_callback)
    finally:
        # Bug fix: if ConvertToFile itself raised, `fo` is still None and the
        # original `fo.close()` crashed with AttributeError inside finally.
        if output_file and fo is not None and fo != output_file:
            fo.close()
23 |
24 |
if __name__ == "__main__":
    # "{query}" is substituted by the Alfred workflow with the selected xml id.
    xmlid = "{query}"
    Desktop_Path = '%s/Desktop/'%(os.path.expanduser('~'))
    # Bug fix: close the input XML file deterministically via `with`
    # (the original never closed `fid`).
    with open('%s%s.xml'%(Desktop_Path,xmlid)) as fid:
        Danmaku2ASS(fid.read(),r'%s%s.ass'%(Desktop_Path,xmlid), 640, 360, 0, 'sans-serif', 15, 0.5, 10, False)
--------------------------------------------------------------------------------
/xml2ass/source/support.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon May 26 23:59:09 2014
4 |
5 | @author: Vespa
6 | """
7 | import urllib2
8 | import urllib
9 | import re
10 | import json
11 | import zlib
12 | import gzip
13 | import xml.dom.minidom
14 | import hashlib
15 | from biclass import *
16 | import time
17 | import sys
18 | import os
19 | from GetAssDanmaku import *
def GetRE(content, regexp):
    """Return every non-overlapping match of `regexp` found in `content`."""
    pattern = re.compile(regexp)
    return pattern.findall(content)
22 |
def getURLContent(url):
    """Fetch `url` and return the response body, gunzipped if necessary.

    Retries forever (5s pause) on HTTP errors other than 404; returns ""
    on a 404.  NOTE(review): non-HTTPError failures (URLError, socket
    errors) are not caught and will propagate to the caller.
    """
    while True:
        flag = 1  # success marker for this attempt
        try:
            # Spoof a desktop Firefox UA; some endpoints reject bare urllib clients.
            headers = {'User-Agent':'Mozilla/5.0 (Windows U Windows NT 6.1 en-US rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
            req = urllib2.Request(url = url,headers = headers)
            page = urllib2.urlopen(req)
            content = page.read()
        except urllib2.HTTPError,e:
            if e.code == 404:
                return ""  # missing resource: give up immediately
            flag = 0       # transient HTTP error: retry after a pause
            time.sleep(5)
        if flag == 1:
            break
    # Transparently inflate gzip-encoded bodies (wbits=16+MAX_WBITS selects
    # the gzip container format for zlib).
    if page.info().get('Content-Encoding') == 'gzip':
        content = zlib.decompress(content, 16+zlib.MAX_WBITS)
    return content
41 |
class JsonInfo():
    """Fetch a JSON API endpoint and expose its payload via Getvalue().

    Sets `error`/`ERROR_MSG` when the response carries a non-zero `code`
    field.  Python 2 idioms throughout (`has_key`, `unicode`, print stmt).
    """
    def __init__(self,url,pre_deal=lambda x:x):
        # `pre_deal` lets callers preprocess the raw body (e.g. strip a
        # JSONP wrapper) before JSON parsing.
        self.info = json.loads(pre_deal(getURLContent(url)))
        # API failures are signalled by a non-zero `code` plus either a
        # `message` or an `error` string.
        if self.info.has_key('code') and self.info['code'] != 0:
            if self.info.has_key('message'):
                print "【Error】code=%d, msg=%s, url=%s"%(self.info['code'],self.Getvalue('message'),url)
                self.ERROR_MSG = self.Getvalue('message')
            elif self.info.has_key('error'):
                print "【Error】code=%d, msg=%s, url=%s"%(self.info['code'],self.Getvalue('error'),url)
                self.ERROR_MSG = self.Getvalue('error')
            self.error = True
    def Getvalue(self,*keys):
        """Walk `keys` down nested dicts; return None on any missing step.

        A unicode leaf value is re-encoded to a UTF-8 byte string.
        """
        if len(keys) == 0:
            return None
        if self.info.has_key(keys[0]):
            temp = self.info[keys[0]]
        else:
            return None
        if len(keys) > 1:
            for key in keys[1:]:
                if type(temp) == dict and temp.has_key(key):
                    temp = temp[key]
                else:
                    return None
        if isinstance(temp,unicode):
            temp = temp.encode('utf8')
        return temp
    # Class-level defaults, shadowed by instance attributes once assigned.
    info = None
    error = False
    ERROR_MSG = ""
72 |
def GetString(t):
    """Return `t` unchanged, unless it is exactly an int — then its decimal string.

    `type(t) is int` (not isinstance) deliberately leaves bools untouched.
    """
    return str(t) if type(t) is int else t
77 |
def getint(string):
    """Parse `string` as an int, returning 0 for anything unparsable.

    Bug fix: the original bare `except:` also swallowed KeyboardInterrupt
    and SystemExit; narrowed to the exceptions int() actually raises.
    """
    try:
        return int(string)
    except (TypeError, ValueError):
        return 0
84 |
def DictDecode2UTF8(dict):
    """Re-encode every unicode value in `dict` to a UTF-8 byte string, in place.

    Returns the same (mutated) mapping.  NOTE(review): the parameter shadows
    the `dict` builtin; name kept for interface compatibility.
    """
    for key in dict:
        value = dict[key]
        if isinstance(value, unicode):
            dict[key] = value.encode('utf8')
    return dict
90 |
def GetVideoFromRate(content):
    """
    Extract video entries from a video-search results page.

    NOTE(review): the regex literals below appear corrupted — the HTML tag
    text inside them seems to have been stripped by whatever produced this
    copy, leaving unterminated raw strings split across lines.  Restore them
    from the original source before using this function.
    """
    # av id and title
    regular1 = r']*>(.*)'
    info1 = GetRE(content,regular1)
    # view count
    regular2 = r'
\1'
    info2 = GetRE(content,regular2)
    # favourites
    regular3 = r'
\1'
    info3 = GetRE(content,regular3)
    # danmaku count
    regular4 = r'
\1'
    info4 = GetRE(content,regular4)
    # upload date
    regular5 = r'
(.+)'
    info5 = GetRE(content,regular5)
    # cover image
    regular6 = r'
![]()
]*>'
    info6 = GetRE(content,regular6)
    # uploader id and name
    regular7 = r'
(.+)'
    info7 = GetRE(content,regular7)
    # NOTE: all info lists are expected to have equal length (could assert here).
    videoNum = len(info1)  # number of videos found
    videoList = []

    for i in range(videoNum):
        video_t = Video()
        video_t.aid = getint(info1[i][0])
        video_t.title = info1[i][1]
        video_t.guankan = getint(info2[i])   # view count
        video_t.shoucang = getint(info3[i])  # favourites
        video_t.danmu = getint(info4[i])     # danmaku count
        video_t.date = info5[i]
        video_t.cover = info6[i]
        video_t.author = User(info7[i][0],info7[i][1])
        videoList.append(video_t)
    return videoList
132 |
def GetSign(params, appkey, AppSecret=None):
    """
    Build the signed query string required by the new-style API (avoids -3).

    Inserts `appkey` into `params` (mutating the caller's dict, as before),
    serialises the parameters in sorted key order as URL-quoted `k=v` pairs,
    and — when AppSecret is provided — appends `&sign=` plus
    md5(query + AppSecret).
    """
    params['appkey'] = appkey
    pairs = []
    for key in sorted(params.keys()):
        pairs.append(key + "=" + str(urllib.quote(GetString(params[key]))))
    data = "&".join(pairs)
    if AppSecret is None:
        return data
    digest = hashlib.md5()
    digest.update(data + AppSecret)
    return data + '&sign=' + digest.hexdigest()
150 |
def ParseComment(danmu):
    """Yield a Danmu object for each <d> element of a danmaku XML document.

    The element's `p` attribute is a comma-separated tuple, consumed here as:
    p[0] -> t_video (float), p[1] -> danmu_type (int), p[2] -> font size,
    p[3] -> colour int fed to ConvertColor, p[4] -> unix timestamp,
    p[6] -> sender hash (mid_crc).
    """
    dom = xml.dom.minidom.parseString(danmu)
    for comment in dom.getElementsByTagName('d'):
        p = str(comment.getAttribute('p')).split(',')
        item = Danmu()
        item.t_video = float(p[0])
        item.danmu_type = int(p[1])
        item.t_stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(p[4])))
        item.mid_crc = p[6]
        item.danmu_color = ConvertColor(int(p[3]))
        # Bug fix: the font size was assigned to a throwaway local
        # (`danmu_fontsize = ...`) instead of the attribute on the object,
        # unlike every other field in this loop.
        item.danmu_fontsize = int(p[2])
        if len(comment.childNodes) != 0:
            # '/n' is the danmaku escape for a newline inside comment text.
            item.content = str(comment.childNodes[0].wholeText).replace('/n', '\n')
        else:
            item.content = ""
        yield item
--------------------------------------------------------------------------------
/xml2ass/source/xml2ass.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon May 26 23:42:03 2014
4 |
5 | @author: Vespa
6 | """
7 |
8 | import os
9 |
10 | from Feedback import *
11 |
12 | fb = Feedback()
13 |
14 | Desktop_Path = '%s/Desktop/'%(os.path.expanduser('~'))
15 | list_dirs = os.listdir(Desktop_Path)
16 | for line in list_dirs:
17 | filepath = os.path.join(Desktop_Path,line)
18 | if not os.path.isdir(filepath):
19 | if line.endswith('xml'):
20 | xmlid = line.split('.')[0]
21 | fb.add_item(xmlid+'.xml',subtitle='转化'+xmlid+'.xml',arg=xmlid)
22 |
23 | print fb
--------------------------------------------------------------------------------
/xml2ass/xml2ass.alfredworkflow:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Vespa314/BilibiliAlfredWorkFlows/43f3a27d35bd28118bf9935d4236344b2d5d86d7/xml2ass/xml2ass.alfredworkflow
--------------------------------------------------------------------------------