├── .gitignore
├── requirements.txt
├── config.py
├── .github
│   └── workflows
│       └── main.yml
├── github_issue.py
├── README.md
├── get_paper_from_pdf.py
└── main.py

/.gitignore:
--------------------------------------------------------------------------------
.ipynb_checkpoints
__pycache__
*.swp
*.pyc
.idea
*.pdf
*.PDF
pdf_files
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
beautifulsoup4==4.6.3
lxml
arxiv==1.4.3
PyMuPDF==1.21.1
requests==2.26.0
tiktoken==0.2.0
tenacity==8.2.2
pybase64==1.2.3
Pillow==9.4.0
openai==0.27.0
markdown
--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
# encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

# Authentication for user filing issue (must have read/write access to repository to add issue to)
USERNAME = ''
TOKEN = ''

# The repository to add this issue to
REPO_OWNER = ''
REPO_NAME = ''

# Set new submission url of subject
NEW_SUB_URL = 'https://arxiv.org/list/cs/new'

# Keywords to search
KEYWORD_LIST = ["", ]


OPENAI_API_KEYS = ['', ]
LANGUAGE = "zh"  # zh | en
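
# Optional sketch: read the secrets from environment variables instead of
# hard-coding them here, e.g. when they are stored as GitHub Actions secrets
# and exported in the workflow. The GH_USERNAME / GH_TOKEN / OPENAI_KEYS
# variable names below are illustrative, not something this repo defines.
# import os
# USERNAME = os.environ.get("GH_USERNAME", "")
# TOKEN = os.environ.get("GH_TOKEN", "")
# OPENAI_API_KEYS = [k for k in os.environ.get("OPENAI_KEYS", "").split(",") if k]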
--------------------------------------------------------------------------------
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
name: "daily alerts"

on:
  push:
    branches:
      - main
  schedule:
    - cron: "10 20 * * 1,2,3,4,5,6"

jobs:
  backup:
    runs-on: ubuntu-latest
    name: Backup
    timeout-minutes: 25
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python 3.9
        uses: actions/setup-python@v1
        with:
          python-version: 3.9

      - name: Setup dependencies
        run: |
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
      - name: Run backup
        run: python main.py

      - name: Commit changes
        uses: elstudio/actions-js-build/commit@v3
        with:
          commitMessage: Automated snapshot
--------------------------------------------------------------------------------
/github_issue.py:
--------------------------------------------------------------------------------
# encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import json

import requests

from config import USERNAME, TOKEN, REPO_OWNER, REPO_NAME


def make_github_issue(title, body=None, assignee=USERNAME, closed=False, labels=None):
    # Create an issue on github.com using the given parameters
    if labels is None:  # avoid a shared mutable default argument
        labels = []
    # URL to create issues via POST (the issue-import preview API)
    url = 'https://api.github.com/repos/%s/%s/import/issues' % (REPO_OWNER, REPO_NAME)

    # Headers: the import endpoint requires the golden-comet preview media type
    headers = {
        "Authorization": "token %s" % TOKEN,
        "Accept": "application/vnd.github.golden-comet-preview+json"
    }

    # Create our issue
    data = {'issue': {'title': title,
                      'body': body,
                      'assignee': assignee,
                      'closed': closed,
                      'labels': labels}}

    payload = json.dumps(data)

    # Add the issue to our repository
    response = requests.request("POST", url, data=payload, headers=headers)
    if response.status_code == 202:
        print('Successfully created Issue "%s"' % title)
    else:
        print('Could not create Issue "%s"' % title)
        print('Response:', response.content)


if __name__ == '__main__':
    title = 'Pretty title'
    body = 'Beautiful body'
    assignee = USERNAME
    closed = False
    labels = [
        "imagenet", "image retrieval"
    ]

    make_github_issue(title, body, assignee, closed, labels)
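
# Alternative sketch (not wired into main.py): the stable REST endpoint
# POST /repos/{owner}/{repo}/issues accepts the same title/body/labels fields,
# in case the golden-comet import preview used above is ever switched off.
# Unlike the import endpoint, it cannot create an issue in the closed state.
def make_github_issue_rest(title, body=None, labels=None):
    url = 'https://api.github.com/repos/%s/%s/issues' % (REPO_OWNER, REPO_NAME)
    headers = {"Authorization": "token %s" % TOKEN,
               "Accept": "application/vnd.github+json"}
    data = {"title": title, "body": body or "", "labels": labels or []}
    response = requests.post(url, json=data, headers=headers)
    return response.status_code == 201  # 201 Created on success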
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Daily Literature Subscription: Building an Arxiv Daily-Push Pipeline

**Who this is for: researchers who want to follow the daily Arxiv updates and keep up with the latest progress in their field.**

### What you get

Following this guide, you will end up with the following:

```
New Arxiv papers that match your interests, together with summaries generated
by ChatGPT, are posted as issues in your personal GitHub repository. Browsing
the daily issue is enough to follow the field's progress: no manual searching,
no local deployment.
```

Layout of the daily issue in your repository:

```
Issue structure:
- # keyword
- ## paper title
- paper link, authors, abstract, ChatGPT-generated summary
```



## Requirements

1. Fetch the newest Arxiv papers of interest in time: predefined keywords are matched against each paper's abstract to pick out relevant papers;

2. Use ChatGPT to summarize the matched papers;

3. Automate the whole flow with GitHub Actions on a schedule (for example, once a day), so that checking the target repository's issues is all that is left to do.

## How it works

1) Python fetches the day's new Arxiv listings, downloads the matching papers, analyzes and summarizes them with the ChatGPT API, and publishes the result as an issue in your personal GitHub repository;

2) GitHub Actions provides the cloud deployment and the scheduled runs.
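
The keyword matching at the heart of step 1) can be sketched in a few lines with the `arxiv` package pinned in `requirements.txt` (the `"remote sensing"` keyword is just this repo's default example; `main.py` implements the full version):

```python
import arxiv

# Build the query main.py builds: "remote sensing" -> "all:remote AND all:sensing".
keyword = "remote sensing"
query = " AND ".join(f"all:{word}" for word in keyword.split())

search = arxiv.Search(query=query, max_results=30,
                      sort_by=arxiv.SortCriterion.LastUpdatedDate)
for result in search.results():
    abstract = result.summary.replace("\n", " ").lower()
    # keep a paper only if every keyword appears in its abstract
    if all(word in abstract for word in keyword.lower().split()):
        print(result.updated, result.title)
```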
## Quick start

### 1. Fork this repository

Visit `https://github.com/justchenhao/ChatDailyPapers` and click `Fork` to copy the repository to your account;

Rename the forked repository if you like, e.g. "hello-world";

This repository will receive the daily Arxiv paper digests.

### 2. Clone your fork locally

Clone your repository:

```bash
git clone https://github.com/XXX/xxx.git
```

### 3. Fill in the configuration file `config.py`:

```python
# Authentication for user filing issue (must have read/write access to repository to add issue to)
USERNAME = 'changeme'  # your GitHub user name
TOKEN = 'changeme'     # your personal access token

# The repository to add this issue to
REPO_OWNER = 'changeme'  # owner of the repository the issues are pushed to
REPO_NAME = 'changeme'   # name of the repository the issues are pushed to

# Set new submission url of subject
NEW_SUB_URL = 'https://arxiv.org/list/cs/new'

# Keywords to search
KEYWORD_LIST = ["changeme"]

OPENAI_API_KEYS = ["", ]  # your ChatGPT API key(s)
LANGUAGE = "zh"  # zh | en  # language ChatGPT answers in: Chinese (zh) or English (en)
```

### 4. Push the changes to GitHub

Open a `bash` shell in the repository root and push with `git`:

```bash
git status                    # list the modified files
git add .                     # stage the changes
git commit -m 'first commit'  # commit the staged changes to the local repository
git push origin main          # push the local main branch to the origin remote
```

After a successful push you will see a GitHub Action start running on the repository page. After a while, a new entry appears under Issues; open it to read the digest. A new `.md` file recording the full issue content is also added to the repository's `export` folder.
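
If you also want to trigger the pipeline manually instead of waiting for a push or the schedule, GitHub Actions supports a `workflow_dispatch` trigger; a minimal sketch of the extended `on:` block in `.github/workflows/main.yml`:

```
on:
  push:
    branches:
      - main
  schedule:
    - cron: "10 20 * * 1,2,3,4,5,6"
  workflow_dispatch:    # adds a "Run workflow" button in the Actions tab
```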
## More features

### Changing the schedule (optional)

Edit the `main.yml` file under `.github/workflows/`:

```
name: "daily alerts"
on:
  push:
    branches:
      - main
  schedule:
    - cron: "10 20 * * 1,2,3,4,5"
jobs:
  backup:
    runs-on: ubuntu-latest
    name: Backup
    timeout-minutes: 25
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python 3.9
        uses: actions/setup-python@v1
        with:
          python-version: 3.9
      - name: Setup dependencies
        run: |
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
      - name: Run backup
        run: python main.py

      - name: Commit changes
        uses: elstudio/actions-js-build/commit@v3
        with:
          commitMessage: Automated snapshot
```

With this yml file, GitHub triggers the pipeline on a schedule: `10 20 * * 1,2,3,4,5` means 20:10 on Monday through Friday (UTC), chosen because [arxiv](https://arxiv.org/help/submit) publishes its daily update at 20:00 in the zero timezone, i.e. 4:00 the next morning in UTC+8; the workflow is also triggered whenever the repository is updated. A run then proceeds as follows: in the specified environment, install the dependencies listed in `requirements.txt`, execute `main.py`, and finally commit the resulting changes back to this repository.
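
Note that GitHub evaluates cron expressions in UTC, so moving the run time means converting from your own timezone by hand. For example, to have the digest ready by 08:30 Beijing time (UTC+8), schedule 00:30 UTC; a sketch:

```
schedule:
  - cron: "30 0 * * 1,2,3,4,5"   # 00:30 UTC = 08:30 UTC+8, Monday to Friday
```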
### Local debugging

To run locally:

```cmd
python PATH-TO-CODE/main.py
```

While debugging locally, you can add the following two lines to `main.py` to route traffic through a local proxy:

```python
os.environ["http_proxy"] = "http://127.0.0.1:XXXX"  # change XXXX to your local proxy port
os.environ["https_proxy"] = "http://127.0.0.1:XXXX"
```



## Chatpaper

The Arxiv retrieval and paper-summarization parts of this repository are adapted from [Chatpaper](https://github.com/kaixindelele/ChatPaper).

Currently only the abstract and introduction of each paper are used, and the following five questions are asked. You can edit the prompt yourself for further customization; a sketch follows the list.

```
- (1): What is the research background of this article?
- (2): What are the past methods? What are the problems with them? How does the proposed approach differ from existing methods, and how does it address those problems? Is the proposed approach well motivated?
- (3): What is the contribution of the paper?
- (4): What is the research methodology proposed in this paper?
- (5): On what tasks, and with what performance, are the methods evaluated? Can the performance support the goals?
```
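
The five questions are plain strings inside the `user` message that `Reader.chat_summary` builds in `main.py`, so customizing them is just string editing. A sketch of appending a hypothetical sixth question (`base_user_prompt` stands for the existing user-message string, and the question itself is only an example):

```python
# Sketch: extend the summary prompt built in Reader.chat_summary (main.py).
base_user_prompt = "...the existing user message with questions (1)-(5)..."
extra_question = "- (6):What are the limitations of this work and what future work do the authors suggest?\n"
user_prompt = base_user_prompt.replace(
    "Follow the format of the output that follows:",
    extra_question + "Follow the format of the output that follows:",
)
# Also add a matching "- (6):xxx.\n" line to the output-format block so the
# model answers the new question in the same structured form.
```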


### Caveats

Note that in this repository's Actions settings you need to change Workflow permissions to read and write permissions; otherwise the daily generated `.md` file cannot be committed back to the repository. A per-workflow alternative is sketched after the references.

# References

Common Git operations: https://www.ruanyifeng.com/blog/2015/12/git-cheat-sheet.html

Automatic issues from arxiv papers: https://github.com/kobiso/get-daily-arxiv-noti

Creating a GitHub personal access token: https://docs.github.com/cn/github/authenticating-to-github/keeping-your-account-and-data-secure/creating-a-personal-access-token

ChatPaper: https://github.com/kaixindelele/ChatPaper
--------------------------------------------------------------------------------
/get_paper_from_pdf.py:
--------------------------------------------------------------------------------
import io
import os

import fitz  # PyMuPDF
from PIL import Image


class Paper:
    def __init__(self, path, title='', url='', abs='', authors=None):
        # build a Paper object from a pdf path
        self.url = url  # paper link
        self.path = path  # pdf path
        self.section_names = []  # section titles
        self.section_texts = {}  # section contents
        self.abs = abs
        self.title_page = 0
        if title == '':
            self.pdf = fitz.open(self.path)  # the pdf document
            self.title = self.get_title()
            self.parse_pdf()
        else:
            self.title = title
        self.authors = authors if authors is not None else []  # avoid a shared mutable default
        self.roman_num = ["I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IIX", "IX", "X"]
        self.digit_num = [str(d + 1) for d in range(10)]
        self.first_image = ''

    def parse_pdf(self):
        self.pdf = fitz.open(self.path)  # the pdf document
        self.text_list = [page.get_text() for page in self.pdf]
        self.all_text = ' '.join(self.text_list)
        self.section_page_dict = self._get_all_page_index()  # maps section name -> page index
        print("section_page_dict", self.section_page_dict)
        self.section_text_dict = self._get_all_page()  # maps section name -> section text
        self.section_text_dict.update({"title": self.title})
        self.section_text_dict.update({"paper_info": self.get_paper_info()})
        self.pdf.close()

    def get_paper_info(self):
        first_page_text = self.pdf[self.title_page].get_text()
        if "Abstract" in self.section_text_dict.keys():
            abstract_text = self.section_text_dict['Abstract']
        else:
            abstract_text = self.abs
        first_page_text = first_page_text.replace(abstract_text, "")
        return first_page_text

    def get_image_path(self, image_path=''):
        """
        Save the largest image in the PDF to a local file and return its path,
        so it can be read later (e.g. uploaded to gitee).
        :param image_path: directory to save the extracted image into
        :return: (saved file path, image extension), or (None, None) if the PDF has no images
        """
        max_size = 0
        best_image, best_ext = None, None
        with fitz.Document(self.path) as my_pdf_file:
            # iterate over all pages
            for page_number in range(1, len(my_pdf_file) + 1):
                page = my_pdf_file[page_number - 1]
                # iterate over all images on the current page
                for image in page.get_images():
                    # the image xref
                    xref_value = image[0]
                    # extract the image info
                    base_image = my_pdf_file.extract_image(xref_value)
                    image_bytes = base_image["image"]  # raw image bytes
                    ext = base_image["ext"]  # image file extension
                    # load the image and track the largest one together with its
                    # extension (the original code tracked ext separately, which
                    # could pair the largest image with the wrong extension)
                    pil_image = Image.open(io.BytesIO(image_bytes))
                    image_size = pil_image.size[0] * pil_image.size[1]
                    if image_size > max_size:
                        max_size = image_size
                        best_image, best_ext = pil_image, ext
        if best_image is None:
            return None, None
        image_name = f"image.{best_ext}"
        im_path = os.path.join(image_path, image_name)
        print("im_path:", im_path)

        # downscale so the longer side is at most 480 px
        max_pix = 480
        if best_image.size[0] > best_image.size[1]:
            min_pix = int(best_image.size[1] * (max_pix / best_image.size[0]))
            newsize = (max_pix, min_pix)
        else:
            min_pix = int(best_image.size[0] * (max_pix / best_image.size[1]))
            newsize = (min_pix, max_pix)
        best_image = best_image.resize(newsize)

        best_image.save(im_path)
        return im_path, best_ext
    # identify likely section headings (by their numbering and punctuation) and
    # return them as a list
    def get_chapter_names(self):
        # open the pdf file
        doc = fitz.open(self.path)
        text_list = [page.get_text() for page in doc]
        all_text = ''
        for text in text_list:
            all_text += text
        # collect candidate chapter names
        chapter_names = []
        for line in all_text.split('\n'):
            if '.' in line:
                point_split_list = line.split('.')
                space_split_list = line.split(' ')
                if 1 < len(space_split_list) < 5:
                    if 1 < len(point_split_list) < 5 and (point_split_list[0] in self.roman_num or point_split_list[0] in self.digit_num):
                        print("line:", line)
                        chapter_names.append(line)
                    # this branch may introduce new bugs; it exists to also catch
                    # headings such as "Introduction" that lack a numeral prefix
                    elif 1 < len(point_split_list) < 5:
                        print("line:", line)
                        chapter_names.append(line)

        return chapter_names

    def get_title(self):
        doc = self.pdf  # the open pdf document
        max_font_size = 0  # largest font size seen so far
        max_string = ""  # text of the span with the largest font size
        max_font_sizes = [0]
        for page_index, page in enumerate(doc):  # iterate over pages
            text = page.get_text("dict")  # structured text of the page
            blocks = text["blocks"]  # list of text blocks
            for block in blocks:  # iterate over blocks
                if block["type"] == 0 and len(block['lines']):  # a text block
                    if len(block["lines"][0]["spans"]):
                        font_size = block["lines"][0]["spans"][0]["size"]  # font size of the first span of the first line
                        max_font_sizes.append(font_size)
                        if font_size > max_font_size:  # new maximum
                            max_font_size = font_size
                            max_string = block["lines"][0]["spans"][0]["text"]
        max_font_sizes.sort()
        print("max_font_sizes", max_font_sizes[-10:])
        cur_title = ''
        for page_index, page in enumerate(doc):  # iterate over pages
            text = page.get_text("dict")
            blocks = text["blocks"]
            for block in blocks:
                if block["type"] == 0 and len(block['lines']):
                    if len(block["lines"][0]["spans"]):
                        cur_string = block["lines"][0]["spans"][0]["text"]  # text of the first span
                        font_flags = block["lines"][0]["spans"][0]["flags"]  # font flags of the first span
                        font_size = block["lines"][0]["spans"][0]["size"]  # font size of the first span
                        # treat spans whose size is close to one of the two largest
                        # sizes in the document as parts of the title
                        if abs(font_size - max_font_sizes[-1]) < 0.3 or abs(font_size - max_font_sizes[-2]) < 0.3:
                            if len(cur_string) > 4 and "arXiv" not in cur_string:
                                if cur_title == '':
                                    cur_title += cur_string
                                else:
                                    cur_title += ' ' + cur_string
                                self.title_page = page_index
        title = cur_title.replace('\n', ' ')
        return title

    def _get_all_page_index(self):
        # section names to look for
        section_list = ["Abstract",
                        'Introduction', 'Related Work', 'Background',
                        "Preliminary", "Problem Formulation",
                        'Methods', 'Methodology', "Method", 'Approach', 'Approaches',
                        # experiments
                        "Materials and Methods", "Experiment Settings",
                        'Experiment', "Experimental Results", "Evaluation", "Experiments",
                        "Results", 'Findings', 'Data Analysis',
                        "Discussion", "Results and Discussion", "Conclusion",
                        'References']
        # map from section name to the page index where it appears
        section_page_dict = {}
        # iterate over every page of the document
        for page_index, page in enumerate(self.pdf):
            # text content of the current page
            cur_text = page.get_text()
            # look for each section name
            for section_name in section_list:
                section_name_upper = section_name.upper()
                # "Abstract" is matched anywhere in the page text
                if "Abstract" == section_name and section_name in cur_text:
                    section_page_dict[section_name] = page_index
                # other sections must appear as a standalone line (in either case)
                else:
                    if section_name + '\n' in cur_text:
                        section_page_dict[section_name] = page_index
                    elif section_name_upper + '\n' in cur_text:
                        section_page_dict[section_name] = page_index
        # return all section names found, with the page they appear on
        return section_page_dict

    def _get_all_page(self):
        """
        Collect the text of every page and organize it into sections.

        Returns:
            section_dict (dict): section name -> section text.
        """
        section_dict = {}

        text_list = [page.get_text() for page in self.pdf]
        for sec_index, sec_name in enumerate(self.section_page_dict):
            print(sec_index, sec_name, self.section_page_dict[sec_name])
            if sec_index <= 0 and self.abs:
                continue
            else:
                # take the text from this section's page up to the next section's page
                start_page = self.section_page_dict[sec_name]
                if sec_index < len(list(self.section_page_dict.keys())) - 1:
                    end_page = self.section_page_dict[list(self.section_page_dict.keys())[sec_index + 1]]
                else:
                    end_page = len(text_list)
                print("start_page, end_page:", start_page, end_page)
                cur_sec_text = ''
                if end_page - start_page == 0:
                    # the section starts and ends on the same page: slice between
                    # this section's heading and the next one's
                    if sec_index < len(list(self.section_page_dict.keys())) - 1:
                        next_sec = list(self.section_page_dict.keys())[sec_index + 1]
                        if text_list[start_page].find(sec_name) == -1:
                            start_i = text_list[start_page].find(sec_name.upper())
                        else:
                            start_i = text_list[start_page].find(sec_name)
                        if text_list[start_page].find(next_sec) == -1:
                            end_i = text_list[start_page].find(next_sec.upper())
                        else:
                            end_i = text_list[start_page].find(next_sec)
                        cur_sec_text += text_list[start_page][start_i:end_i]
                else:
                    for page_i in range(start_page, end_page):
                        if page_i == start_page:
                            if text_list[start_page].find(sec_name) == -1:
                                start_i = text_list[start_page].find(sec_name.upper())
                            else:
                                start_i = text_list[start_page].find(sec_name)
                            cur_sec_text += text_list[page_i][start_i:]
                        elif page_i < end_page:
                            cur_sec_text += text_list[page_i]
                        elif page_i == end_page:
                            if sec_index < len(list(self.section_page_dict.keys())) - 1:
                                next_sec = list(self.section_page_dict.keys())[sec_index + 1]
                                if text_list[start_page].find(next_sec) == -1:
                                    end_i = text_list[start_page].find(next_sec.upper())
                                else:
                                    end_i = text_list[start_page].find(next_sec)
                                cur_sec_text += text_list[page_i][:end_i]
                section_dict[sec_name] = cur_sec_text.replace('-\n', '').replace('\n', ' ')
        return section_dict


def main():
    path = r'demo.pdf'
    paper = Paper(path=path)
    paper.parse_pdf()
    for key, value in paper.section_text_dict.items():
        print(key, value)
        print("*" * 40)


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
import os
import re
import argparse
import base64
from datetime import datetime, timedelta

import arxiv
import openai
import pytz
import requests
import tenacity
import tiktoken

from config import OPENAI_API_KEYS, KEYWORD_LIST, LANGUAGE
from get_paper_from_pdf import Paper
from github_issue import make_github_issue

# os.environ["http_proxy"] = "http://127.0.0.1:8118"
# os.environ["https_proxy"] = "http://127.0.0.1:8118"

now = datetime.now(pytz.utc)
yesterday = now - timedelta(days=1.1)


# The Reader class drives the pipeline: query arxiv, filter, download, summarize.
class Reader:
    def __init__(self, filter_keys, filter_times_span=(yesterday, now), key_word=None,
                 query=None, root_path='./',
                 sort=arxiv.SortCriterion.LastUpdatedDate,
                 user_name='default', args=None):
        self.user_name = user_name  # reader name
        self.key_word = key_word  # keywords the reader cares about
        self.query = query  # the arxiv search query
        self.sort = sort  # the chosen sort criterion
        if args.language == 'en':
            self.language = 'English'
        elif args.language == 'zh':
            self.language = 'Chinese'
        else:
            self.language = 'Chinese'
        self.filter_keys = filter_keys  # keywords that must all appear in the abstract
        self.filter_times_span = filter_times_span  # only keep papers updated within this span

        self.root_path = root_path

        # pool of OpenAI API keys to rotate through
        self.chat_api_list = OPENAI_API_KEYS

        self.cur_api = 0
        self.file_format = args.file_format
        self.max_token_num = 4096
        self.encoding = tiktoken.get_encoding("gpt2")

    def get_arxiv(self, max_results=30):
        # https://info.arxiv.org/help/api/user-manual.html#query_details
        search = arxiv.Search(query=self.query,
                              max_results=max_results,
                              sort_by=self.sort,
                              sort_order=arxiv.SortOrder.Descending,
                              )
        return search

    def filter_arxiv(self, max_results=30):
        search = self.get_arxiv(max_results=max_results)
        print("all search:")
        for index, result in enumerate(search.results()):
            print(index, result.title, result.updated)

        filter_results = []
        filter_keys = self.filter_keys

        print("filter_keys:", self.filter_keys)
        # a paper counts as a hit only if every keyword appears in its abstract
        for index, result in enumerate(search.results()):
            # drop papers outside the time span
            if result.updated < self.filter_times_span[0] or result.updated > self.filter_times_span[1]:
                continue
            abs_text = result.summary.replace('-\n', '-').replace('\n', ' ')
            meet_num = 0
            for f_key in filter_keys.split(" "):
                if f_key.lower() in abs_text.lower():
                    meet_num += 1
            if meet_num == len(filter_keys.split(" ")):
                filter_results.append(result)
        print("number of papers left after filtering:")
        print("filter_results:", len(filter_results))
        print("filter_papers:")
        for index, result in enumerate(filter_results):
            print(index, result.title, result.updated)
        return filter_results

    def validateTitle(self, title):
        # sanitize the paper title so it can be used as a file name
        rstr = r"[\/\\\:\*\?\"\<\>\|]"  # '/ \ : * ? " < > |'
        new_title = re.sub(rstr, "_", title)  # replace with underscores
        return new_title

    def download_pdf(self, filter_results):
        # create the target folder first
        date_str = str(datetime.now())[:13].replace(' ', '-')
        path = self.root_path + 'pdf_files/' + self.query.replace('au: ', '').replace('title: ', '').replace('ti: ', '').replace(':', ' ')[:25] + '-' + date_str
        os.makedirs(path, exist_ok=True)
        print("All_paper:", len(filter_results))
        # start downloading:
        paper_list = []
        for r_index, result in enumerate(filter_results):
            try:
                title_str = self.validateTitle(result.title)
                pdf_name = title_str + '.pdf'
                self.try_download_pdf(result, path, pdf_name)
                paper_path = os.path.join(path, pdf_name)
                print("paper_path:", paper_path)
                paper = Paper(path=paper_path,
                              url=result.entry_id,
                              title=result.title,
                              abs=result.summary.replace('-\n', '-').replace('\n', ' '),
                              authors=[str(aut) for aut in result.authors],
                              )
                # downloaded; start parsing:
                paper.parse_pdf()
                paper_list.append(paper)
            except Exception as e:
                print("download_error:", e)
        return paper_list

    @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
                    stop=tenacity.stop_after_attempt(5),
                    reraise=True)
    def try_download_pdf(self, result, path, pdf_name):
        result.download_pdf(path, filename=pdf_name)

    @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
                    stop=tenacity.stop_after_attempt(5),
                    reraise=True)
    def upload_gitee(self, image_path, image_name='', ext='png'):
        """
        Upload an image to gitee. Carried over from ChatPaper: it expects a gitee
        access key and config (self.gitee_key, self.config) that are not wired up
        in this repository, so it is effectively unused here.
        """
        with open(image_path, 'rb') as f:
            base64_data = base64.b64encode(f.read())
            base64_content = base64_data.decode()

        date_str = str(datetime.now())[:19].replace(':', '-').replace(' ', '-') + '.' + ext
        path = image_name + '-' + date_str

        payload = {
            "access_token": self.gitee_key,
            "owner": self.config.get('Gitee', 'owner'),
            "repo": self.config.get('Gitee', 'repo'),
            "path": self.config.get('Gitee', 'path'),
            "content": base64_content,
            "message": "upload image"
        }
        # change these to your own gitee account, repository and folder name:
        url = 'https://gitee.com/api/v5/repos/' + self.config.get('Gitee', 'owner') + '/' + self.config.get('Gitee', 'repo') + '/contents/' + self.config.get('Gitee', 'path') + '/' + path
        rep = requests.post(url, json=payload).json()
        print("rep:", rep)
        if 'content' in rep.keys():
            image_url = rep['content']['download_url']
        else:
            image_url = r"https://gitee.com/api/v5/repos/" + self.config.get('Gitee', 'owner') + '/' + self.config.get('Gitee', 'repo') + '/contents/' + self.config.get('Gitee', 'path') + '/' + path

        return image_url

    def summary_with_chat(self, paper_list, htmls=None):
        if htmls is None:
            htmls = []
        for paper_index, paper in enumerate(paper_list):
            # Step 1: summarize from the title, abstract and introduction.
            text = ''
            text += 'Title:' + paper.title
            text += 'Url:' + paper.url
            text += 'Abstract:' + paper.abs
            text += 'Paper_info:' + paper.section_text_dict['paper_info']
            # introduction
            text += list(paper.section_text_dict.values())[0]
            chat_summary_text = ""
            try:
                chat_summary_text = self.chat_summary(text=text)
            except Exception as e:
                print("summary_error:", e)
                if "maximum context" in str(e):
                    # the API error message reports the token count; parse it out
                    # and retry with a larger prompt-token budget
                    current_tokens_index = str(e).find("your messages resulted in") + len("your messages resulted in") + 1
                    offset = int(str(e)[current_tokens_index:current_tokens_index + 4])
                    summary_prompt_token = offset + 1000 + 150
                    chat_summary_text = self.chat_summary(text=text, summary_prompt_token=summary_prompt_token)

            # htmls.append('## Paper:' + str(paper_index + 1))
            htmls.append(f'## {paper.title}')
            htmls.append(f'- **Url**: {paper.url}')
            htmls.append(f'- **Authors**: {paper.authors}')
            htmls.append(f'- **Abstract**: {paper.abs}')
            htmls.append('\n')
            htmls.append(chat_summary_text)
            htmls.append('\n')
            # Step 2: summarize the method section (currently disabled).
            # TODO: some papers name their method section after the algorithm itself,
            # so simple keyword matching often misses it; a better scheme is needed.
            # method_key = ''
            # for parse_key in paper.section_text_dict.keys():
            #     if 'method' in parse_key.lower() or 'approach' in parse_key.lower():
            #         method_key = parse_key
            #         break
            #
            # if method_key != '':
            #     text = ''
            #     method_text = ''
            #     summary_text = ''
            #     summary_text += "<summary>" + chat_summary_text
            #     # methods
            #     method_text += paper.section_text_dict[method_key]
            #     text = summary_text + "\n\n<Method>:\n\n" + method_text
            #     chat_method_text = ""
            #     try:
            #         chat_method_text = self.chat_method(text=text)
            #     except Exception as e:
            #         print("method_error:", e)
            #         if "maximum context" in str(e):
            #             current_tokens_index = str(e).find("your messages resulted in") + len("your messages resulted in") + 1
            #             offset = int(str(e)[current_tokens_index:current_tokens_index + 4])
            #             method_prompt_token = offset + 800 + 150
            #             chat_method_text = self.chat_method(text=text, method_prompt_token=method_prompt_token)
            #     htmls.append(chat_method_text)
            # else:
            #     chat_method_text = ''
            # htmls.append("\n" * 4)
            #
            # # Step 3: summarize the whole paper and grade it (currently disabled).
            # conclusion_key = ''
            # for parse_key in paper.section_text_dict.keys():
            #     if 'conclu' in parse_key.lower():
            #         conclusion_key = parse_key
            #         break
            #
            # text = ''
            # conclusion_text = ''
            # summary_text = ''
            # summary_text += "<summary>" + chat_summary_text + "\n<Method>:\n" + chat_method_text
            # if conclusion_key != '':
            #     # conclusion
            #     conclusion_text += paper.section_text_dict[conclusion_key]
            #     text = summary_text + "\n\n<Conclusion>:\n\n" + conclusion_text
            # else:
            #     text = summary_text
            # chat_conclusion_text = ""
            # try:
            #     chat_conclusion_text = self.chat_conclusion(text=text)
            # except Exception as e:
            #     print("conclusion_error:", e)
            #     if "maximum context" in str(e):
            #         current_tokens_index = str(e).find("your messages resulted in") + len("your messages resulted in") + 1
            #         offset = int(str(e)[current_tokens_index:current_tokens_index + 4])
            #         conclusion_prompt_token = offset + 800 + 150
            #         chat_conclusion_text = self.chat_conclusion(text=text, conclusion_prompt_token=conclusion_prompt_token)
            # htmls.append(chat_conclusion_text)
            # htmls.append("\n" * 4)
            #
            # file_name = os.path.join(export_path, date_str + '-' + self.validateTitle(paper.title) + ".md")
            # self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
            # self.save_to_file(htmls)
            # htmls = []

    @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
                    stop=tenacity.stop_after_attempt(5),
                    reraise=True)
    def chat_conclusion(self, text, conclusion_prompt_token=800):
        openai.api_key = self.chat_api_list[self.cur_api]
        # rotate through the key pool (the original check reset at len-1 and
        # therefore never used the last key)
        self.cur_api = (self.cur_api + 1) % len(self.chat_api_list)
        text_token = len(self.encoding.encode(text))
        # clip the input so prompt + input stay within the model's context window
        clip_text_index = int(len(text) * (self.max_token_num - conclusion_prompt_token) / text_token)
        clip_text = text[:clip_text_index]

        messages = [
            # the role chatgpt plays
            {"role": "system", "content": "You are a reviewer in the field of [" + self.key_word + "] and you need to critically review this article"},
            # background knowledge; the flow loosely follows an OpenReview-style review
            {"role": "assistant", "content": "This is the <summary> and <conclusion> part of an English literature, where <summary> you have already summarized, but the <conclusion> part, I need your help to summarize the following questions:" + clip_text},
            {"role": "user", "content": """
8. Make the following summary. Be sure to use {} answers (proper nouns need to be marked in English).
- (1):What is the significance of this piece of work?
- (2):Summarize the strengths and weaknesses of this article in three dimensions: innovation point, performance, and workload.
.......
Follow the format of the output later:
8. Conclusion: \n\n
- (1):xxx;\n
- (2):Innovation point: xxx; Performance: xxx; Workload: xxx;\n

Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.
""".format(self.language, self.language)},
        ]
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            # keeping the prompt in English uses fewer tokens
            messages=messages,
        )
        result = ''
        for choice in response.choices:
            result += choice.message.content
        print("conclusion_result:\n", result)
        print("prompt_token_used:", response.usage.prompt_tokens,
              "completion_token_used:", response.usage.completion_tokens,
              "total_token_used:", response.usage.total_tokens)
        print("response_time:", response.response_ms / 1000.0, 's')
        return result

    @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
                    stop=tenacity.stop_after_attempt(5),
                    reraise=True)
    def chat_method(self, text, method_prompt_token=800):
        openai.api_key = self.chat_api_list[self.cur_api]
        self.cur_api = (self.cur_api + 1) % len(self.chat_api_list)
        text_token = len(self.encoding.encode(text))
        clip_text_index = int(len(text) * (self.max_token_num - method_prompt_token) / text_token)
        clip_text = text[:clip_text_index]
        messages = [
            # the role chatgpt plays
            {"role": "system", "content": "You are a researcher in the field of [" + self.key_word + "] who is good at summarizing papers using concise statements"},
            # background knowledge
            {"role": "assistant", "content": "This is the <summary> and <Method> part of an English document, where <summary> you have summarized, but the <Method> part, I need your help to read and summarize the following questions." + clip_text},
            {"role": "user", "content": """
7. Describe in detail the methodological idea of this article. Be sure to use {} answers (proper nouns need to be marked in English). For example, its steps are.
- (1):...
- (2):...
- (3):...
- .......
Follow the format of the output that follows:
7. Methods: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
....... \n\n

Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.
""".format(self.language, self.language)},
        ]
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
        )
        result = ''
        for choice in response.choices:
            result += choice.message.content
        print("method_result:\n", result)
        print("prompt_token_used:", response.usage.prompt_tokens,
              "completion_token_used:", response.usage.completion_tokens,
              "total_token_used:", response.usage.total_tokens)
        print("response_time:", response.response_ms / 1000.0, 's')
        return result
    @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
                    stop=tenacity.stop_after_attempt(5),
                    reraise=True)
    def chat_summary(self, text, summary_prompt_token=1100):
        openai.api_key = self.chat_api_list[self.cur_api]
        self.cur_api = (self.cur_api + 1) % len(self.chat_api_list)
        text_token = len(self.encoding.encode(text))
        clip_text_index = int(len(text) * (self.max_token_num - summary_prompt_token) / text_token)
        clip_text = text[:clip_text_index]
        messages = [
            {"role": "system", "content": "You are a researcher in the field of [" + self.key_word + "] who is good at summarizing papers using concise statements"},
            {"role": "assistant", "content": "This is the title, author, link, abstract and introduction of an English document. I need your help to read and summarize the following questions: " + clip_text},
            {"role": "user", "content": """
Summarize according to the following five points. Be sure to use {} answers (proper nouns need to be marked in English).
- (1):What is the research background of this article?
- (2):What are the past methods? What are the problems with them? What difference is the proposed approach from existing methods? How does the proposed method address the mentioned problems? Is the proposed approach well-motivated?
- (3):What is the contribution of the paper?
- (4):What is the research methodology proposed in this paper?
- (5):On what task and what performance is achieved by the methods in this paper? Can the performance support their goals?
Follow the format of the output that follows:
**Summary**: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
- (4):xxx;\n
- (5):xxx.\n\n

Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed.
""".format(self.language, self.language)},
        ]

        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
        )
        result = ''
        for choice in response.choices:
            result += choice.message.content
        print("summary_result:\n", result)
        print("prompt_token_used:", response.usage.prompt_tokens,
              "completion_token_used:", response.usage.completion_tokens,
              "total_token_used:", response.usage.total_tokens)
        print("response_time:", response.response_ms / 1000.0, 's')
        return result

    # print the reader's settings
    def show_info(self):
        print(f"Key word: {self.key_word}")
        print(f"Query: {self.query}")
        print(f"Sort: {self.sort}")


def save_to_file(htmls, root_path='./', date_str=None, file_format='md'):
    # merge everything into one file and save it
    if date_str is None:
        date_str = str(datetime.now())[:13].replace(' ', '-')
    export_path = os.path.join(root_path, 'export')
    os.makedirs(export_path, exist_ok=True)
    mode = 'a'
    file_name = os.path.join(export_path, date_str + "." + file_format)
    export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)


def export_to_markdown(text, file_name, mode='w'):
    # optionally, the markdown module could convert the text to html first:
    # html = markdown.markdown(text)
    # write the (markdown) text to the file
    with open(file_name, mode, encoding="utf-8") as f:
        f.write(text)


def main(args):
    # choose the sort criterion
    if args.sort == 'Relevance':
        sort = arxiv.SortCriterion.Relevance
    elif args.sort == 'LastUpdatedDate':
        sort = arxiv.SortCriterion.LastUpdatedDate
    else:
        sort = arxiv.SortCriterion.Relevance

    if args.pdf_path:
        reader1 = Reader(key_word=args.key_word,
                         query=args.query,
                         filter_keys=args.filter_keys,
                         sort=sort,
                         args=args
                         )
        reader1.show_info()
        # decide whether pdf_path is a single file or a directory:
        paper_list = []
        if args.pdf_path.endswith(".pdf"):
            paper_list.append(Paper(path=args.pdf_path))
        else:
            for root, dirs, files in os.walk(args.pdf_path):
                print("root:", root, "dirs:", dirs, 'files:', files)  # current directory
                for filename in files:
                    # collect every PDF file found under the directory
                    if filename.endswith(".pdf"):
                        paper_list.append(Paper(path=os.path.join(root, filename)))
        print("------------------paper_num: {}------------------".format(len(paper_list)))
        for paper_index, paper_name in enumerate(paper_list):
            print(paper_index, paper_name.path.split('\\')[-1])
        reader1.summary_with_chat(paper_list=paper_list)
    else:
        filter_times_span = (now - timedelta(days=args.filter_times_span), now)
        title = str(now)[:13].replace(' ', '-')
        htmls_body = []
        for filter_key in args.filter_keys:
            # run the whole pipeline once per topic, e.g.
            # filter_key: remote sensing
            # query: all:remote AND all:sensing
            key_word = filter_key
            query = ''
            for item in filter_key.split(" "):
                if query != '':
                    query += ' AND '
                query += f'all:{item}'
            htmls = []
            htmls.append(f'# {filter_key}')
            reader1 = Reader(key_word=key_word,
                             query=query,
                             filter_keys=filter_key,
                             filter_times_span=filter_times_span,
                             sort=sort,
                             args=args
                             )
            reader1.show_info()
            filter_results = reader1.filter_arxiv(max_results=args.max_results)
            paper_list = reader1.download_pdf(filter_results)
            reader1.summary_with_chat(paper_list=paper_list, htmls=htmls)
            htmls_body += htmls
        save_to_file(htmls_body, date_str=title, root_path='./')
        make_github_issue(title=title, body="\n".join(htmls_body), labels=args.filter_keys)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--pdf_path", type=str, default='', help="if empty, the bot downloads papers from arxiv using --query")
    parser.add_argument("--query", type=str, default='all:remote AND all:sensing', help="the query string: ti: xx, au: xx, all: xx")
    parser.add_argument("--key_word", type=str, default='remote sensing', help="keyword describing the user's research field")
    parser.add_argument("--filter_keys", type=list, default=KEYWORD_LIST, help="filter keywords; every word must appear in the abstract for a paper to be selected")
    parser.add_argument("--filter_times_span", type=float, default=1.1, help="how many days back to include papers from")
    parser.add_argument("--max_results", type=int, default=20, help="the maximum number of search results")
    parser.add_argument("--sort", type=str, default="LastUpdatedDate", help="LastUpdatedDate | Relevance")
    parser.add_argument("--file_format", type=str, default='md', help="export file format; md is best if images are stored, otherwise txt stays cleaner")
    parser.add_argument("--language", type=str, default=LANGUAGE, help="output language of the summaries: zh (Chinese) or en (English)")

    args = parser.parse_args()
    import time

    start_time = time.time()
    main(args=args)
    print("summary time:", time.time() - start_time)
--------------------------------------------------------------------------------