├── .env.tpl
├── .gitignore
├── README.md
├── config.py
├── docs
│   └── images
│       ├── duma.jpg
│       ├── gzh.jpeg
│       └── screen1.png
├── gpt_server.py
├── gr_funcs.py
├── llms
│   ├── chatglm.py
│   ├── chatgpt.py
│   ├── llm.py
│   ├── qwen.py
│   ├── requirements_chatglm3.txt
│   └── requirements_qwen.txt
├── main.py
├── requirements.txt
└── utils.py

--------------------------------------------------------------------------------
/.env.tpl:
--------------------------------------------------------------------------------
# Project
[prj]
# [Required] Absolute path of the project to analyze
dir=

# OpenAI-related settings
[openai]
# [Optional] ChatGPT API base URL. If you use a non-official endpoint (e.g. a relay service inside China), change this to your URL
base_url=https://api.openai.com/v1
# [Optional] Proxy settings. Needed when calling the official API from a network environment inside China.
http_proxy=
https_proxy=
# [Optional] Required when analyzing with ChatGPT; not needed when using an open-source LLM
api_key=sk-

# Local LLM settings
[local_llm]
# [Optional] When using an open-source LLM, set the absolute path of the local model cache
modelscope_cache=

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.ipynb_checkpoints
.DS_Store
.idea
Untitled.ipynb
__pycache__
tmp.txt
.env

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Project Reading Assistant

This project is for learning and exchange only. Never use it for commercial purposes, as that could cause a **code leak**.

**If you need the Chat-Code mode (ChatGPT + local codebase embeddings), switch to the v2.0 branch.**

**It was developed in a rush before heading home for the holidays, so many rough edges remain; updates will come gradually after the break.**

### Install dependencies

Python 3.10 or later is required.

To use ChatGPT, install the dependencies with:
```shell
pip install -r requirements.txt
```

To use an open-source LLM, install the matching requirements file in the `llms` directory. For `ChatGLM3`, run:
```shell
pip install -r llms/requirements_chatglm3.txt
```

### Edit the configuration file

The project root contains a template configuration file, `.env.tpl`. Rename it to `.env` and fill in the settings.

Every setting is documented in detail inside the file; just follow the comments.

If you want to plug in your own LLM, define a class in the `llms` directory that inherits from `LLM` and implements the `request` method, as in the sketch below.
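A minimal sketch of such an adapter (the file name, class name, and echo logic are illustrative, not part of the project):

```python
# llms/my_llm.py: hypothetical adapter; replace the echo logic with a real model call
from llms.llm import LLM


class MyLLM(LLM):
    def __init__(self, model_name):
        super().__init__(model_name)
        # load your model weights or create your API client here

    def request(self, sys_prompt, user_prompt: list, stream=False):
        # user_prompt is a list of (user_content, assistant_content) pairs;
        # the last pair holds the new question, with assistant_content=None
        query, _ = user_prompt[-1]
        yield f'echo: {sys_prompt} | {query}'
```

Note that `gpt_server.set_llm` dispatches on the part of the model name before the first `-`, so you also need a branch for your model there, plus an entry in `config.model_list` to make it selectable in the UI.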
### Run

```shell
python main.py
```

### Notes

1. Either 3.5 or 4.0 works as the model; I used 3.5 (gpt-3.5-turbo-1106) in the video demo.
2. Prefer a context window of at least 16k, since some source files are large and a smaller context may not fit them.
3. Delete non-source files first (archives, images, model weights, and so on). Reading them is pointless, can trigger avoidable errors, and may even waste your API quota.
4. **Watch your API quota.** Don't start with a project that has many or large files; try a small project first and keep an eye on how much quota it consumes.

This little project still has plenty of rough edges. Suggestions for improvement and pull requests are both welcome.

--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
import configparser
import os

model_list = [
    'gpt-3.5-turbo-1106',
    'gpt-4-1106-preview',
    'chatglm3-6b',
    'Qwen-7B-Chat',
    'Qwen-14B-Chat',
    'Qwen-14B-Chat-Int8',
    'Qwen-14B-Chat-Int4'
]


def init_config():
    # Create a config parser and read the .env file
    config = configparser.ConfigParser()
    config.read('.env')

    # Project directory
    os.environ['PRJ_DIR'] = config.get('prj', 'dir')
    if not os.environ['PRJ_DIR']:
        raise ValueError('Project path is not set')

    # OpenAI environment variables
    os.environ['OPENAI_BASE_URL'] = config.get('openai', 'base_url')
    os.environ['OPENAI_API_KEY'] = config.get('openai', 'api_key')

    # Proxy settings
    http_proxy = config.get('openai', 'http_proxy')
    https_proxy = config.get('openai', 'https_proxy')
    if http_proxy:
        os.environ['http_proxy'] = http_proxy
    if https_proxy:
        os.environ['https_proxy'] = https_proxy

    # Local LLM: ModelScope cache environment variable
    modelscope_cache = config.get('local_llm', 'modelscope_cache')
    if modelscope_cache:
        os.environ['MODELSCOPE_CACHE'] = modelscope_cache

--------------------------------------------------------------------------------
/docs/images/duma.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/duma-repo/ai_code_reader/20dabd632f6c7358ecd73529c62d083441cdc2b0/docs/images/duma.jpg

--------------------------------------------------------------------------------
/docs/images/gzh.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/duma-repo/ai_code_reader/20dabd632f6c7358ecd73529c62d083441cdc2b0/docs/images/gzh.jpeg

--------------------------------------------------------------------------------
/docs/images/screen1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/duma-repo/ai_code_reader/20dabd632f6c7358ecd73529c62d083441cdc2b0/docs/images/screen1.png

--------------------------------------------------------------------------------
/gpt_server.py:
--------------------------------------------------------------------------------
from llms.llm import LLM

model: LLM | None = None


def set_llm(model_name):
    global model

    # Dispatch on the model-name prefix, e.g. 'gpt-3.5-turbo-1106' -> 'gpt'
    model_cat = model_name.split('-')[0]
    if model_cat == 'gpt':
        from llms.chatgpt import ChatGPT
        model = ChatGPT(model_name)
    elif model_cat == 'chatglm3':
        from llms.chatglm import ChatGLM3
        model = ChatGLM3(model_name)
    elif model_cat == 'Qwen':
        from llms.qwen import Qwen
        model = Qwen(model_name)
    else:
        raise Exception(f'Unsupported model {model_name}')


def request_llm(sys_prompt: str, user_prompt: list, stream=False):
    if model is None:
        raise RuntimeError('Call set_llm() to pick a model first')
    return model.request(sys_prompt, user_prompt, stream)
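A hedged usage sketch of this dispatcher (it assumes a valid `OPENAI_API_KEY`; since every back end implements `request` as a generator, a non-streaming call is consumed with `next`):

```python
import gpt_server

gpt_server.set_llm('gpt-3.5-turbo-1106')  # the 'gpt' prefix selects the ChatGPT back end

reply = next(gpt_server.request_llm(
    'You are a helpful assistant.',
    [('What does this repo do?', None)],  # (user, assistant) turns; None marks the open question
))
print(reply)
```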
--------------------------------------------------------------------------------
/gr_funcs.py:
--------------------------------------------------------------------------------
import re

import gradio as gr

import utils
import gpt_server

llm_response = {}


def analyse_project(prj_path, progress=gr.Progress()):
    global llm_response
    llm_response = {}
    file_list = utils.get_all_files_in_folder(prj_path)

    for i, file_name in enumerate(file_list):
        relative_file_name = file_name.replace(prj_path, '.')
        progress(i / len(file_list), desc=f'Reading: {relative_file_name}')

        with open(file_name, 'r', encoding='utf-8') as f:
            file_content = f.read()

        sys_prompt = "You are a senior programmer helping a junior programmer read an open-source project. " \
                     "I will give you the content of each file, and you need to write a short, clear summary " \
                     "aimed at a beginner. Return Markdown (emoji are welcome where they add a little fun)."
        user_prompt = f"Source file path: {relative_file_name}, source code:\n```\n{file_content}```"

        response = gpt_server.request_llm(sys_prompt, [(user_prompt, None)])
        llm_response[file_name] = next(response)

    return 'Reading complete'


def get_lang_from_file(file_name):
    if file_name.endswith('.py'):
        return 'python'
    elif file_name.endswith('.md'):
        return 'markdown'
    elif file_name.endswith('.json'):
        return 'json'
    elif file_name.endswith('.html'):
        return 'html'
    elif file_name.endswith('.css'):
        return 'css'
    elif file_name.endswith('.yaml'):
        return 'yaml'
    elif file_name.endswith('.sh'):
        return 'shell'
    elif file_name.endswith('.js'):
        return 'javascript'

    return None


def view_prj_file(selected_file):
    global llm_response
    if not llm_response or selected_file not in llm_response:  # no LLM result yet; only show the code
        gpt_res_update = gr.update(visible=False)
        gpt_label_update = gr.update(visible=False)
        gpt_res_text = ''
    else:
        gpt_res_update = gr.update(visible=True)
        gpt_label_update = gr.update(visible=True)
        gpt_res_text = llm_response[selected_file]

    if selected_file.endswith('.py'):
        yield gr.update(visible=True, language='python'), gpt_label_update, gpt_res_update
    elif selected_file.endswith('.json'):
        yield gr.update(visible=True, language='json'), gpt_label_update, gpt_res_update
    else:
        yield gr.update(visible=True, language=None), gpt_label_update, gpt_res_update

    yield (selected_file,), [[None, None]], gpt_res_text


def gen_prj_summary_prompt():
    prefix_prompt = 'Here is a code project in which every file has already been summarized. ' \
                    'Based on the per-file summaries, write an overall summary that is short, clear, ' \
                    'and highlights the key points. Return Markdown; emoji may be used where helpful. ' \
                    'The file paths and summaries are as follows:\n'

    prompt = prefix_prompt
    for file_path, file_summary in llm_response.items():
        file_prompt = f'File name: {file_path}\nFile summary: {file_summary} \n\n'
        prompt = f'{prompt}{file_prompt}'

    suffix_prompt = 'What you write is a "README"-style summary of the whole project; do not summarize individual files again.'
    return f'{prompt}{suffix_prompt}'


def prj_chat(user_in_text: str, prj_chatbot: list):
    sys_prompt = "You are a senior advisor guiding algorithm-major graduates through their theses. " \
                 "There is some code that needs summarizing, and some paper rewriting work for you to guide."
    prj_chatbot.append([user_in_text, ''])
    yield prj_chatbot

    if user_in_text == 'Summarize the whole project':  # start a fresh conversation to summarize the project
        new_prompt = gen_prj_summary_prompt()
        print(new_prompt)
        response_stream = gpt_server.request_llm(sys_prompt, [(new_prompt, None)], stream=True)
    else:
        response_stream = gpt_server.request_llm(sys_prompt, prj_chatbot, stream=True)

    for chunk_content in response_stream:
        prj_chatbot[-1][1] = chunk_content
        yield prj_chatbot


def clear_textbox():
    return ''


def view_uncmt_file(selected_file):
    lang = get_lang_from_file(selected_file)
    return gr.update(language=lang, value=(selected_file,)), \
        gr.update(variant='primary', interactive=True, value='Add comments'), \
        gr.update(visible=False)


def ai_comment(btn_name, selected_file):
    if btn_name != 'Add comments':
        yield btn_name, gr.update(visible=False)
    else:
        yield 'Adding comments...', gr.update(visible=False)

        lang = get_lang_from_file(selected_file)
        with open(selected_file, 'r', encoding='utf-8') as f:
            file_content = f.read()
        sys_prompt = "You are a senior programmer who can read any code and add Chinese comments to it; " \
                     "for functions, add docstring-style comments. " \
                     "Return only the modified result, with no extra explanation."
        user_prompt = f"Source code:\n```{file_content}```"

        response = gpt_server.request_llm(sys_prompt, [(user_prompt, None)])
        res_code = next(response)
        # If the model wrapped its answer in a Markdown fence, keep only the fenced body
        if res_code.startswith('```') and res_code.endswith('```'):
            code_blocks = re.findall(r'```(?:\w+)?\n(.*?)\n```', res_code, re.DOTALL)
            res_code = code_blocks[0]

        yield 'Add comments', gr.update(visible=True, language=lang, value=res_code)


def model_change(model_name):
    gpt_server.set_llm(model_name)

    return model_name


def view_raw_lang_code_file(selected_file):
    lang = get_lang_from_file(selected_file)
    return gr.update(language=lang, value=(selected_file,)), \
        gr.update(variant='primary', interactive=True, value='Convert'), \
        gr.update(visible=False)


def change_code_lang(btn_name, raw_code, to_lang):
    if btn_name != 'Convert':
        yield btn_name, gr.update(visible=False)
    else:
        yield 'Converting...', gr.update(visible=False)

        sys_prompt = f"You are a senior programmer who can read code written in any programming language. " \
                     f"Convert the code below into `{to_lang}`. Requirements:\n" \
                     f"- Make sure the converted code is correct\n" \
                     f"- If something cannot be converted, leave it as-is, but explain why\n" \
                     f"- For third-party libraries, state which library the target language would depend on; " \
                     f"if the target language has no equivalent, point that out too\n" \
                     f"- Return Markdown, concise and to the point"
        user_prompt = f"Source code:\n```{raw_code}```"

        response = gpt_server.request_llm(sys_prompt, [(user_prompt, None)])
        res = next(response)

        yield 'Convert', gr.update(visible=True, value=res)
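`ai_comment` above guards against the model wrapping its whole answer in a Markdown fence; the regex keeps only the fenced body. A standalone check of that extraction:

```python
import re

res_code = "```python\nprint('hello')\n```"
blocks = re.findall(r'```(?:\w+)?\n(.*?)\n```', res_code, re.DOTALL)
print(blocks[0])  # -> print('hello')
```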
--------------------------------------------------------------------------------
/llms/chatglm.py:
--------------------------------------------------------------------------------
from modelscope import snapshot_download, AutoTokenizer, AutoModel

from llms.llm import LLM


class ChatGLM3(LLM):

    def __init__(self, model_name):
        super().__init__(model_name)

        model_dir = snapshot_download("ZhipuAI/chatglm3-6b", revision="v1.0.0")
        self.tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
        self.model = AutoModel.from_pretrained(model_dir, trust_remote_code=True).half().cuda()
        self.model = self.model.eval()

    def request(self, sys_prompt, user_prompt: list, stream=False):
        # Fold the system prompt into the latest user query; earlier turns become history
        query, _ = user_prompt[-1]
        query = f'{sys_prompt}\n\n{query}'
        history = []
        for user_content, assistant_content in user_prompt[:-1]:
            history.append({'role': 'user', 'content': user_content})
            history.append({'role': 'assistant', 'content': assistant_content})

        if stream:
            # stream_chat yields (response_so_far, history) tuples
            response = self.model.stream_chat(self.tokenizer, query, history=history)
            for chunk in response:
                yield chunk[0]
        else:
            response, _ = self.model.chat(self.tokenizer, query, history=history)
            yield response
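A hedged driver sketch for this wrapper (assumes a CUDA GPU and that ModelScope can fetch the `ZhipuAI/chatglm3-6b` weights):

```python
from llms.chatglm import ChatGLM3

llm = ChatGLM3('chatglm3-6b')

# each streamed chunk is the cumulative response so far, mirroring how
# gr_funcs.prj_chat overwrites the chatbot's last message on every chunk
for partial in llm.request('You are a concise assistant.', [('Hello!', None)], stream=True):
    print(partial)
```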
--------------------------------------------------------------------------------
/llms/chatgpt.py:
--------------------------------------------------------------------------------
from openai import OpenAI

from llms.llm import LLM


class ChatGPT(LLM):
    def __init__(self, model_name):
        super().__init__(model_name)
        self.client = OpenAI()

    def get_response(self, response, stream: bool):
        # Note: currently unused by the rest of the project
        if stream:
            return response
        else:
            return response.choices[0].message.content

    def request(self, sys_prompt, user_prompt: list, stream=False):
        req_msgs = [
            {"role": "system", "content": f"{sys_prompt}"},
        ]
        # user_prompt is a list of (user_content, assistant_content) pairs;
        # the last pair carries the new question, so its assistant part is empty
        for user_content, assistant_content in user_prompt:
            req_msgs.append({"role": "user", "content": f"{user_content}"})
            if assistant_content:
                req_msgs.append({"role": "assistant", "content": f"{assistant_content}"})

        response = self.client.chat.completions.create(
            model=self.model_name,
            messages=req_msgs,
            stream=stream
        )

        if stream:
            res = ''
            for chunk in response:
                chunk_message = chunk.choices[0].delta
                if chunk_message.content is not None and chunk_message.content != '':
                    res = f'{res}{chunk_message.content}'
                yield res
        else:
            yield response.choices[0].message.content

--------------------------------------------------------------------------------
/llms/llm.py:
--------------------------------------------------------------------------------

class LLM:
    """Base class for all model back ends."""

    def __init__(self, model_name):
        self.model_name = model_name

    def request(self, sys_prompt, user_prompt: list, stream=False):
        # Subclasses implement this as a generator that yields response text
        pass
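Because `request` here silently does nothing, a subclass that forgets to override it only fails when its (nonexistent) output is consumed. A stricter variant (an optional hardening sketch, not what the project ships) would fail fast at instantiation:

```python
from abc import ABC, abstractmethod


class StrictLLM(ABC):
    """Hypothetical fail-fast variant of LLM."""

    def __init__(self, model_name):
        self.model_name = model_name

    @abstractmethod
    def request(self, sys_prompt, user_prompt: list, stream=False):
        """Yield response text; concrete back ends implement this as a generator."""
```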
--------------------------------------------------------------------------------
/llms/qwen.py:
--------------------------------------------------------------------------------
from llms.llm import LLM
from modelscope import AutoModelForCausalLM, AutoTokenizer


class Qwen(LLM):
    def __init__(self, model_name):
        super().__init__(model_name)

        self.tokenizer = AutoTokenizer.from_pretrained(f'qwen/{model_name}', trust_remote_code=True)
        self.model = AutoModelForCausalLM.from_pretrained(f'qwen/{model_name}', device_map="auto",
                                                          trust_remote_code=True).eval()

    def request(self, sys_prompt, user_prompt: list, stream=False):
        # Fold the system prompt into the latest user query; earlier turns become history
        query, _ = user_prompt[-1]
        query = f'{sys_prompt}\n\n{query}'
        history = []
        for user_content, assistant_content in user_prompt[:-1]:
            history.append({'role': 'user', 'content': user_content})
            history.append({'role': 'assistant', 'content': assistant_content})

        if stream:
            response = self.model.chat_stream(self.tokenizer, query, history=history)
            for chunk in response:
                yield chunk
        else:
            response, _ = self.model.chat(self.tokenizer, query, history=history)
            yield response

--------------------------------------------------------------------------------
/llms/requirements_chatglm3.txt:
--------------------------------------------------------------------------------
protobuf
transformers>=4.30.2
cpm_kernels
torch>=2.0
gradio==4.4.1
mdtex2html
sentencepiece
modelscope

--------------------------------------------------------------------------------
/llms/requirements_qwen.txt:
--------------------------------------------------------------------------------
transformers==4.32.0
accelerate
tiktoken
einops
scipy
transformers_stream_generator==0.0.4
peft
gradio==4.4.1
modelscope
auto-gptq
optimum
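Each local model family pins its own dependency set, so install only the file that matches the model you plan to run (mirroring the README's install step):

```shell
pip install -r llms/requirements_chatglm3.txt   # for the ChatGLM3 back end
pip install -r llms/requirements_qwen.txt       # for the Qwen back ends
```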
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
import os

import gradio as gr

import config
import gr_funcs


def main(prj_dir):
    css = """
    #prg_chatbot { box-shadow: 0px 0px 1px rgba(0, 0, 0, 0.6); /* drop shadow */ }
    #prg_tb { box-shadow: 0px 0px 1px rgba(0, 0, 0, 0.6); /* drop shadow */ }
    #paper_file { box-shadow: 0px 0px 1px rgba(0, 0, 0, 0.6); /* drop shadow */ }
    #paper_cb { box-shadow: 0px 0px 1px rgba(0, 0, 0, 0.6); /* drop shadow */ }
    #paper_tb { box-shadow: 0px 0px 1px rgba(0, 0, 0, 0.6); /* drop shadow */ }
    #box_shad { box-shadow: 0px 0px 1px rgba(0, 0, 0, 0.6); /* drop shadow */ }

    .markdown-class {
        max-height: 800px;
        overflow-y: scroll;
    }
    """
    with gr.Blocks(title="Programmer's Best Buddy", theme=gr.themes.Soft(), analytics_enabled=False, css=css) as demo:
        prj_name_tb = gr.Textbox(value=f'{prj_dir}', visible=False)  # hidden carrier for the project path
        with gr.Accordion(label='Select a model (if you pick an open-source LLM that is not cached locally, it is downloaded automatically; wait for the download to finish before using the features below)'):
            model_selector = gr.Dropdown(choices=config.model_list, container=False, elem_id='box_shad')
        with gr.Row():
            prj_fe = gr.FileExplorer(label='Project files', root=prj_dir, file_count='single', scale=1)

        with gr.Accordion('Read project', open=False):
            with gr.Row():
                code = gr.Code(label='Code', visible=False, elem_id='code', scale=2)
                with gr.Column():
                    gpt_label = gr.Chatbot(label='Project reading assistant', height=40, visible=False, elem_id='gpt_label')  # decorative header only
                    gpt_md = gr.Markdown(visible=False, elem_id='llm_res', elem_classes='markdown-class')

            with gr.Row():
                dir_submit_btn = gr.Button('Read project', variant='primary')

            with gr.Row():
                label = gr.Label(label="Source reading progress", value='Waiting to start...')

        with gr.Accordion(label='Chat mode', open=False):
            with gr.Tab('Paper rewriting assistant'):
                with gr.Row():
                    prj_chatbot = gr.Chatbot(label='gpt', elem_id='prg_chatbot')
                with gr.Row():
                    prj_chat_txt = gr.Textbox(label='Input',
                                              value='Summarize the whole project',
                                              placeholder='Type here...',
                                              container=False,
                                              interactive=True,
                                              scale=5,
                                              elem_id='prg_tb')
                    prj_chat_btn = gr.Button(value='Send', variant='primary', scale=1, min_width=100)
            with gr.Tab('Paper reading assistant'):
                with gr.Row():
                    reader_paper = gr.File(scale=1, elem_id='paper_file')
                    with gr.Column(scale=2):
                        with gr.Row():
                            gr.Chatbot(label='Paper reading', scale=2, elem_id='paper_cb')
                        with gr.Row():
                            gr.Text(container=False, scale=2, elem_id='paper_tb', placeholder='Type here...',)
                            gr.Button('Send', min_width=50, scale=1, variant='primary')

        with gr.Accordion(label='Code comments', open=False, elem_id='code_cmt'):
            code_cmt_btn = gr.Button('Select a source file', variant='secondary', interactive=False)
            with gr.Row():
                uncmt_code = gr.Code(label='Original code', elem_id='uncmt_code')
                cmt_code = gr.Code(label='Commented code', elem_id='cmt_code', visible=False)

        with gr.Accordion(label='Language conversion', open=False, elem_id='code_lang_change'):
            with gr.Row():
                lang_to_change = [
                    'java', 'python', 'javascript', 'c++', 'php', 'go', 'r', 'perl', 'swift', 'ruby'
                ]
                to_lang = gr.Dropdown(choices=lang_to_change, container=False, value=lang_to_change[0], elem_id='box_shad', interactive=True, scale=2)
                code_lang_ch_btn = gr.Button('Select a source file', variant='secondary', interactive=False, scale=1)
            with gr.Row():
                raw_lang_code = gr.Code(label='Original code', elem_id='uncmt_code')
                code_lang_changed_md = gr.Markdown(label='Converted code', visible=False, elem_id='box_shad')
                # lang_changed_code = gr.Code(label='Converted code', elem_id='cmt_code', visible=False)

        # Model selection
        model_selector.select(gr_funcs.model_change, inputs=[model_selector], outputs=[model_selector])

        # Read-project button
        dir_submit_btn.click(gr_funcs.analyse_project, inputs=[prj_name_tb], outputs=[label])
        # File selection in the explorer
        prj_fe.change(gr_funcs.view_prj_file, inputs=[prj_fe], outputs=[code, gpt_label, gpt_md])

        # Chat send button
        prj_chat_btn.click(gr_funcs.prj_chat, inputs=[prj_chat_txt, prj_chatbot], outputs=[prj_chatbot])
        prj_chat_btn.click(gr_funcs.clear_textbox, outputs=prj_chat_txt)

        # Code-comment mode
        prj_fe.change(gr_funcs.view_uncmt_file, inputs=[prj_fe], outputs=[uncmt_code, code_cmt_btn, cmt_code])
        code_cmt_btn.click(gr_funcs.ai_comment, inputs=[code_cmt_btn, prj_fe], outputs=[code_cmt_btn, cmt_code])

        # Language-conversion mode
        prj_fe.change(gr_funcs.view_raw_lang_code_file,
                      inputs=[prj_fe],
                      outputs=[raw_lang_code, code_lang_ch_btn, code_lang_changed_md])
        code_lang_ch_btn.click(gr_funcs.change_code_lang,
                               inputs=[code_lang_ch_btn, raw_lang_code, to_lang],
                               outputs=[code_lang_ch_btn, code_lang_changed_md])

    demo.launch(share=False)


if __name__ == '__main__':
    config.init_config()
    main(os.environ['PRJ_DIR'])

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
openai==1.3.3
gradio==4.4.1

--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
import os


def get_all_files_in_folder(folder_path):
    """Collect all non-hidden files, skipping hidden directories and their contents."""
    file_list = []
    ignored_dirs = []
    for root, dirs, files in os.walk(folder_path):
        if root in ignored_dirs:
            # Everything under an ignored directory is ignored too
            ignored_dirs.extend(os.path.join(root, d) for d in dirs)
            continue
        ignored_dirs.extend(os.path.join(root, d) for d in dirs if d.startswith('.'))

        for file in files:
            if not file.startswith('.'):
                file_list.append(os.path.join(root, file))
    return file_list
--------------------------------------------------------------------------------
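A quick sanity check of the traversal above: hidden directories and everything beneath them are skipped, as are hidden files. A sketch (run from the repo root so `utils` is importable):

```python
import os
import tempfile

import utils

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, '.git'))
open(os.path.join(root, '.git', 'config'), 'w').close()  # inside a hidden dir: skipped
open(os.path.join(root, 'main.py'), 'w').close()         # regular file: listed

print(utils.get_all_files_in_folder(root))  # only .../main.py
```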