├── ComfyUI_Llama3_8B_Node.py ├── README.md ├── __init__.py ├── example ├── example ├── example1.png ├── example2.png ├── example3.png └── example4.png └── pyproject.toml /ComfyUI_Llama3_8B_Node.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import re 4 | import transformers 5 | from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModel 6 | import torch 7 | from PIL import Image 8 | import folder_paths 9 | 10 | dir_path = os.path.dirname(os.path.abspath(__file__)) 11 | path_dir = os.path.dirname(dir_path) 12 | file_path = os.path.dirname(path_dir) 13 | def string_punctuation_bool(string_in): 14 | pattern = r"[^\w\s]$" 15 | string_bool = bool(re.search(pattern, string_in)) 16 | return string_bool 17 | 18 | def trans_reply(reply_language,user_content): 19 | if string_punctuation_bool(user_content): 20 | join_punctuation = " " 21 | else: 22 | join_punctuation = "," 23 | if reply_language == "chinese": 24 | user_content = f"{join_punctuation}".join([user_content, "用中文回复我"]) 25 | elif reply_language == "russian": 26 | user_content = f"{join_punctuation}".join([user_content, "Ответь мне по - русски"]) 27 | elif reply_language == "german": 28 | user_content = f"{join_punctuation}".join([user_content, "Antworte mir auf Deutsch"]) 29 | elif reply_language == "french": 30 | user_content = f"{join_punctuation}".join([user_content, "Répondez - moi en français"]) 31 | elif reply_language == "spanish": 32 | user_content = f"{join_punctuation}".join([user_content, "Contáctame en español"]) 33 | elif reply_language == "japanese": 34 | user_content = f"{join_punctuation}".join([user_content, "日本語で返事して"]) 35 | elif reply_language == "english": 36 | user_content = f"{join_punctuation}".join([user_content, "answer me in English"]) 37 | else: 38 | user_content = f"{join_punctuation}".join([user_content, "Reply to me in the language of my question mentioned above"]) 39 | return user_content 40 | 41 | paths = [] 42 | for search_path in folder_paths.get_folder_paths("diffusers"): 43 | if os.path.exists(search_path): 44 | for root, subdir, files in os.walk(search_path, followlinks=True): 45 | if "model.safetensors.index.json" in files: 46 | paths.append(os.path.relpath(root, start=search_path)) 47 | 48 | if paths != []: 49 | paths = [] + [x for x in paths if x] 50 | else: 51 | paths = ["no llama3 model in default diffusers directory", ] 52 | 53 | 54 | def get_local_path(file_path, model_path): 55 | path = os.path.join(file_path, "models", "diffusers", model_path) 56 | model_path = os.path.normpath(path) 57 | if sys.platform=='win32': 58 | model_path = model_path.replace('\\', "/") 59 | return model_path 60 | 61 | 62 | def tensor_to_image(tensor): 63 | tensor = tensor.cpu() 64 | image_np = tensor.squeeze().mul(255).clamp(0, 255).byte().numpy() 65 | image = Image.fromarray(image_np, mode='RGB') 66 | return image 67 | 68 | 69 | def get_instance_path(path): 70 | instance_path = os.path.normpath(path) 71 | if sys.platform=='win32': 72 | instance_path = instance_path.replace('\\', "/") 73 | return instance_path 74 | 75 | 76 | class Local_Or_Repo_Choice: 77 | def __init__(self): 78 | pass 79 | 80 | @classmethod 81 | def INPUT_TYPES(cls): 82 | return { 83 | "required": { 84 | "local_model_path": (paths,), 85 | "repo_id": ("STRING", {"default": "THUDM/cogvlm2-llama3-chat-19B"}) 86 | } 87 | } 88 | 89 | RETURN_TYPES = ("STRING",) 90 | RETURN_NAMES = ("repo_id",) 91 | FUNCTION = "repo_choice" 92 | CATEGORY = "Meta_Llama3" 93 | 94 | def repo_choice(self, local_model_path, repo_id): 95 | if repo_id == "": 96 | if local_model_path == ["no llama3 model in default diffusers directory", ]: 97 | raise "you need fill repo_id or download model in diffusers directory " 98 | elif local_model_path != ["no llama3 model in default diffusers directory", ]: 99 | model_path = get_local_path(file_path, local_model_path) 100 | repo_id = get_instance_path(model_path) 101 | elif repo_id != "" and repo_id.find("/") == -1: 102 | raise "Incorrect repo_id format" 103 | elif repo_id != "" and repo_id.find("\\") != -1: 104 | repo_id = get_instance_path(repo_id) 105 | return (repo_id,) 106 | 107 | 108 | class Meta_Llama3_8B: 109 | def __init__(self): 110 | pass 111 | 112 | @classmethod 113 | def INPUT_TYPES(cls): 114 | return { 115 | "required": { 116 | "repo_id": ("STRING", {"forceInput": True}), 117 | "max_new_tokens": ("INT", {"default": 256, "min": 32, "max": 4096, "step": 32, "display": "number"}), 118 | "temperature": ( 119 | "FLOAT", 120 | {"default": 0.6, "min": 0.01, "max": 0.99, "step": 0.01, "round": False, "display": "number"}), 121 | "top_p": ( 122 | "FLOAT", 123 | {"default": 0.9, "min": 0.01, "max": 0.99, "step": 0.01, "round": False, "display": "number"}), 124 | "get_model_online": ("BOOLEAN", {"default": True},), 125 | "reply_language": (["english", "chinese", "russian", "german", "french", "spanish", "japanese","Original_language"],), 126 | "system_content": ( 127 | "STRING", {"multiline": True, "default": "你叫何小喵,是一位回复私人对话的二次元白发傲娇猫娘助手"}), 128 | "user_content": ("STRING", {"multiline": True, "default": "何小喵,你喜欢吃什么?"}) 129 | } 130 | } 131 | 132 | RETURN_TYPES = ("STRING",) 133 | RETURN_NAMES = ("prompt",) 134 | FUNCTION = "meta_llama3_8b" 135 | CATEGORY = "Meta_Llama3" 136 | 137 | def meta_llama3_8b(self, repo_id, max_new_tokens, temperature, top_p, get_model_online, reply_language, 138 | system_content, user_content): 139 | user_content = trans_reply(reply_language, user_content) 140 | if not get_model_online: 141 | os.environ['TRANSFORMERS_OFFLINE'] = "1" 142 | try: 143 | pipeline = transformers.pipeline( 144 | "text-generation", 145 | model=repo_id, 146 | model_kwargs={"torch_dtype": torch.bfloat16}, 147 | device_map="auto", 148 | ) 149 | messages = [ 150 | {"role": "system", "content": f"{system_content}"}, 151 | {"role": "user", "content": f"{user_content}"}, 152 | ] 153 | 154 | prompt = pipeline.tokenizer.apply_chat_template( 155 | messages, 156 | tokenize=False, 157 | add_generation_prompt=True 158 | ) 159 | 160 | terminators = [ 161 | pipeline.tokenizer.eos_token_id, 162 | pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>") 163 | ] 164 | 165 | outputs = pipeline( 166 | prompt, 167 | max_new_tokens=max_new_tokens, 168 | eos_token_id=terminators, 169 | do_sample=True, 170 | temperature=temperature, 171 | top_p=top_p, 172 | ) 173 | prompt_output = (outputs[0]["generated_text"]) 174 | text_assistant = "<|eot_id|><|start_header_id|>assistant<|end_header_id|>" 175 | prompt_output = prompt_output.split(text_assistant, 1)[1] 176 | prompt_output = prompt_output.replace('*', ' *') 177 | # print(type(prompt_output), prompt_output) 178 | return (prompt_output,) 179 | except Exception as e: 180 | return (e,) 181 | 182 | 183 | class ChatQA_1p5_8b: 184 | def __init__(self): 185 | pass 186 | 187 | @classmethod 188 | def INPUT_TYPES(cls): 189 | return { 190 | "required": { 191 | "repo_id": ("STRING", {"forceInput": True}), 192 | "max_new_tokens": ("INT", {"default": 128, "min": 32, "max": 4096, "step": 32, "display": "number"}), 193 | "temperature": ( 194 | "FLOAT", 195 | {"default": 0.6, "min": 0.01, "max": 0.99, "step": 0.01, "round": False, "display": "number"}), 196 | "top_p": ( 197 | "FLOAT", 198 | {"default": 0.9, "min": 0.01, "max": 0.99, "step": 0.01, "round": False, "display": "number"}), 199 | "get_model_online": ("BOOLEAN", {"default": True},), 200 | "reply_language": (["english", "chinese", "russian", "german", "french", "spanish", "japanese","Original_language"],), 201 | "system": ( 202 | "STRING", 203 | {"multiline": True, 204 | "default": "System: This is a chat between a user and an artificial intelligence" 205 | " assistant. The assistant gives helpful, detailed, and polite answers " 206 | "to the user's questions based on the context. The assistant should also " 207 | "indicate when the answer cannot be found in the context."}), 208 | "instruction": ( 209 | "STRING", 210 | {"multiline": True, "default": "Please give a full and complete answer for the question."}), 211 | "user_content": ("STRING", {"multiline": True, 212 | "default": "你是一位撰写提示词的高级助理,现在给我写一个关于'一只小猫,穿着宇航服,漫步在月球表面的'的提示词"}) 213 | } 214 | } 215 | 216 | RETURN_TYPES = ("STRING",) 217 | RETURN_NAMES = ("prompt",) 218 | FUNCTION = "chatqa_1p5_8b" 219 | CATEGORY = "Meta_Llama3" 220 | 221 | def get_formatted_input(self, system, instruction, messages, context): 222 | for item in messages: 223 | if item['role'] == "user": 224 | # only apply this instruction for the first user turn 225 | item['content'] = instruction + " " + item['content'] 226 | break 227 | 228 | conversation = '\n\n'.join( 229 | ["User: " + item["content"] if item["role"] == "user" else "Assistant: " + item["content"] for 230 | item in messages]) + "\n\nAssistant:" 231 | formatted_input = system + "\n\n" + context + "\n\n" + conversation 232 | 233 | return formatted_input 234 | 235 | def chatqa_1p5_8b(self, repo_id, max_new_tokens, temperature, top_p, get_model_online, reply_language, system, 236 | instruction, 237 | user_content): 238 | user_content = trans_reply(reply_language, user_content) 239 | if not get_model_online: 240 | os.environ['TRANSFORMERS_OFFLINE'] = "1" 241 | try: 242 | tokenizer = AutoTokenizer.from_pretrained(repo_id) 243 | model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.float16, device_map="auto") 244 | messages = [{"role": "user", "content": user_content}] 245 | document = "" 246 | formatted_input = self.get_formatted_input(system, instruction, messages, document) 247 | tokenized_prompt = tokenizer(tokenizer.bos_token + formatted_input, return_tensors="pt").to( 248 | model.device) 249 | 250 | terminators = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>")] 251 | outputs = model.generate(input_ids=tokenized_prompt.input_ids, 252 | attention_mask=tokenized_prompt.attention_mask, 253 | do_sample=True, 254 | temperature=temperature, 255 | top_p=top_p, 256 | max_new_tokens=max_new_tokens, 257 | eos_token_id=terminators) 258 | response = outputs[0][tokenized_prompt.input_ids.shape[-1]:] 259 | print(tokenizer.decode(response, skip_special_tokens=True)) 260 | prompt_output = tokenizer.decode(response, skip_special_tokens=True) 261 | if ":" in prompt_output: 262 | prompt_output = prompt_output.split(":", 1)[1] 263 | prompt_output = prompt_output.strip().strip('\'"').replace("\n", " ") 264 | return (prompt_output,) 265 | 266 | except Exception as e: 267 | return (e,) 268 | 269 | 270 | class MiniCPM_Llama3_V25: 271 | def __init__(self): 272 | pass 273 | 274 | @classmethod 275 | def INPUT_TYPES(cls): 276 | return { 277 | "required": { 278 | "image": ("IMAGE",), 279 | "repo_id": ("STRING", {"forceInput": True}), 280 | "max_new_tokens": ("INT", {"default": 2048, "min": 32, "max": 4096, "step": 32, "display": "number"}), 281 | "temperature": ( 282 | "FLOAT", 283 | {"default": 0.7, "min": 0.01, "max": 0.99, "step": 0.01, "round": False, "display": "number"}), 284 | "top_p": ( 285 | "FLOAT", 286 | {"default": 0.9, "min": 0.01, "max": 0.99, "step": 0.01, "round": False, "display": "number"}), 287 | "reply_language": (["english", "chinese", "russian", "german", "french", "spanish", "japanese","Original_language"],), 288 | "question": ("STRING", {"multiline": True, 289 | "default": "What is in the image?"}) 290 | } 291 | } 292 | 293 | RETURN_TYPES = ("STRING",) 294 | RETURN_NAMES = ("prompt",) 295 | FUNCTION = "minicpm_llama3_v25" 296 | CATEGORY = "Meta_Llama3" 297 | 298 | def minicpm_llama3_v25(self, image, repo_id, max_new_tokens, temperature, top_p, reply_language, 299 | question): 300 | question = trans_reply(reply_language, question) 301 | try: 302 | model = AutoModel.from_pretrained(repo_id, trust_remote_code=True, 303 | torch_dtype=torch.float16) 304 | model = model.to(device='cuda') 305 | tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True) 306 | model.eval() 307 | image = tensor_to_image(image) 308 | msgs = [{'role': 'user', 'content': question}] 309 | res = model.chat( 310 | image=image, 311 | msgs=msgs, 312 | max_new_tokens=max_new_tokens, 313 | tokenizer=tokenizer, 314 | sampling=True, 315 | top_p=top_p, 316 | temperature=temperature 317 | ) 318 | # print(res) 319 | return (res,) 320 | except Exception as e: 321 | return (e,) 322 | 323 | 324 | NODE_CLASS_MAPPINGS = { 325 | "Local_Or_Repo_Choice": Local_Or_Repo_Choice, 326 | "Meta_Llama3_8B": Meta_Llama3_8B, 327 | "ChatQA_1p5_8b": ChatQA_1p5_8b, 328 | "MiniCPM_Llama3_V25": MiniCPM_Llama3_V25 329 | } 330 | 331 | NODE_DISPLAY_NAME_MAPPINGS = { 332 | "Local_Or_Repo_Choice": "Local_Or_Repo_Choice", 333 | "Meta_Llama3_8B": "Meta_Llama3_8B", 334 | "ChatQA_1p5_8b": "ChatQA_1p5_8b", 335 | "MiniCPM_Llama3_V25": "MiniCPM_Llama3_V25" 336 | } 337 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ComfyUI_Llama3_8B 2 | Llama3_8B for comfyUI, using pipeline workflow 3 | ----- 4 | Suport models 5 | ---- 6 | meta-llama/Meta-Llama-3-8B-Instruct 7 | gradientai/Llama-3-8B-Instruct-262k 8 | nvidia/Llama3-ChatQA-1.5-8B 9 | openbmb/MiniCPM-Llama3-V-2_5 10 | ... 11 | 12 | Update 13 | ----- 14 | 2024-05-23 更新,加入"openbmb/MiniCPM-Llama3-V-2_5"和模型选择菜单节点 15 | Updated on May 23, 2024, adding "openbmb/MiniCPM-Llama3-V-2-5" and model selection menu node 16 | 17 | Use 18 | ---- 19 | 20 | 下载模型,填写repoid,如“X:/meta-llama/Meta-Llama-3-8B-Instruct"的本地绝对路径,即可使用。 21 | 其他不需要许可的微调模型,可以直接填写,如"gradientai/Llama-3-8B-Instruct-262k",便直接下载模型。 22 | 23 | Download the model,Fill in the repoid, such as the local absolute path of "X:/meta llama/Meta Llama-3-8B Instrument", and it can be used. 24 | Other fine-tuning models that do not require permission can be filled in directly, such as "gradientai/Lama-3-8B-Instrument-262k", and the model can be downloaded directly. Domestic users should pay attention to downloading in advance. 25 | 26 | Example 27 | ---- 28 | ![](https://github.com/smthemex/ComfyUI_Llama3_8B/blob/main/example/example1.png) 29 | 30 | ![](https://github.com/smthemex/ComfyUI_Llama3_8B/blob/main/example/example2.png) 31 | 32 | ![](https://github.com/smthemex/ComfyUI_Llama3_8B/blob/main/example/example3.png) 33 | 34 | ![](https://github.com/smthemex/ComfyUI_Llama3_8B/blob/main/example/example4.png) 35 | 36 | My ComfyUI node list: 37 | ----- 38 | 39 | 1、ParlerTTS node:[ComfyUI_ParlerTTS](https://github.com/smthemex/ComfyUI_ParlerTTS) 40 | 41 | 2、Llama3_8B node:[ComfyUI_Llama3_8B](https://github.com/smthemex/ComfyUI_Llama3_8B) 42 | 43 | 3、HiDiffusion node:[ComfyUI_HiDiffusion_Pro](https://github.com/smthemex/ComfyUI_HiDiffusion_Pro) 44 | 45 | 4、ID_Animator node: [ComfyUI_ID_Animator](https://github.com/smthemex/ComfyUI_ID_Animator) 46 | 47 | 5、StoryDiffusion node:[ComfyUI_StoryDiffusion](https://github.com/smthemex/ComfyUI_StoryDiffusion) 48 | 49 | 6、Pops node:[ComfyUI_Pops](https://github.com/smthemex/ComfyUI_Pops) 50 | 51 | 7、stable-audio-open-1.0 node :[ComfyUI_StableAudio_Open](https://github.com/smthemex/ComfyUI_StableAudio_Open) 52 | 53 | 8、GLM4 node:[ComfyUI_ChatGLM_API](https://github.com/smthemex/ComfyUI_ChatGLM_API) 54 | 55 | 9、CustomNet node:[ComfyUI_CustomNet](https://github.com/smthemex/ComfyUI_CustomNet) 56 | 57 | 10、Pipeline_Tool node :[ComfyUI_Pipeline_Tool](https://github.com/smthemex/ComfyUI_Pipeline_Tool) 58 | 59 | 11、Pic2Story node :[ComfyUI_Pic2Story](https://github.com/smthemex/ComfyUI_Pic2Story) 60 | 61 | 12、PBR_Maker node:[ComfyUI_PBR_Maker](https://github.com/smthemex/ComfyUI_PBR_Maker) 62 | 63 | Citation 64 | ------ 65 | 66 | ``` python 67 | @article{yu2023rlhf, 68 | title={Rlhf-v: Towards trustworthy mllms via behavior alignment from fine-grained correctional human feedback}, 69 | author={Yu, Tianyu and Yao, Yuan and Zhang, Haoye and He, Taiwen and Han, Yifeng and Cui, Ganqu and Hu, Jinyi and Liu, Zhiyuan and Zheng, Hai-Tao and Sun, Maosong and others}, 70 | journal={arXiv preprint arXiv:2312.00849}, 71 | year={2023} 72 | } 73 | @article{viscpm, 74 | title={Large Multilingual Models Pivot Zero-Shot Multimodal Learning across Languages}, 75 | author={Jinyi Hu and Yuan Yao and Chongyi Wang and Shan Wang and Yinxu Pan and Qianyu Chen and Tianyu Yu and Hanghao Wu and Yue Zhao and Haoye Zhang and Xu Han and Yankai Lin and Jiao Xue and Dahai Li and Zhiyuan Liu and Maosong Sun}, 76 | journal={arXiv preprint arXiv:2308.12038}, 77 | year={2023} 78 | } 79 | @article{xu2024llava-uhd, 80 | title={{LLaVA-UHD}: an LMM Perceiving Any Aspect Ratio and High-Resolution Images}, 81 | author={Xu, Ruyi and Yao, Yuan and Guo, Zonghao and Cui, Junbo and Ni, Zanlin and Ge, Chunjiang and Chua, Tat-Seng and Liu, Zhiyuan and Huang, Gao}, 82 | journal={arXiv preprint arXiv:2403.11703}, 83 | year={2024} 84 | } 85 | ``` 86 | ``` python 87 | @article{liu2024chatqa, 88 | title={ChatQA: Surpassing GPT-4 on Conversational QA and RAG}, 89 | author={Liu, Zihan and Ping, Wei and Roy, Rajarshi and Xu, Peng and Lee, Chankyu and Shoeybi, Mohammad and Catanzaro, Bryan}, 90 | journal={arXiv preprint arXiv:2401.10225}, 91 | year={2024}} 92 | ``` 93 | ``` python 94 | @article{llama3modelcard, 95 | title={Llama 3 Model Card}, 96 | author={AI@Meta}, 97 | year={2024}, 98 | url = {https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md} 99 | ``` 100 | 101 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | python = sys.executable 4 | 5 | from .ComfyUI_Llama3_8B_Node import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS 6 | 7 | 8 | __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] 9 | -------------------------------------------------------------------------------- /example/example: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /example/example1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/smthemex/ComfyUI_Llama3_8B/27913b1f6ec435617311b092aae2a20fc8e966a5/example/example1.png -------------------------------------------------------------------------------- /example/example2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/smthemex/ComfyUI_Llama3_8B/27913b1f6ec435617311b092aae2a20fc8e966a5/example/example2.png -------------------------------------------------------------------------------- /example/example3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/smthemex/ComfyUI_Llama3_8B/27913b1f6ec435617311b092aae2a20fc8e966a5/example/example3.png -------------------------------------------------------------------------------- /example/example4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/smthemex/ComfyUI_Llama3_8B/27913b1f6ec435617311b092aae2a20fc8e966a5/example/example4.png -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "comfyui_llama3_8b" 3 | description = "easy using llama3_8b in comfyUI " 4 | version = "1.0.0" 5 | license = "LICENSE" 6 | 7 | [project.urls] 8 | Repository = "https://github.com/smthemex/ComfyUI_Llama3_8B" 9 | # Used by Comfy Registry https://comfyregistry.org 10 | 11 | [tool.comfy] 12 | PublisherId = "smthemx" 13 | DisplayName = "ComfyUI_Llama3_8B" 14 | Icon = "" 15 | --------------------------------------------------------------------------------