├── readme.md
└── prompt_settings.py

--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
# Official code for Prompt-Engineering for Open-Source LLMs

This is a collaboration between [Lamini](https://lamini.ai/) and [Deeplearning.ai](https://www.deeplearning.ai/short-courses/finetuning-large-language-models/). Taught by Sharon Zhou.

### Lamini API key (free)
Navigate [here](https://app.lamini.ai/) to copy your key.

### I want more
If you want more, [let us know](https://www.lamini.ai/contact), particularly for enterprise usage.

### Other resources

- [Simple RAG](https://github.com/lamini-ai/simple-rag): RAG in 80 lines of code. Fast, simple, easy, and 80% of what you need. RAG is a form of prompt-engineering (a minimal sketch appears after prompt_settings.py below).

- [Lamini SDK](https://github.com/lamini-ai/lamini-sdk/tree/main)

- [Lamini docs](https://lamini-ai.github.io/)

--------------------------------------------------------------------------------
/prompt_settings.py:
--------------------------------------------------------------------------------
from lamini import Lamini

#
# Mistral 7B
#

llm = Lamini(model_name="mistralai/Mistral-7B-Instruct-v0.1")

prompts = [
    "Given the fact that I'm drinking green juice, am I healthy?",
    "Respond kindly to the child: i really hate zucchini. why should i eat it?",
    "vscode - how to find code that has been checked in a long time ago, e.g. more than 6 months ago",
    """what does an example jsonl file look like that is loaded with this function? def load_examples(self):
        filename = self.saved_examples_path
        if not os.path.exists(filename):
            return {}

        # load the examples from the jsonl file using the jsonlines library
        with jsonlines.open(filename) as reader:
            examples = {}
            for row in reader:
                class_name = row["class_name"]
                example = row["examples"]
                self.add_class(class_name)
                examples[class_name] = example

        return examples""",
]

# Compare each raw prompt against the same prompt wrapped in Mistral-Instruct's
# [INST] ... [/INST] instruction tags.
for prompt in prompts:
    print(f"=============Prompt: {prompt}=============")
    print(llm.generate(prompt))
    print(f"=============Prompt: [INST] {prompt} [/INST]=============")
    print(llm.generate(f"[INST] {prompt} [/INST]"))


#
# Llama 2
#

llm_compare = Lamini(model_name="meta-llama/Llama-2-7b-chat-hf")

prompts = [
    {
        "system": "You are a health food nut.",
        "user": "I'm drinking green juice",
    },
]

# Compare naive concatenation of the system and user text against Llama 2's
# chat template, which wraps the system prompt in <<SYS>> ... <</SYS>> tags
# inside the [INST] ... [/INST] turn.
for prompt in prompts:
    concat_prompt = f"{prompt['system']} {prompt['user']}"
    hydrated_prompt = (
        f"[INST] <<SYS>>\n{prompt['system']}\n<</SYS>>\n\n{prompt['user']} [/INST]"
    )
    print(f"=============Prompt: {concat_prompt}=============")
    print(llm_compare.generate(concat_prompt))
    print(f"=============Prompt: {hydrated_prompt}=============")
    print(llm_compare.generate(hydrated_prompt))

--------------------------------------------------------------------------------
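/prompt_templates_sketch.py (hypothetical file, an illustrative sketch):
--------------------------------------------------------------------------------
# A minimal sketch of reusable prompt-hydration helpers for the two templates
# exercised in prompt_settings.py. The file and function names are hypothetical
# (they are not part of the original repo or the lamini SDK); the template
# strings follow the published Mistral-Instruct and Llama 2 chat formats.


def make_mistral_prompt(user: str) -> str:
    """Wrap a user message in Mistral-Instruct's [INST] ... [/INST] tags."""
    return f"[INST] {user} [/INST]"


def make_llama2_prompt(system: str, user: str) -> str:
    """Hydrate Llama 2's chat template: the system prompt sits inside
    <<SYS>> ... <</SYS>>, and the whole first turn inside [INST] ... [/INST]."""
    return f"[INST] <<SYS>>\n{system}\n<</SYS>>\n\n{user} [/INST]"


if __name__ == "__main__":
    # Reproduces the hydrated prompt built inline in prompt_settings.py.
    print(make_llama2_prompt("You are a health food nut.", "I'm drinking green juice"))

--------------------------------------------------------------------------------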
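/rag_prompt_sketch.py (hypothetical file, an illustrative sketch):
--------------------------------------------------------------------------------
# The readme calls RAG a form of prompt-engineering. This sketch shows why:
# retrieval-augmented generation amounts to retrieving text and splicing it
# into the prompt before calling the model. `retrieve_passages` is a stand-in
# for whatever retriever you use (e.g. the index in lamini-ai/simple-rag); it
# is not a real lamini API, and the example passage is made up.

from lamini import Lamini

llm = Lamini(model_name="mistralai/Mistral-7B-Instruct-v0.1")


def retrieve_passages(question: str) -> list[str]:
    # Placeholder retriever: a real one would embed the question and return
    # the nearest passages from a document index.
    return ["Green juice is mostly water, with some vitamins from leafy greens."]


def rag_generate(question: str) -> str:
    context = "\n".join(retrieve_passages(question))
    # The "engineering" is entirely in the prompt: context, then question.
    prompt = f"[INST] Answer using this context:\n{context}\n\nQuestion: {question} [/INST]"
    return llm.generate(prompt)


if __name__ == "__main__":
    print(rag_generate("Am I healthy if I drink green juice?"))

--------------------------------------------------------------------------------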