├── logo.png
├── cacheDIR.png
├── mystreamlitAPP.png
├── mystreamlitAPP-results.png
├── README.md
└── myapp.py
/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/llmwareSLIMS/main/logo.png
--------------------------------------------------------------------------------
/cacheDIR.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/llmwareSLIMS/main/cacheDIR.png
--------------------------------------------------------------------------------
/mystreamlitAPP.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/llmwareSLIMS/main/mystreamlitAPP.png
--------------------------------------------------------------------------------
/mystreamlitAPP-results.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/llmwareSLIMS/main/mystreamlitAPP-results.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Go Open, go Lean: LLMWare now can boost your AI-powered enterprise.
4 | ### Budget-friendly Efficiency: LLMWare makes AI Back-Office dreams a Reality with a herd of new SLIMs models.
5 | Repo of the code from the Medium article
6 |
7 | This repo only works with Python versions < 3.12
8 |
9 | Create a new directory and a virtual environment
10 |
11 | ```
12 | mkdir llmware
13 | cd llmware
14 | python -m venv venv
15 | ```
16 |
17 | Activate the venv and install the following packages
18 |
19 | ```
20 | source venv/bin/activate #activate the venv on Mac/Linux
21 | venv\Scripts\activate #activate the venv on Windows
22 | pip install llmware
23 | pip install streamlit
24 | ```
25 |
26 |
27 | Download the 2 files into the project directory:
28 | - myapp.py
29 | - logo.png
30 |
31 | In the terminal, with the venv active, run the following command
32 | ```
33 | streamlit run myapp.py
34 | ```
35 |
36 |
37 | During the first execution the application will download the models on your local machine
38 | into the HuggingFace cache directory, something like this:
39 |
40 | ```
41 | C:\Users\User\.cache\huggingface\hub for the non quantized models
42 | C:\Users\User\llmware_data\model_repo for the GGUF files
43 | ```
44 |
45 |
46 |
47 |
48 |
49 |
--------------------------------------------------------------------------------
/myapp.py:
--------------------------------------------------------------------------------
1 | # Sources
2 | # https://github.com/llmware-ai/llmware/blob/main/examples/SLIM-Agents/agent-multistep-analysis.py
3 | # https://www.youtube.com/watch?v=0MOMBJjytkQ by AI Anytime
4 |
5 | import streamlit as st
6 | from llmware.models import ModelCatalog
7 | from llmware.prompts import Prompt
8 |
def perform_sum(text):
    """Produce a brief summary of *text* with the bling-tiny-llama model.

    Unlike the SLIM tool helpers below, this uses the generic llmware
    ``Prompt`` interface rather than a ``function_call``.

    Parameters
    ----------
    text : str
        The passage to summarize; passed as the prompt context.

    Returns
    -------
    The llmware prompt response — presumably a dict with the generated
    summary; verify against llmware's ``prompt_main`` docs.
    """
    # Dead commented-out nli_model load removed (it was never used).
    prompter = Prompt().load_model("llmware/bling-tiny-llama-v0")
    instruction = "What is a brief summary?"
    return prompter.prompt_main(instruction, context=text)
15 |
def tags(text):
    """Extract tags from *text* via the slim-tags SLIM tool."""
    model = ModelCatalog().load_model("slim-tags-tool")
    return model.function_call(text, get_logits=False)
20 |
def topics(text):
    """Identify topics in *text* via the slim-topics SLIM tool."""
    model = ModelCatalog().load_model("slim-topics-tool")
    return model.function_call(text, get_logits=False)
25 |
def intent(text):
    """Classify the intent of *text* via the slim-intent SLIM tool."""
    model = ModelCatalog().load_model("slim-intent-tool")
    return model.function_call(text, get_logits=False)
30 |
def category(text):
    """Assign a category to *text* via the slim-category SLIM tool."""
    model = ModelCatalog().load_model("slim-category-tool")
    return model.function_call(text, get_logits=False)
35 |
def ner(text):
    """Run named-entity recognition on *text* via the slim-ner SLIM tool."""
    model = ModelCatalog().load_model("slim-ner-tool")
    return model.function_call(text, get_logits=False)
40 |
41 |
# Streamlit app layout
# NOTE(review): use_column_width is deprecated in newer Streamlit
# releases — consider use_container_width; confirm installed version.
st.image('logo.png', use_column_width="auto")
st.title("Intensive Enterprise NLP Tasks")
st.markdown("### using only CPU resources")

# Text input
text = st.text_area("Enter text here:")

# Map each UI label to the function that implements it. A single
# dispatch table replaces six copy-pasted if-blocks and keeps the
# multiselect options and the handler in sync from one source.
TOOL_DISPATCH = {
    "Generate Tags": tags,
    "Identify Topics": topics,
    "Perform Intent": intent,
    "Get Category": category,
    "Perform NER": ner,
    "Perform Summarization": perform_sum,
}

# Analysis tools selection
analysis_tools = st.multiselect(
    "Select the analysis tools to use:",
    list(TOOL_DISPATCH),
    ["Generate Tags"]  # Default selection
)

# Run the selected TASKS/Agents and display results in plain json format
if st.button("Analyze"):
    if not text.strip():
        # Guard: invoking the models on blank input wastes a model load
        # and produces meaningless output.
        st.warning("Please enter some text to analyze.")
    else:
        results = {label: TOOL_DISPATCH[label](text)
                   for label in analysis_tools}
        for tool, response in results.items():
            st.subheader(tool)
            st.json(response)
--------------------------------------------------------------------------------