├── .gitignore
├── README.md
├── demos
│   ├── .DS_Store
│   ├── module3
│   │   ├── Keyword.ipynb
│   │   └── Semantic.ipynb
│   ├── module4
│   │   ├── OU ChatBot Setup-V1.pdf
│   │   ├── demo-chains.py
│   │   ├── demo-chroma-create.py
│   │   ├── demo-create-eval-dataset.py
│   │   ├── demo-eval-model.py
│   │   ├── demo-faiss-create.py
│   │   ├── demo-memory.py
│   │   ├── demo-oci-genai-ads.py
│   │   ├── demo-ou-chatbot-chroma-final.py
│   │   ├── demo-prompts.py
│   │   ├── demo-retrieval-chroma.py
│   │   ├── demo-retrieval-faiss.py
│   │   ├── demo-retrieval-memory-chroma-traces.py
│   │   ├── demo-runnable-parallel.py
│   │   ├── demo-sessions.py
│   │   └── demo-streamlit.py
│   ├── module4_new
│   │   ├── LoadProperties.py
│   │   ├── config.txt
│   │   ├── demo-RAG-Oracle23ai-Retrieval.py
│   │   ├── demo-RAG-oracle23ai-Embed.py
│   │   ├── demo-chains-v1.py
│   │   ├── demo-chroma-create-v1.py
│   │   ├── demo-create-eval-dataset-v1.py
│   │   ├── demo-environment.yml
│   │   ├── demo-eval-model-v1.py
│   │   ├── demo-faiss-create-v1.py
│   │   ├── demo-memory-v1.py
│   │   ├── demo-oci-genai-ads-v1.py
│   │   ├── demo-ou-chatbot-chroma-final-v1.py
│   │   ├── demo-ou-chatbot-chroma-final-v2.py
│   │   ├── demo-ou-chatbot-chroma-v1.py
│   │   ├── demo-prompts-v1.py
│   │   ├── demo-retrieval-chroma-v1.py
│   │   ├── demo-retrieval-faiss-v1.py
│   │   ├── demo-retrieval-memory-chroma-traces-v1.py
│   │   ├── demo-retrieval-memory-faiss-v1.py
│   │   ├── demo-runnable-parallel-v1.py
│   │   ├── demo-sessions-v1.py
│   │   ├── demo-streamlit-v1.py
│   │   └── pdf-docs
│   │       └── oci-ai-foundations.pdf
│   ├── module5
│   │   ├── faq.txt
│   │   ├── oci-ai-foundations.pdf
│   │   └── sql_sheet.sql
│   └── test_scripts
│       └── oracle23ai_rag.py
└── labs
    ├── LoadProperties.py
    ├── config.txt
    ├── init-genailabs.sh
    ├── pdf-docs
    │   └── oci-ai-foundations.pdf
    └── txt-docs
        └── faq.txt
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | .DS_Store
3 | demos/.DS_Store
4 | demos/.DS_Store
5 | demos/.DS_Store
6 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ou-generativeai-pro
2 | ou-generativeai-pro
3 |
--------------------------------------------------------------------------------
/demos/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ou-developers/ou-generativeai-pro/d95e9e524101a071e400498c6b58bd3c762a7109/demos/.DS_Store
--------------------------------------------------------------------------------
/demos/module3/Keyword.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "a8cd8b11",
6 | "metadata": {},
7 | "source": [
8 | "### OCI Data Science - Useful Tips\n",
9 | "\n",
10 | "Check for Public Internet Access
\n",
11 | "\n",
12 | "```python\n",
13 | "import requests\n",
14 | "response = requests.get(\"https://oracle.com\")\n",
15 | "assert response.status_code==200, \"Internet connection failed\"\n",
16 | "```\n",
17 | " \n",
18 | "\n",
19 | "Helpful Documentation
\n",
20 | "\n",
23 | " \n",
24 | "\n",
25 | "Typical Cell Imports and Settings for ADS
\n",
26 | "\n",
27 | "```python\n",
28 | "%load_ext autoreload\n",
29 | "%autoreload 2\n",
30 | "%matplotlib inline\n",
31 | "\n",
32 | "import warnings\n",
33 | "warnings.filterwarnings('ignore')\n",
34 | "\n",
35 | "import logging\n",
36 | "logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.ERROR)\n",
37 | "\n",
38 | "import ads\n",
39 | "from ads.dataset.factory import DatasetFactory\n",
40 | "from ads.automl.provider import OracleAutoMLProvider\n",
41 | "from ads.automl.driver import AutoML\n",
42 | "from ads.evaluations.evaluator import ADSEvaluator\n",
43 | "from ads.common.data import ADSData\n",
44 | "from ads.explanations.explainer import ADSExplainer\n",
45 | "from ads.explanations.mlx_global_explainer import MLXGlobalExplainer\n",
46 | "from ads.explanations.mlx_local_explainer import MLXLocalExplainer\n",
47 | "from ads.catalog.model import ModelCatalog\n",
48 | "from ads.common.model_artifact import ModelArtifact\n",
49 | "```\n",
50 | " \n",
51 | "\n",
52 | "Useful Environment Variables
\n",
53 | "\n",
54 | "```python\n",
55 | "import os\n",
56 | "print(os.environ[\"NB_SESSION_COMPARTMENT_OCID\"])\n",
57 | "print(os.environ[\"PROJECT_OCID\"])\n",
58 | "print(os.environ[\"USER_OCID\"])\n",
59 | "print(os.environ[\"TENANCY_OCID\"])\n",
60 | "print(os.environ[\"NB_REGION\"])\n",
61 | "```\n",
62 | " "
63 | ]
64 | },
65 | {
66 | "cell_type": "markdown",
67 | "id": "df7e0756",
68 | "metadata": {},
69 | "source": [
70 | "# Basic Search"
71 | ]
72 | },
73 | {
74 | "cell_type": "markdown",
75 | "id": "864ec05a-e63b-4acc-9aa0-43f3dadcf2a1",
76 | "metadata": {},
77 | "source": [
78 | "This list below will act as our database for the search. "
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": null,
84 | "id": "30e080e2",
85 | "metadata": {
86 | "tags": []
87 | },
88 | "outputs": [],
89 | "source": [
90 | "# Simulated database of Wikipedia-like entries\n",
91 | "articles = [\n",
92 | " {'title': 'Python (programming language)', 'link': 'https://en.wikipedia.org/wiki/Python_(programming_language)'},\n",
93 | " {'title': 'History of Python', 'link': 'https://en.wikipedia.org/wiki/History_of_Python'},\n",
94 | " {'title': 'Monty Python', 'link': 'https://en.wikipedia.org/wiki/Monty_Python'},\n",
95 | " {'title': 'Anaconda (Python distribution)', 'link': 'https://en.wikipedia.org/wiki/Anaconda_(Python_distribution)'},\n",
96 | " {'title': 'Python molurus', 'link': 'https://en.wikipedia.org/wiki/Python_molurus'},\n",
97 | " {'title': 'Association football', 'link': 'https://en.wikipedia.org/wiki/Association_football'},\n",
98 | " {'title': 'FIFA World Cup', 'link': 'https://en.wikipedia.org/wiki/FIFA_World_Cup'},\n",
99 | " {'title': 'History of artificial intelligence', 'link': 'https://en.wikipedia.org/wiki/History_of_artificial_intelligence'},\n",
100 | " {'title': 'Football in England', 'link': 'https://en.wikipedia.org/wiki/Football_in_England'},\n",
101 | " {'title': 'Applications of artificial intelligence', 'link': 'https://en.wikipedia.org/wiki/Applications_of_artificial_intelligence'}\n",
102 | "]"
103 | ]
104 | },
105 | {
106 | "cell_type": "markdown",
107 | "id": "401c8558",
108 | "metadata": {},
109 | "source": [
110 | "This function is designed to perform a keyword search on the provided list of articles. It takes two parameters: articles, which is the list of article dictionaries, and keyword, which is the user's search term."
111 | ]
112 | },
113 | {
114 | "cell_type": "code",
115 | "execution_count": null,
116 | "id": "564d1024",
117 | "metadata": {
118 | "tags": []
119 | },
120 | "outputs": [],
121 | "source": [
122 | "# Function to perform keyword search on the simulated database\n",
123 | "def keyword_search(articles, keyword):\n",
124 | " # Convert keyword to lowercase for case-insensitive matching\n",
125 | " keyword = keyword.lower()\n",
126 | " # Search for the keyword in the titles of the articles\n",
127 | " results = [article for article in articles if keyword in article['title'].lower()]\n",
128 | " return results"
129 | ]
130 | },
131 | {
132 | "cell_type": "markdown",
133 | "id": "4f45bbd0",
134 | "metadata": {},
135 | "source": [
136 | "The code prompts the user to enter a keyword through the input function. This keyword is then used to search the database. The search results are then displayed to the user in a simple text format that lists the title and the link of each matching article. The loop iterates over the search_results and prints them out."
137 | ]
138 | },
139 | {
140 | "cell_type": "code",
141 | "execution_count": null,
142 | "id": "888ea02e",
143 | "metadata": {
144 | "tags": []
145 | },
146 | "outputs": [],
147 | "source": [
148 | "# Example usage\n",
149 | "keyword = input(\"Enter a keyword to search: \")\n",
150 | "search_results = keyword_search(articles, keyword)\n",
151 | "\n",
152 | "# Display the search results\n",
153 | "for result in search_results:\n",
154 | " print(result['title'], result['link'])"
155 | ]
156 | },
157 | {
158 | "cell_type": "code",
159 | "execution_count": null,
160 | "id": "3167833a",
161 | "metadata": {
162 | "tags": []
163 | },
164 | "outputs": [],
165 | "source": [
166 | "# Example usage\n",
167 | "keyword = input(\"Enter a keyword to search: \")\n",
168 | "search_results = keyword_search(articles, keyword)\n",
169 | "\n",
170 | "# Display the search results\n",
171 | "for result in search_results:\n",
172 | " print(result['title'], result['link'])"
173 | ]
174 | },
175 | {
176 | "cell_type": "markdown",
177 | "id": "f7797c80",
178 | "metadata": {},
179 | "source": [
180 | "What we just saw are a very high level implementation."
181 | ]
182 | },
183 | {
184 | "cell_type": "markdown",
185 | "id": "75a4b2f5",
186 | "metadata": {},
187 | "source": [
188 | "# Search using BM25 Algo"
189 | ]
190 | },
191 | {
192 | "cell_type": "markdown",
193 | "id": "a6bb7b67",
194 | "metadata": {},
195 | "source": [
196 | "Implementing a keyword search using the BM25 algorithm in Python can be done using the rank_bm25 package, which is a lightweight BM25 implementation. \"20 Newsgroups\" is a collection of approximately 20,000 newsgroup documents, partitioned across 20 different newsgroups. This is a basic example of how keyword search can be implemented on a text dataset using the BM25 algorithm. It demonstrates preprocessing, scoring, and ranking documents based on their relevance to a given query."
197 | ]
198 | },
199 | {
200 | "cell_type": "code",
201 | "execution_count": null,
202 | "id": "136f29a6",
203 | "metadata": {},
204 | "outputs": [],
205 | "source": [
206 | "# !pip install rank-bm25"
207 | ]
208 | },
209 | {
210 | "cell_type": "code",
211 | "execution_count": 1,
212 | "id": "c3f38e4e",
213 | "metadata": {
214 | "tags": []
215 | },
216 | "outputs": [],
217 | "source": [
218 | "from rank_bm25 import BM25Okapi\n",
219 | "from sklearn.datasets import fetch_20newsgroups\n",
220 | "import string"
221 | ]
222 | },
223 | {
224 | "cell_type": "markdown",
225 | "id": "d8eb25a5",
226 | "metadata": {},
227 | "source": [
228 | "This function call retrieves the entire \"20 Newsgroups\" dataset, which is a collection of approximately 20,000 newsgroup documents."
229 | ]
230 | },
231 | {
232 | "cell_type": "code",
233 | "execution_count": 2,
234 | "id": "0792d651",
235 | "metadata": {
236 | "tags": []
237 | },
238 | "outputs": [],
239 | "source": [
240 | "# Fetch the dataset\n",
241 | "newsgroups = fetch_20newsgroups(subset='all')\n",
242 | "documents = newsgroups.data # A list of documents (newsgroup posts)"
243 | ]
244 | },
245 | {
246 | "cell_type": "markdown",
247 | "id": "b3b07cb8",
248 | "metadata": {},
249 | "source": [
250 | "The preprocess function converts text to lowercase, removes punctuation, and splits it into words (tokens). This standardization is essential for effective keyword matching."
251 | ]
252 | },
253 | {
254 | "cell_type": "code",
255 | "execution_count": 3,
256 | "id": "45ad1ab4",
257 | "metadata": {
258 | "tags": []
259 | },
260 | "outputs": [],
261 | "source": [
262 | "# Preprocess the documents\n",
263 | "def preprocess(text):\n",
264 | " return text.lower().translate(str.maketrans('', '', string.punctuation)).split()\n",
265 | "\n",
266 | "# Tokenize the documents\n",
267 | "tokenized_docs = [preprocess(doc) for doc in documents]"
268 | ]
269 | },
270 | {
271 | "cell_type": "markdown",
272 | "id": "fb505cea",
273 | "metadata": {},
274 | "source": [
275 | "This initializes the BM25 model with the preprocessed (tokenized) documents. The model will use this data to compute the relevance of documents to a query."
276 | ]
277 | },
278 | {
279 | "cell_type": "code",
280 | "execution_count": 4,
281 | "id": "c3d945ba",
282 | "metadata": {
283 | "tags": []
284 | },
285 | "outputs": [],
286 | "source": [
287 | "# Create a BM25 object\n",
288 | "bm25 = BM25Okapi(tokenized_docs)"
289 | ]
290 | },
291 | {
292 | "cell_type": "code",
293 | "execution_count": 5,
294 | "id": "1651ff4f",
295 | "metadata": {
296 | "tags": []
297 | },
298 | "outputs": [],
299 | "source": [
300 | "# Example search query\n",
301 | "query = \"What are some top brands for baseball equipment?\"\n",
302 | "tokenized_query = preprocess(query)"
303 | ]
304 | },
305 | {
306 | "cell_type": "markdown",
307 | "id": "72a8babd",
308 | "metadata": {},
309 | "source": [
310 | "The BM25 model calculates a score for each document based on its relevance to the query. These scores indicate how well each document matches the query."
311 | ]
312 | },
313 | {
314 | "cell_type": "code",
315 | "execution_count": 6,
316 | "id": "be277ea3",
317 | "metadata": {
318 | "tags": []
319 | },
320 | "outputs": [],
321 | "source": [
322 | "# Perform search\n",
323 | "doc_scores = bm25.get_scores(tokenized_query)"
324 | ]
325 | },
326 | {
327 | "cell_type": "code",
328 | "execution_count": 7,
329 | "id": "37224215",
330 | "metadata": {
331 | "tags": []
332 | },
333 | "outputs": [],
334 | "source": [
335 | "# Get top N documents\n",
336 | "top_n = 2\n",
337 | "top_doc_indices = sorted(range(len(doc_scores)), key=lambda i: doc_scores[i], reverse=True)[:top_n]"
338 | ]
339 | },
340 | {
341 | "attachments": {},
342 | "cell_type": "markdown",
343 | "id": "fe929992",
344 | "metadata": {},
345 | "source": [
346 | "The script prints the file path (document ID), the BM25 score, and the first 200 characters of each of the top 2 documents. This gives you a glimpse of the content of the documents that are most relevant to the query \"top brands for baseball equipment\"."
347 | ]
348 | },
349 | {
350 | "cell_type": "code",
351 | "execution_count": null,
352 | "id": "ad2be74d",
353 | "metadata": {
354 | "tags": []
355 | },
356 | "outputs": [],
357 | "source": [
358 | "# Display top N results (2 results)\n",
359 | "for idx in top_doc_indices:\n",
360 | " print(f\"Document ID: {newsgroups.filenames[idx]}, Score: {doc_scores[idx]}\\nDocument: {documents[idx][:600]}...\\n\")"
361 | ]
362 | },
363 | {
364 | "cell_type": "code",
365 | "execution_count": null,
366 | "id": "4a623c94-5020-430c-96b6-31d214e0fe60",
367 | "metadata": {},
368 | "outputs": [],
369 | "source": []
370 | }
371 | ],
372 | "metadata": {
373 | "kernelspec": {
374 | "display_name": "oracle23ai",
375 | "language": "python",
376 | "name": "python3"
377 | },
378 | "language_info": {
379 | "codemirror_mode": {
380 | "name": "ipython",
381 | "version": 3
382 | },
383 | "file_extension": ".py",
384 | "mimetype": "text/x-python",
385 | "name": "python",
386 | "nbconvert_exporter": "python",
387 | "pygments_lexer": "ipython3",
388 | "version": "3.12.3"
389 | }
390 | },
391 | "nbformat": 4,
392 | "nbformat_minor": 5
393 | }
394 |
--------------------------------------------------------------------------------
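The notebook above preprocesses, scores, and ranks 20 Newsgroups posts with BM25. A minimal, self-contained sketch of that same flow on a tiny in-memory corpus (no dataset download needed; the corpus and query strings are illustrative only) could look like this:

```python
# BM25 keyword-search sketch on a toy corpus; documents and query are illustrative.
from rank_bm25 import BM25Okapi
import string

corpus = [
    "Python is a popular programming language",
    "Monty Python is a British comedy group",
    "Baseball equipment includes bats, gloves and helmets",
]

def preprocess(text):
    # Lowercase, strip punctuation, split into tokens.
    return text.lower().translate(str.maketrans("", "", string.punctuation)).split()

bm25 = BM25Okapi([preprocess(doc) for doc in corpus])

query_tokens = preprocess("python programming")
print(bm25.get_scores(query_tokens))              # one relevance score per document
print(bm25.get_top_n(query_tokens, corpus, n=2))  # the two highest-scoring documents
```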
/demos/module3/Semantic.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "97a60f1f",
6 | "metadata": {},
7 | "source": [
8 | "### OCI Data Science - Useful Tips\n",
9 | "\n",
10 | "Check for Public Internet Access
\n",
11 | "\n",
12 | "```python\n",
13 | "import requests\n",
14 | "response = requests.get(\"https://oracle.com\")\n",
15 | "assert response.status_code==200, \"Internet connection failed\"\n",
16 | "```\n",
17 | " \n",
18 | "\n",
19 | "Helpful Documentation
\n",
20 | "\n",
23 | " \n",
24 | "\n",
25 | "Typical Cell Imports and Settings for ADS
\n",
26 | "\n",
27 | "```python\n",
28 | "%load_ext autoreload\n",
29 | "%autoreload 2\n",
30 | "%matplotlib inline\n",
31 | "\n",
32 | "import warnings\n",
33 | "warnings.filterwarnings('ignore')\n",
34 | "\n",
35 | "import logging\n",
36 | "logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.ERROR)\n",
37 | "\n",
38 | "import ads\n",
39 | "from ads.dataset.factory import DatasetFactory\n",
40 | "from ads.automl.provider import OracleAutoMLProvider\n",
41 | "from ads.automl.driver import AutoML\n",
42 | "from ads.evaluations.evaluator import ADSEvaluator\n",
43 | "from ads.common.data import ADSData\n",
44 | "from ads.explanations.explainer import ADSExplainer\n",
45 | "from ads.explanations.mlx_global_explainer import MLXGlobalExplainer\n",
46 | "from ads.explanations.mlx_local_explainer import MLXLocalExplainer\n",
47 | "from ads.catalog.model import ModelCatalog\n",
48 | "from ads.common.model_artifact import ModelArtifact\n",
49 | "```\n",
50 | " \n",
51 | "\n",
52 | "Useful Environment Variables
\n",
53 | "\n",
54 | "```python\n",
55 | "import os\n",
56 | "print(os.environ[\"NB_SESSION_COMPARTMENT_OCID\"])\n",
57 | "print(os.environ[\"PROJECT_OCID\"])\n",
58 | "print(os.environ[\"USER_OCID\"])\n",
59 | "print(os.environ[\"TENANCY_OCID\"])\n",
60 | "print(os.environ[\"NB_REGION\"])\n",
61 | "```\n",
62 | " "
63 | ]
64 | },
65 | {
66 | "cell_type": "markdown",
67 | "id": "7fcac398",
68 | "metadata": {},
69 | "source": [
70 | "# Search using Keyword and Vector Database"
71 | ]
72 | },
73 | {
74 | "cell_type": "code",
75 | "execution_count": null,
76 | "id": "e991ed5d",
77 | "metadata": {
78 | "tags": []
79 | },
80 | "outputs": [],
81 | "source": [
82 | "# !pip install cohere\n",
83 | "# !pip install weaviate-client"
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": 1,
89 | "id": "d77ccfda",
90 | "metadata": {
91 | "tags": []
92 | },
93 | "outputs": [],
94 | "source": [
95 | "# import libraries\n",
96 | "import weaviate\n",
97 | "import cohere"
98 | ]
99 | },
100 | {
101 | "cell_type": "code",
102 | "execution_count": 2,
103 | "id": "8319b542",
104 | "metadata": {
105 | "tags": []
106 | },
107 | "outputs": [],
108 | "source": [
109 | "# Add your Cohere API key here\n",
110 | "# You can obtain a key by signing up in https://dashboard.cohere.com/ or https://docs.cohere.com/reference/key\n",
111 | "cohere_api_key = ''"
112 | ]
113 | },
114 | {
115 | "cell_type": "code",
116 | "execution_count": 3,
117 | "id": "78381ca1",
118 | "metadata": {
119 | "tags": []
120 | },
121 | "outputs": [],
122 | "source": [
123 | "# Create a Cohere client object\n",
124 | "co = cohere.Client(cohere_api_key)"
125 | ]
126 | },
127 | {
128 | "cell_type": "code",
129 | "execution_count": 4,
130 | "id": "2544f870",
131 | "metadata": {
132 | "tags": []
133 | },
134 | "outputs": [
135 | {
136 | "name": "stderr",
137 | "output_type": "stream",
138 | "text": [
139 | "/home/datascience/conda/generalml_p38_cpu_v1/lib/python3.8/site-packages/weaviate/warnings.py:121: DeprecationWarning: Dep005: You are using weaviate-client version 3.26.1. The latest version is 4.5.4.\n",
140 | " Please consider upgrading to the latest version. See https://weaviate.io/developers/weaviate/client-libraries/python for details.\n",
141 | " warnings.warn(\n"
142 | ]
143 | }
144 | ],
145 | "source": [
146 | "# Create a vector database object\n",
147 | "# Connect to the Weaviate demo databse containing 10M wikipedia vectors\n",
148 | "# You can obtain a key from https://weaviate.io and refer to https://weaviate.io/developers/wcs/authentication#connect-with-an-api\n",
149 | "\n",
150 | "auth_config = weaviate.auth.AuthApiKey(api_key=\"\")\n",
151 | "client = weaviate.Client(\n",
152 | " url=\"https://cohere-demo.weaviate.network/\",\n",
153 | " auth_client_secret=auth_config,\n",
154 | " additional_headers={\n",
155 | " \"X-Cohere-Api-Key\": cohere_api_key,\n",
156 | " }\n",
157 | ")"
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": 5,
163 | "id": "bc33194d",
164 | "metadata": {
165 | "tags": []
166 | },
167 | "outputs": [],
168 | "source": [
169 | "# Keyword Function\n",
170 | "def keyword_search(query, results_lang='en', num_results=3):\n",
171 | " properties = [\"text\", \"title\", \"url\"]\n",
172 | "\n",
173 | "\n",
174 | " where_filter = {\n",
175 | " \"path\": [\"lang\"],\n",
176 | " \"operator\": \"Equal\",\n",
177 | " \"valueString\": results_lang\n",
178 | " }\n",
179 | "\n",
180 | "\n",
181 | " response = (\n",
182 | " client.query.get(\"Articles\", properties)\n",
183 | " .with_bm25(\n",
184 | " query=query\n",
185 | " )\n",
186 | " .with_where(where_filter)\n",
187 | " .with_limit(num_results)\n",
188 | " .do()\n",
189 | " )\n",
190 | " result = response['data']['Get']['Articles']\n",
191 | " return result"
192 | ]
193 | },
194 | {
195 | "cell_type": "code",
196 | "execution_count": 6,
197 | "id": "9a7e8d89",
198 | "metadata": {
199 | "tags": []
200 | },
201 | "outputs": [],
202 | "source": [
203 | "# Example usage\n",
204 | "query = \"What is the highest mountain peak in the world?\"\n",
205 | "top_documents = keyword_search(query)"
206 | ]
207 | },
208 | {
209 | "cell_type": "code",
210 | "execution_count": 7,
211 | "id": "85e5683a",
212 | "metadata": {
213 | "tags": []
214 | },
215 | "outputs": [
216 | {
217 | "data": {
218 | "text/plain": [
219 | "[{'text': \"The Polish scientist and explorer Count Paul Edmund Strzelecki conducted surveying work in the Australian Alps in 1839 and, led by his two Aboriginal guides Charlie Tarra and Jackie, became the first European to ascend Australia's highest peak, which he named Mount Kosciuszko in honour of the Polish patriot Tadeusz Kościuszko.European explorers penetrated deeper into the interior in the 1840s in a quest to discover new lands for agriculture or answer scientific enquiries. The German scientist Ludwig Leichhardt led three expeditions in northern Australia in this decade, sometimes with the help of Aboriginal guides, identifying the grazing potential of the region and making important discoveries in the fields of botany and geology. He and his party disappeared in 1848 while attempting to cross the continent from east to west. Edmund Kennedy led an expedition into what is now far-western Queensland in 1847 before being speared by Aborigines in the Cape York Peninsula in 1848.\",\n",
220 | " 'title': 'History of Australia',\n",
221 | " 'url': 'https://en.wikipedia.org/wiki?curid=39582'},\n",
222 | " {'text': \"As desert life proves arduous, the Israelites complain and long for Egypt, but God miraculously provides manna for them to eat and water to drink. The Israelites arrive at the mountain of God, where Moses's father-in-law Jethro visits Moses; at his suggestion, Moses appoints judges over Israel. God asks whether they will agree to be his people. They accept. The people gather at the foot of the mountain, and with thunder and lightning, fire and clouds of smoke, the sound of trumpets, and the trembling of the mountain, God appears on the peak, and the people see the cloud and hear the voice (or possibly sound) of God. God tells Moses to ascend the mountain. God pronounces the Ten Commandments (the Ethical Decalogue) in the hearing of all Israel. Moses goes up the mountain into the presence of God, who pronounces the Covenant Code of ritual and civil law and promises Canaan to them if they obey. Moses comes down from the mountain and writes down God's words, and the people agree to keep them. God calls Moses up the mountain again, where he remains for forty days and forty nights, after which he returns, bearing the set of stone tablets.\",\n",
223 | " 'title': 'Book of Exodus',\n",
224 | " 'url': 'https://en.wikipedia.org/wiki?curid=9662'},\n",
225 | " {'text': 'The complicated geological past of the region is obvious from the morphology of Olympus and its National Park. Features include deep gorges and lots of smooth peaks, many of them over , including Aghios Antonios (), Kalogeros (), Toumpa () and Profitis Ilias (). However, it is the central, almost vertical, rocky peaks, that impress the visitor. Over the town of Litochoro, on the horizon, the relief of the mountain displays an apparent \"V\", between two peaks of almost equal height. The left limb is the peak named Mytikas (or Pantheon). It is Greece\\'s highest peak. Then, on the right is Stefani (or Thronos Dios [Throne of Zeus]), which presents the most impressive and steep peak of Olympus, with its last sharply rising 200 meters presenting the greatest challenge for climbers. Further south, Skolio ( second highest sub-peak – ) completes an arc of about 200 degrees, with its steep slopes forming on the west side, like a wall, an impressive precipitous amphitheatrical cavity, in depth and in circumference, the \\'Megala Kazania\\'. On the east side of the high peaks the steep slopes form zone like parallel folds, the \\'Zonaria\\'. Even narrower and steeper scorings, the \\'Loukia\\', lead to the peak.',\n",
226 | " 'title': 'Mount Olympus',\n",
227 | " 'url': 'https://en.wikipedia.org/wiki?curid=12418604'}]"
228 | ]
229 | },
230 | "execution_count": 7,
231 | "metadata": {},
232 | "output_type": "execute_result"
233 | }
234 | ],
235 | "source": [
236 | "top_documents"
237 | ]
238 | },
239 | {
240 | "cell_type": "code",
241 | "execution_count": 8,
242 | "id": "932a3ae7",
243 | "metadata": {
244 | "tags": []
245 | },
246 | "outputs": [],
247 | "source": [
248 | "# Example usage\n",
249 | "query1 = \"What is the capital of United States of America?\"\n",
250 | "top_documents1 = keyword_search(query1)"
251 | ]
252 | },
253 | {
254 | "cell_type": "code",
255 | "execution_count": 9,
256 | "id": "3728c89b",
257 | "metadata": {
258 | "tags": []
259 | },
260 | "outputs": [
261 | {
262 | "data": {
263 | "text/plain": [
264 | "[{'text': 'In 2013 longtime New Age author Marianne Williamson launched a campaign for a seat in the United States House of Representatives, telling \"The New York Times\" that her type of spirituality was what American politics needed. \"America has swerved from its ethical center\", she said. Running as an independent in west Los Angeles, she finished fourth in her district\\'s open primary election with 13% of the vote. In early 2019, Williamson announced her candidacy for the Democratic Party nomination for president of the United States in the 2020 United States presidential election. A 5,300-word article about her presidential campaign in \"The Washington Post\" said she had \"plans to fix America with love. Tough love\". In January 2020 she withdrew her bid for the nomination.',\n",
265 | " 'title': 'New Age',\n",
266 | " 'url': 'https://en.wikipedia.org/wiki?curid=21742'},\n",
267 | " {'text': 'Governor Juan José Estrada, member of the Conservative Party, led a revolt against President José Santos Zelaya, member of the Liberal Party reelected in 1906. This became what is known as the Estrada rebellion. The United States supported the conservative forces because Zelaya had wanted to work with Germany or Japan to build a new canal through the country. The U.S. controlled the Panama Canal and did not want competition from another country outside of the Americas. Thomas P Moffat, a US council in Bluefields, Nicaragua would give overt support, in conflict with the US trying to only give covert support. Direct intervention would be pushed by the secretary of state Philander C. Knox. Two Americans were executed by Zelaya for their participation with the conservatives. Seeing an opportunity the United States became directly involved in the rebellion and sent in troops, which landed on the Mosquito Coast. On December 14, 1909 Zelaya was forced to resign under diplomatic pressure from America and fled Nicaragua. Before Zelaya fled, he along with the liberal assembly choose José Madriz to lead Nicaragua. The U.S. refused to recognize Madriz. The conservatives eventually beat back the liberals and forced Madriz to resign. Estrada then became the president. Thomas Cleland Dawson was sent as a special agent to the country and determined that any election held would bring the liberals into power, so had Estrada set up a constituent assembly to elect him instead. In August 1910 Estrada became President of Nicaragua under U.S. recognition, agreeing to certain conditions from the U.S. After the intervention, the U.S. and Nicaragua signed a treaty on June 6, 1911.',\n",
268 | " 'title': 'United States involvement in regime change',\n",
269 | " 'url': 'https://en.wikipedia.org/wiki?curid=37258993'},\n",
270 | " {'text': 'In what became known as the \"Banana Wars,\" between the end of the Spanish–American War in 1898 and the inception of the Good Neighbor Policy in 1934, the U.S. staged many military invasions and interventions in Central America and the Caribbean. One of these incursions, in 1903, involved regime change rather than regime preservation. The United States Marine Corps, which most often fought these wars, developed a manual called \"The Strategy and Tactics of Small Wars\" in 1921 based on its experiences. On occasion, the Navy provided gunfire support and Army troops were also used. The United Fruit Company and Standard Fruit Company dominated Honduras\\' key banana export sector and associated land holdings and railways. The U.S. staged invasions and incursions of US troops in 1903 (supporting a coup by Manuel Bonilla), 1907 (supporting Bonilla against a Nicaraguan-backed coup), 1911 and 1912 (defending the regime of Miguel R. Davila from an uprising), 1919 (peacekeeping during a civil war, and installing the caretaker government of Francisco Bográn), 1920 (defending the Bográn regime from a general strike), 1924 (defending the regime of Rafael López Gutiérrez from an uprising) and 1925 (defending the elected government of Miguel Paz Barahona) to defend US interests.',\n",
271 | " 'title': 'United States involvement in regime change',\n",
272 | " 'url': 'https://en.wikipedia.org/wiki?curid=37258993'}]"
273 | ]
274 | },
275 | "execution_count": 9,
276 | "metadata": {},
277 | "output_type": "execute_result"
278 | }
279 | ],
280 | "source": [
281 | "top_documents1"
282 | ]
283 | },
284 | {
285 | "cell_type": "markdown",
286 | "id": "4ce0ff9b",
287 | "metadata": {},
288 | "source": [
289 | "# Search usinig Dense Retrieval and Vector Database"
290 | ]
291 | },
292 | {
293 | "cell_type": "code",
294 | "execution_count": 10,
295 | "id": "5fc6cd02",
296 | "metadata": {
297 | "tags": []
298 | },
299 | "outputs": [],
300 | "source": [
301 | "# Dense Retrieval function\n",
302 | "def dense_retrieval(query, results_lang='en', num_results=3):\n",
303 | "\n",
304 | " nearText = {\"concepts\": [query]}\n",
305 | " properties = [\"text\", \"title\", \"url\", \"_additional {distance}\"]\n",
306 | "\n",
307 | " # To filter by language\n",
308 | " where_filter = {\n",
309 | " \"path\": [\"lang\"],\n",
310 | " \"operator\": \"Equal\",\n",
311 | " \"valueString\": results_lang\n",
312 | " }\n",
313 | " response = (\n",
314 | " client.query\n",
315 | " .get(\"Articles\", properties)\n",
316 | " .with_near_text(nearText)\n",
317 | " .with_where(where_filter)\n",
318 | " .with_limit(num_results)\n",
319 | " .do()\n",
320 | " )\n",
321 | "\n",
322 | " result = response['data']['Get']['Articles']\n",
323 | " return result"
324 | ]
325 | },
326 | {
327 | "cell_type": "code",
328 | "execution_count": 11,
329 | "id": "e9ea58b6",
330 | "metadata": {
331 | "tags": []
332 | },
333 | "outputs": [],
334 | "source": [
335 | "top_documents = dense_retrieval(query)"
336 | ]
337 | },
338 | {
339 | "cell_type": "code",
340 | "execution_count": 12,
341 | "id": "ab1fe8ab",
342 | "metadata": {
343 | "tags": []
344 | },
345 | "outputs": [
346 | {
347 | "data": {
348 | "text/plain": [
349 | "[{'_additional': {'distance': -150.58865},\n",
350 | " 'text': \"Heights of mountains are typically measured above sea level. Using this metric, Mount Everest is the highest mountain on Earth, at . There are at least 100 mountains with heights of over above sea level, all of which are located in central and southern Asia. The highest mountains above sea level are generally not the highest above the surrounding terrain. There is no precise definition of surrounding base, but Denali, Mount Kilimanjaro and Nanga Parbat are possible candidates for the tallest mountain on land by this measure. The bases of mountain islands are below sea level, and given this consideration Mauna Kea ( above sea level) is the world's tallest mountain and volcano, rising about from the Pacific Ocean floor.\",\n",
351 | " 'title': 'Mountain',\n",
352 | " 'url': 'https://en.wikipedia.org/wiki?curid=37754'},\n",
353 | " {'_additional': {'distance': -150.0647},\n",
354 | " 'text': 'Until 1852, Kangchenjunga was assumed to be the highest mountain in the world, but calculations and measurements by the Great Trigonometrical Survey of India in 1849 showed that Mount Everest, known as Peak XV at the time, is actually higher. After allowing for further verification of all calculations, it was officially announced in 1856 that Kangchenjunga was the third highest mountain.',\n",
355 | " 'title': 'Kangchenjunga',\n",
356 | " 'url': 'https://en.wikipedia.org/wiki?curid=17073'},\n",
357 | " {'_additional': {'distance': -149.33359},\n",
358 | " 'text': 'A 1986 expedition led by George Wallerstein made an inaccurate measurement showing that K2 was taller than Mount Everest, and therefore the tallest mountain in the world. A corrected measurement was made in 1987, but by then the claim that K2 was the tallest mountain in the world had already made it into many news reports and reference works.',\n",
359 | " 'title': 'K2',\n",
360 | " 'url': 'https://en.wikipedia.org/wiki?curid=17359'}]"
361 | ]
362 | },
363 | "execution_count": 12,
364 | "metadata": {},
365 | "output_type": "execute_result"
366 | }
367 | ],
368 | "source": [
369 | "top_documents"
370 | ]
371 | },
372 | {
373 | "cell_type": "code",
374 | "execution_count": 13,
375 | "id": "663bcbfd",
376 | "metadata": {
377 | "tags": []
378 | },
379 | "outputs": [],
380 | "source": [
381 | "# Example usage\n",
382 | "top_documents1 = dense_retrieval(query1)"
383 | ]
384 | },
385 | {
386 | "cell_type": "code",
387 | "execution_count": 14,
388 | "id": "82b8b3b9",
389 | "metadata": {
390 | "tags": []
391 | },
392 | "outputs": [
393 | {
394 | "data": {
395 | "text/plain": [
396 | "[{'_additional': {'distance': -148.4822},\n",
397 | " 'text': \"In 1785, the assembly of the Congress of the Confederation made New York City the national capital shortly after the war. New York was the last capital of the U.S. under the Articles of Confederation and the first capital under the Constitution of the United States. New York City as the U.S. capital hosted several events of national scope in 1789—the first President of the United States, George Washington, was inaugurated; the first United States Congress and the Supreme Court of the United States each assembled for the first time; and the United States Bill of Rights was drafted, all at Federal Hall on Wall Street. In 1790, New York surpassed Philadelphia as the nation's largest city. At the end of that year, pursuant to the Residence Act, the national capital was moved to Philadelphia.\",\n",
398 | " 'title': 'New York City',\n",
399 | " 'url': 'https://en.wikipedia.org/wiki?curid=645042'},\n",
400 | " {'_additional': {'distance': -147.14255},\n",
401 | " 'text': \"The United States of America (U.S.A. or USA), commonly known as the United States (U.S. or US) or America, is a country primarily in North America. It consists of 50 states, a federal district, five major unincorporated territories, nine Minor Outlying Islands, and 326 Indian reservations. It is the world's third-largest country by both land and total area. The United States shares land borders with Canada to its north and with Mexico to its south. It has maritime borders with the Bahamas, Cuba, Russia, and other nations. With a population of over 331 million, it is the most populous country in the Americas and the third most populous in the world. The national capital is Washington, D.C., and the most populous city and financial center is New York City.\",\n",
402 | " 'title': 'United States',\n",
403 | " 'url': 'https://en.wikipedia.org/wiki?curid=3434750'},\n",
404 | " {'_additional': {'distance': -146.67282},\n",
405 | " 'text': 'The word \"capitol\" has since been adopted, following the example of the United States Capitol, in many jurisdictions also for other government buildings, for instance the \"capitols\" in the individual capitals of the states of the United States. This, in turn, has led to frequent misspellings of \"capitol\" and \"capital\". The former refers to a building which houses government institutions; the latter refers to the entire city.',\n",
406 | " 'title': 'United States Capitol',\n",
407 | " 'url': 'https://en.wikipedia.org/wiki?curid=31979'}]"
408 | ]
409 | },
410 | "execution_count": 14,
411 | "metadata": {},
412 | "output_type": "execute_result"
413 | }
414 | ],
415 | "source": [
416 | "top_documents1"
417 | ]
418 | },
419 | {
420 | "cell_type": "markdown",
421 | "id": "6ff6dd6d",
422 | "metadata": {},
423 | "source": [
424 | "# Search using Dense Retrieval, Rerank and Vector Database"
425 | ]
426 | },
427 | {
428 | "cell_type": "code",
429 | "execution_count": 15,
430 | "id": "01c7dead",
431 | "metadata": {
432 | "tags": []
433 | },
434 | "outputs": [],
435 | "source": [
436 | "def rerank_responses(query, responses, num_responses=2):\n",
437 | " reranked_responses = co.rerank(\n",
438 | " model = 'rerank-english-v2.0',\n",
439 | " query = query,\n",
440 | " documents = responses,\n",
441 | " top_n = num_responses,\n",
442 | " )\n",
443 | " return reranked_responses"
444 | ]
445 | },
446 | {
447 | "cell_type": "code",
448 | "execution_count": 16,
449 | "id": "5b8fe148",
450 | "metadata": {
451 | "tags": []
452 | },
453 | "outputs": [],
454 | "source": [
455 | "texts = [result.get('text') for result in top_documents]\n",
456 | "reranked_text = rerank_responses(query, texts)"
457 | ]
458 | },
459 | {
460 | "cell_type": "code",
461 | "execution_count": 17,
462 | "id": "43be122a",
463 | "metadata": {
464 | "tags": []
465 | },
466 | "outputs": [
467 | {
468 | "name": "stdout",
469 | "output_type": "stream",
470 | "text": [
471 | "i:0\n",
472 | "RerankResult\n",
473 | "i:1\n",
474 | "RerankResult\n"
475 | ]
476 | }
477 | ],
478 | "source": [
479 | "for i, result in enumerate(reranked_text):\n",
480 | " print(f\"i:{i}\")\n",
481 | " print(f\"{result}\")"
482 | ]
483 | },
484 | {
485 | "cell_type": "code",
486 | "execution_count": 18,
487 | "id": "47dc2039",
488 | "metadata": {
489 | "tags": []
490 | },
491 | "outputs": [],
492 | "source": [
493 | "texts = [result.get('text') for result in top_documents1]\n",
494 | "reranked_text1 = rerank_responses(query1, texts)"
495 | ]
496 | },
497 | {
498 | "cell_type": "code",
499 | "execution_count": 19,
500 | "id": "e5602eec",
501 | "metadata": {
502 | "tags": []
503 | },
504 | "outputs": [
505 | {
506 | "name": "stdout",
507 | "output_type": "stream",
508 | "text": [
509 | "i:0\n",
510 | "RerankResult\n",
511 | "i:1\n",
512 | "RerankResult\n"
513 | ]
514 | }
515 | ],
516 | "source": [
517 | "for i, result in enumerate(reranked_text1):\n",
518 | " print(f\"i:{i}\")\n",
519 | " print(f\"{result}\")"
520 | ]
521 | },
522 | {
523 | "cell_type": "code",
524 | "execution_count": null,
525 | "id": "6455c1ea-e162-45f0-8e04-207614a4d5b8",
526 | "metadata": {},
527 | "outputs": [],
528 | "source": []
529 | }
530 | ],
531 | "metadata": {
532 | "kernelspec": {
533 | "display_name": "Python [conda env:generalml_p38_cpu_v1]",
534 | "language": "python",
535 | "name": "conda-env-generalml_p38_cpu_v1-py"
536 | },
537 | "language_info": {
538 | "codemirror_mode": {
539 | "name": "ipython",
540 | "version": 3
541 | },
542 | "file_extension": ".py",
543 | "mimetype": "text/x-python",
544 | "name": "python",
545 | "nbconvert_exporter": "python",
546 | "pygments_lexer": "ipython3",
547 | "version": "3.8.13"
548 | }
549 | },
550 | "nbformat": 4,
551 | "nbformat_minor": 5
552 | }
553 |
--------------------------------------------------------------------------------
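Semantic.ipynb retrieves candidates from Weaviate and then reranks them with Cohere. A compact sketch of just the rerank step, using plain strings in place of vector-store hits, is below; it assumes a Cohere API key, and the `index` and `relevance_score` attribute names match the SDK version pinned in the notebook but may differ in newer releases.

```python
# Rerank sketch: candidate texts stand in for documents returned by the vector store.
import cohere

co = cohere.Client("<your-cohere-api-key>")

query = "What is the highest mountain peak in the world?"
candidates = [
    "Mount Everest is the highest mountain above sea level.",
    "Mount Olympus is the highest mountain in Greece.",
    "K2 was once mistakenly measured as taller than Everest.",
]

reranked = co.rerank(
    model="rerank-english-v2.0",
    query=query,
    documents=candidates,
    top_n=2,
)

# Attribute names below are assumptions for this SDK version.
for result in reranked:
    print(candidates[result.index], "->", result.relevance_score)
```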
/demos/module4/OU ChatBot Setup-V1.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ou-developers/ou-generativeai-pro/d95e9e524101a071e400498c6b58bd3c762a7109/demos/module4/OU ChatBot Setup-V1.pdf
--------------------------------------------------------------------------------
/demos/module4/demo-chains.py:
--------------------------------------------------------------------------------
1 | from langchain.prompts import ChatPromptTemplate
2 | from langchain.schema import StrOutputParser
3 | from langchain.chains import LLMChain
4 |
5 | from langchain_community.llms import OCIGenAI
6 | import oci
7 |
8 | #In this demo we will explore building LangChain chains with the LLMChain class and with the declarative LCEL approach.
9 |
10 | #Step 1 - setup OCI Generative AI llm
11 |
12 | # Service endpoint
13 | endpoint = "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com"
14 |
15 | llm = OCIGenAI(
16 | model_id="cohere.command-light",
17 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
18 | compartment_id="<>",
19 | model_kwargs={"max_tokens":200}
20 | )
21 |
22 | #Step 2 - use a chat message template and pass in the {question} variable
23 |
24 | prompt = ChatPromptTemplate.from_messages([
25 | ("system", "You're a very knowledgeable scientist who provides accurate and eloquent answers to scientific questions."),
26 | ("human", "{question}")
27 | ])
28 |
29 | #Step 3 - create a chain using LLMChain class and invoke a chain to get a response.
30 | #legacy chain
31 |
32 | chain = LLMChain(llm=llm, prompt=prompt, output_parser=StrOutputParser())
33 | response = chain.invoke({"question":"What are basic elements of a matter"})
34 | print("Response from legacy chain")
35 | print(response)
36 |
37 | #Step 4 - here we use the LangChain Expression Language (LCEL) to compose a chain and invoke it.
38 | #lcel chain
39 |
40 | runnable = prompt | llm | StrOutputParser()
41 | response = runnable.invoke({"question": "What are basic elements of a matter"})
42 | print("Response from LECL Chain")
43 | print(response)
44 |
45 |
46 |
47 |
48 |
--------------------------------------------------------------------------------
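demo-chains.py contrasts the legacy LLMChain with an LCEL pipeline. A minimal sketch of the same `prompt | llm | parser` composition that runs without OCI credentials, using LangChain's built-in FakeListLLM as a stand-in for OCIGenAI, might look like this:

```python
# LCEL composition sketch; FakeListLLM replaces OCIGenAI so no cloud credentials are needed.
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser
from langchain_community.llms import FakeListLLM

llm = FakeListLLM(responses=["Matter is made of atoms: protons, neutrons and electrons."])

prompt = ChatPromptTemplate.from_messages([
    ("system", "You're a very knowledgeable scientist."),
    ("human", "{question}"),
])

# prompt -> llm -> output parser, composed with the | operator.
chain = prompt | llm | StrOutputParser()
print(chain.invoke({"question": "What are the basic elements of matter?"}))
```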
/demos/module4/demo-chroma-create.py:
--------------------------------------------------------------------------------
1 | from langchain_community.embeddings import OCIGenAIEmbeddings
2 | from langchain_community.vectorstores import Chroma
3 | from langchain.text_splitter import RecursiveCharacterTextSplitter
4 | from langchain_community.document_loaders import PyPDFDirectoryLoader
5 |
6 | from langchain_community.embeddings import CohereEmbeddings
7 |
8 | pdf_loader = PyPDFDirectoryLoader("./pdf-docs" )
9 |
10 | loaders = [pdf_loader]
11 |
12 | documents = []
13 | for loader in loaders:
14 | documents.extend(loader.load())
15 |
16 | text_splitter = RecursiveCharacterTextSplitter(chunk_size=2500, chunk_overlap=100)
17 | all_documents = text_splitter.split_documents(documents)
18 |
19 | print(f"Total number of documents: {len(all_documents)}")
20 |
21 | #Step 1 - setup OCI Generative AI embeddings model
22 |
23 | embeddings = OCIGenAIEmbeddings(
24 | model_id="cohere.embed-english-v3.0",
25 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
26 | compartment_id="<>",
27 | model_kwargs={"truncate":True}
28 | )
29 |
30 | #Step 2 - since OCIGenAIEmbeddings accepts at most 96 documents per request, we add documents in batches.
31 |
32 | # Set the batch size
33 | batch_size = 96
34 |
35 | # Calculate the number of batches
36 | num_batches = len(all_documents) // batch_size + (len(all_documents) % batch_size > 0)
37 |
38 | db = Chroma(embedding_function=embeddings , persist_directory="./chromadb")
39 | retv = db.as_retriever()
40 |
41 | # Iterate over batches
42 | for batch_num in range(num_batches):
43 | # Calculate start and end indices for the current batch
44 | start_index = batch_num * batch_size
45 | end_index = (batch_num + 1) * batch_size
46 | # Extract documents for the current batch
47 | batch_documents = all_documents[start_index:end_index]
48 | # Your code to process each document goes here
49 | retv.add_documents(batch_documents)
50 | print(start_index, end_index)
51 |
52 | #Step 3 - here we persist the collection
53 | db.persist()
54 |
55 |
56 |
57 |
58 |
59 |
60 |
--------------------------------------------------------------------------------
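The batching loop above exists because the embedding endpoint accepts at most 96 documents per call. The same slicing logic, pulled out into a small helper, is sketched below; the helper name is illustrative and not part of the script.

```python
# Illustrative batching helper: yields fixed-size slices of a document list.
def iter_batches(items, batch_size=96):
    for start in range(0, len(items), batch_size):
        yield items[start:start + batch_size]

# Sketch of how the loop in demo-chroma-create.py could use it:
# for batch in iter_batches(all_documents, batch_size=96):
#     retv.add_documents(batch)
```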
/demos/module4/demo-create-eval-dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | from uuid import uuid4
3 | from langsmith import Client
4 |
5 | unique_id = uuid4().hex[0:8]
6 | os.environ["LANGCHAIN_TRACING_V2"] = "true"
7 | os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
8 | os.environ["LANGCHAIN_API_KEY"] = "<>" # Update to your API k
9 |
10 | # create dataset for evaluation
11 | dataset_inputs = [
12 | "Tell us about Oracle Cloud Infrastructure AI Foundations Course and Certification",
13 | "Tell us which module in this course is relevant to Deep Learning.",
14 | "Tell us about which module is relevant to LLMs and Transformers",
15 | "Tell me about instructors of this course"
16 | # ... add more as desired
17 | ]
18 |
19 | # Outputs are provided to the evaluator, so it knows what to compare to
20 | # Outputs are optional but recommended.
21 | dataset_outputs = [
22 | {"must_mention": ["AI", "LLM"]},
23 | {"must_mention": ["CNN", "Neural Network"]},
24 | {"must_mention": ["Module 5", "Transformer", "LLM"]},
25 | {"must_mention": ["Hemant", "Himanshu", "Nick"]}
26 | ]
27 |
28 | client = Client()
29 | dataset_name = "AIFoundationsDS-111"
30 |
31 | # Storing inputs in a dataset lets us
32 | # run chains and LLMs over a shared set of examples.
33 | dataset = client.create_dataset(
34 | dataset_name=dataset_name,
35 | description="AI Foundations QA.",
36 | )
37 | client.create_examples(
38 | inputs=[{"question": q} for q in dataset_inputs],
39 | outputs=dataset_outputs,
40 | dataset_id=dataset.id,
41 | )
42 |
--------------------------------------------------------------------------------
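The `must_mention` lists stored with each example are what an evaluator later compares model answers against. A framework-agnostic sketch of that check (not LangSmith's built-in evaluator API; the function name is illustrative) could be:

```python
# Illustrative check: fraction of required phrases mentioned in a model response.
def must_mention_score(response, must_mention):
    if not must_mention:
        return 1.0
    hits = [phrase for phrase in must_mention if phrase.lower() in response.lower()]
    return len(hits) / len(must_mention)

# Example against one of the dataset entries above:
print(must_mention_score(
    "Module 5 covers Transformers and LLMs in depth.",
    ["Module 5", "Transformer", "LLM"],
))  # -> 1.0
```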
/demos/module4/demo-eval-model.py:
--------------------------------------------------------------------------------
1 | import os
2 | from uuid import uuid4
3 | import langsmith
4 | from langchain import smith
5 | from langchain.smith import RunEvalConfig
6 |
7 | from langchain_community.vectorstores import FAISS
8 | from langchain_community.embeddings import OCIGenAIEmbeddings
9 | from langchain.chains import RetrievalQA
10 |
11 | unique_id = uuid4().hex[0:8]
12 | os.environ["LANGCHAIN_TRACING_V2"] = "true"
13 | os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
14 | os.environ["LANGCHAIN_API_KEY"] = "<>>" # Update to your API ke
15 |
16 | from langchain_community.llms import OCIGenAI
17 |
18 | #In this demo we will evaluate a retrieval chain against the evaluation dataset created earlier.
19 |
20 | # use default authN method API-key
21 | llm = OCIGenAI(
22 | model_id="cohere.command",
23 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
24 | compartment_id="<>",
25 | model_kwargs={"max_tokens":400}
26 | )
27 |
28 | embeddings = OCIGenAIEmbeddings(
29 | model_id="cohere.embed-english-v3.0",
30 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
31 | compartment_id="<>",
32 | )
33 |
34 | # Step 4 - here we load the index and create a retriever that gets relevant documents (similar in meaning to a query)
35 |
36 | db = FAISS.load_local("faiss_index", embeddings)
37 |
38 | retv = db.as_retriever(search_kwargs={"k": 8})
39 |
40 |
41 | chain = RetrievalQA.from_chain_type(llm=llm, retriever=retv)
42 |
43 | # Define the evaluators to apply
44 | #Default criteria are implemented for the following aspects: conciseness, relevance,
45 | # correctness, coherence, harmfulness, maliciousness, helpfulness, controversiality, misogyny, and criminality.
46 |
47 | eval_config = smith.RunEvalConfig(
48 | evaluators=[
49 | "cot_qa",
50 | RunEvalConfig.Criteria("relevance"),
51 | ],
52 | custom_evaluators=[],
53 | eval_llm=llm
54 | )
55 |
56 | client = langsmith.Client()
57 |
58 | chain_results = client.run_on_dataset(
59 | dataset_name="AIFoundationsDS-111",
60 | llm_or_chain_factory=chain,
61 | evaluation=eval_config,
62 | concurrency_level=5,
63 | verbose=True,
64 | )
65 |
--------------------------------------------------------------------------------
/demos/module4/demo-faiss-create.py:
--------------------------------------------------------------------------------
1 | from langchain_community.embeddings import OCIGenAIEmbeddings
2 | from langchain_community.vectorstores import FAISS
3 | from langchain.text_splitter import RecursiveCharacterTextSplitter
4 | from langchain_community.document_loaders import PyPDFDirectoryLoader
5 |
6 |
7 | pdf_loader = PyPDFDirectoryLoader("./pdf-docs" )
8 | pages_dir = pdf_loader.load()
9 | #print(len(pages_dir))
10 |
11 | loaders = [pdf_loader]
12 | documents = []
13 | for loader in loaders:
14 | documents.extend(loader.load())
15 |
16 | text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
17 | all_documents = text_splitter.split_documents(documents)
18 |
19 | print(f"Total number of documents: {len(all_documents)}")
20 |
21 | #Step 1 - setup OCI Generative AI embeddings model
22 |
23 | embeddings = OCIGenAIEmbeddings(
24 | model_id="cohere.embed-english-v3.0",
25 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
26 | compartment_id="<>",
27 | model_kwargs={"truncate":True}
28 | )
29 |
30 | #Step 2 - index documents in batches and persist
31 |
32 | # Set the batch size
33 | batch_size = 96
34 |
35 | # Calculate the number of batches
36 | num_batches = len(all_documents) // batch_size + (len(all_documents) % batch_size > 0)
37 |
38 |
39 | texts = ["FAISS is an important library", "LangChain supports FAISS"]
40 | db = FAISS.from_texts(texts, embeddings)
41 | retv = db.as_retriever()
42 |
43 | # Iterate over batches
44 | for batch_num in range(num_batches):
45 | # Calculate start and end indices for the current batch
46 | start_index = batch_num * batch_size
47 | end_index = (batch_num + 1) * batch_size
48 | # Extract documents for the current batch
49 | batch_documents = all_documents[start_index:end_index]
50 | # Your code to process each document goes here
51 | retv.add_documents(batch_documents)
52 | print(start_index, end_index)
53 |
54 |
55 | db.save_local("faiss_index")
56 |
57 |
58 |
59 |
60 |
--------------------------------------------------------------------------------
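demo-faiss-create.py only builds and saves the index; a minimal sketch of loading it back and querying it (mirroring what demo-eval-model.py does) is shown below. Depending on your LangChain version, `load_local` may additionally require `allow_dangerous_deserialization=True`.

```python
# Sketch: load the FAISS index saved above and run a similarity search against it.
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import OCIGenAIEmbeddings

embeddings = OCIGenAIEmbeddings(
    model_id="cohere.embed-english-v3.0",
    service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
    compartment_id="<>",
)

db = FAISS.load_local("faiss_index", embeddings)
for doc in db.similarity_search("What topics does the AI Foundations course cover?", k=3):
    print(doc.metadata.get("source"), "-", doc.page_content[:100])
```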
/demos/module4/demo-memory.py:
--------------------------------------------------------------------------------
1 | from langchain.memory.buffer import ConversationBufferMemory
2 | from langchain.memory import ConversationSummaryMemory
3 | from langchain.chains import LLMChain
4 | from langchain.prompts import (
5 | ChatPromptTemplate,
6 | HumanMessagePromptTemplate,
7 | SystemMessagePromptTemplate,
8 | )
9 | from langchain_community.llms import OCIGenAI
10 |
11 | #In this demo we will explore using LangChain Memory to store chat history
12 |
13 |
14 | #Step 1 - setup OCI Generative AI llm
15 |
16 | # use default authN method API-key
17 | llm = OCIGenAI(
18 | model_id="cohere.command-light",
19 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
20 | compartment_id="<>",
21 | model_kwargs={"max_tokens":100}
22 | )
23 |
24 | #Step 2 - here we create a prompt
25 | prompt = ChatPromptTemplate(
26 | messages=[
27 | SystemMessagePromptTemplate.from_template(
28 | "You are a nice chatbot who explain in steps."
29 | ),
30 | HumanMessagePromptTemplate.from_template("{question}"),
31 | ]
32 | )
33 |
34 | #Step 3 - here we create a memory to remember our chat with the llm
35 |
36 | memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
37 |
38 | summary_memory = ConversationSummaryMemory(llm=llm , memory_key="chat_history")
39 |
40 | #Step 4 - here we create a conversation chain using llm , prompt and memory
41 |
42 | conversation = LLMChain(llm=llm, prompt=prompt, verbose=True, memory=summary_memory)
43 |
44 | #Step 5 - here we invoke a chain. Notice that we just pass in the `question` variable - `chat_history` gets populated by memory
45 | conversation.invoke({"question": "What is the capital of India"})
46 |
47 | #Step 6 - here we print all the messages in the memory
48 |
49 | print(memory.chat_memory.messages)
50 | print(summary_memory.chat_memory.messages)
51 | print("Summary of the conversation is-->"+summary_memory.buffer)
52 |
53 | #Step 7 - here we ask another question
54 |
55 | conversation.invoke({"question": "what is oci data science certification?"})
56 |
57 | #Step 8 - here we print all the messages in the memory again and see that our last question and response are printed.
58 | print(memory.chat_memory.messages)
59 | print(summary_memory.chat_memory.messages)
60 | print("Summary of the conversation is-->"+summary_memory.buffer)
61 |
62 |
63 |
64 |
65 |
66 |
67 |
--------------------------------------------------------------------------------
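To see what the memory objects in demo-memory.py actually hold, a minimal sketch that writes one turn into a ConversationBufferMemory by hand and reads it back (no LLM call involved) may help:

```python
# Sketch: populate a ConversationBufferMemory manually and inspect what it stores.
from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
memory.save_context(
    {"input": "What is the capital of India"},
    {"output": "The capital of India is New Delhi."},
)

# chat_history now holds the Human/AI message pair a chain would receive on the next turn.
print(memory.load_memory_variables({})["chat_history"])
```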
/demos/module4/demo-oci-genai-ads.py:
--------------------------------------------------------------------------------
1 | import ads
2 |
3 | ads.hello()
4 |
5 | ads.set_auth(auth="api_key")
6 | compartment_id="<>"
7 |
8 | from ads.llm import GenerativeAI
9 |
10 | llm = GenerativeAI(
11 | compartment_id=compartment_id,
12 | # Optionally you can specify keyword arguments for the OCI client, e.g. service_endpoint.
13 | client_kwargs={
14 | "service_endpoint": "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com"
15 | },
16 | )
17 |
18 | response = llm.invoke("Translate the following sentence into French:\nHow are you?\n")
19 |
20 | print(response)
21 |
--------------------------------------------------------------------------------
/demos/module4/demo-ou-chatbot-chroma-final.py:
--------------------------------------------------------------------------------
1 | import chromadb
2 | from langchain_community.vectorstores import Chroma
3 | from chromadb.config import Settings
4 |
5 | from langchain.memory import ConversationBufferMemory
6 | from langchain.chains import ConversationalRetrievalChain
7 | from langchain.retrievers import ContextualCompressionRetriever
8 | from langchain.retrievers.document_compressors import LLMChainExtractor
9 | from langchain.retrievers.document_compressors import CohereRerank
10 | import json
11 | from langchain_community.llms import OCIGenAI
12 | from langchain_community.embeddings import OCIGenAIEmbeddings
13 | import oci
14 |
15 | #In this demo we will explore using a ConversationalRetrievalChain to retrieve relevant documents and send them as context with a query.
16 | # We will use Chroma vectorstore.
17 |
18 |
19 | #Step 1 - this will set up chain , to be called later
20 |
21 | def create_chain():
22 | client = chromadb.HttpClient(host="127.0.0.1",settings=Settings(allow_reset=True))
23 | embeddings = OCIGenAIEmbeddings(
24 | model_id="cohere.embed-english-v3.0",
25 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
26 | compartment_id="<>"
27 | )
28 | db = Chroma(client=client, embedding_function=embeddings)
29 | #retv = db.as_retriever(search_type="mmr", search_kwargs={"k": 7})
30 | retv = db.as_retriever(search_kwargs={"k": 8})
31 |
32 | llm = OCIGenAI(
33 | model_id="cohere.command",
34 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
35 | compartment_id="<>",
36 | model_kwargs={"max_tokens":200}
37 | )
38 | memory = ConversationBufferMemory(llm=llm, memory_key="chat_history", return_messages=True, output_key='answer')
39 |
40 | qa = ConversationalRetrievalChain.from_llm(llm, retriever=retv , memory=memory,
41 | return_source_documents=True)
42 | return qa
43 |
44 | #Step 2 - create chain, here we create a ConversationalRetrievalChain.
45 |
46 | chain = create_chain()
47 |
48 | #Step 3 - here we declare a chat function
49 | def chat(user_message):
50 | # generate a prediction for a prompt
51 | bot_json = chain.invoke({"question": user_message})
52 | print(bot_json)
53 | return {"bot_response": bot_json}
54 |
55 | #Step 4 - here we set up the Streamlit text input and pass the input text to the chat function.
56 | # The chat function returns the response and we print it.
57 |
58 | if __name__ == "__main__":
59 | import streamlit as st
60 | st.subheader("OU Chatbot powered by OCI Generative AI Service")
61 | col1 , col2 = st.columns([4,1])
62 |
63 | user_input = st.chat_input()
64 | with col1:
65 | col1.subheader("------Ask me a question about OCI courses------")
66 | #col2.subheader("References")
67 | if "messages" not in st.session_state:
68 | st.session_state.messages = []
69 | if user_input:
70 | bot_response = chat(user_input)
71 | st.session_state.messages.append({"role" : "chatbot", "content" : bot_response})
72 | #st.write("OU Assistant Response: ", bot_response)
73 | for message in st.session_state.messages:
74 | st.chat_message("user")
75 | st.write("Question: ", message['content']['bot_response']['question'])
76 | st.chat_message("assistant")
77 | st.write("Answer: ", message['content']['bot_response']['answer'])
78 | #with col2:
79 | st.chat_message("assistant")
80 | for doc in message['content']['bot_response']['source_documents']:
81 | st.write("Reference: ", doc.metadata['source'] + " \n"+ "-page->"+str(doc.metadata['page']))
82 |
83 | #st.write("Reference: ", doc.metadata['source'] + " \n"+ "-page->"+str(doc.metadata['page']) +
84 | # " \n"+ "-relevance score->"+ str(doc.metadata['relevance_score'])
85 | # )
86 |
--------------------------------------------------------------------------------
/demos/module4/demo-prompts.py:
--------------------------------------------------------------------------------
1 | from langchain_core.prompts import PromptTemplate
2 | from langchain_core.prompts import ChatPromptTemplate
3 | from langchain_core.prompts import HumanMessagePromptTemplate
4 | from langchain_core.messages import HumanMessage, SystemMessage
5 |
6 | from langchain_community.llms import OCIGenAI
7 | import oci
8 |
9 | #langchain.debug = True
10 |
11 | #In this demo we will explore using LangChain Prompt templates
12 |
13 |
14 | #Step 1 - setup OCI Generative AI llm
15 |
16 | # Service endpoint
17 | endpoint = "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com"
18 |
19 | # use default authN method
20 | llm = OCIGenAI(
21 | model_id="cohere.command",
22 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
23 | compartment_id="<>",
24 | model_kwargs={"max_tokens":200}
25 | )
26 |
27 | #Step 2 - invoke llm with a fixed text input
28 |
29 | response = llm.invoke("Tell me one fact about space", temperature=0.7)
30 | print("Case1 Response - > "+ response)
31 |
32 | #Step 3 - Use String Prompt to accept text input. Here we create a template and declare the input variables {human_input} and {city}
33 |
34 | #String prompt
35 |
36 | template = """You are a chatbot having a conversation with a human.
37 | Human: {human_input} + {city}
38 | :"""
39 |
40 | #Step 4 - here we create a Prompt using the template
41 |
42 | prompt = PromptTemplate(input_variables=["human_input", "city"], template=template)
43 |
44 | prompt_val = prompt.invoke({"human_input":"Tell us in a exciting tone about", "city":"Las Vegas"})
45 | print("Prompt String is ->")
46 | print(prompt_val.to_string())
47 |
48 | #Step 5 - here we declare a chain that pipes the prompt into the llm
49 |
50 | chain = prompt | llm
51 |
52 | #Step 6 - Next we invoke a chain and provide input question
53 |
54 | response = chain.invoke({"human_input":"Tell us in a exciting tone about", "city":"Las Vegas"})
55 |
56 | #Step 7 - print the prompt and response from the llm
57 |
58 | print("Case2 Response - >" + response)
59 |
60 |
61 | #Step 8 - Use Chat Message Prompt to accept text input. Here we create a chat template and use HumanMessage and SystemMessage
62 |
63 | prompt = ChatPromptTemplate.from_messages(
64 | [
65 | ("system", "You are a chatbot that explains in steps."),
66 | ("ai", "I shall explain in steps"),
67 | ("human", "{input}"),
68 | ]
69 | )
70 |
71 | chain = prompt | llm
72 | response = chain.invoke({"input": "What's the New York culture like?"})
73 | print("Case3 Response - > "+ response)
74 |
75 | #Step 9 - another example with .from_template()
76 |
77 | prompt = ChatPromptTemplate.from_template("Tell me a joke about {animal}")
78 | chain1 = prompt | llm
79 | response = chain1.invoke({"animal": "zebra"})
80 | print("Case4 Response - > "+ response)
81 |
82 | #another example
83 |
84 | chat_template = ChatPromptTemplate.from_messages(
85 | [
86 | SystemMessage(
87 | content=(
88 | "You are a helpful assistant that re-writes the user's text to "
89 | "sound more upbeat."
90 | )
91 | ),
92 | HumanMessagePromptTemplate.from_template("{text}"),
93 | ]
94 | )
95 |
96 | chain2 = chat_template | llm
97 | response = chain2.invoke({"text":"I don't like eating tasty things"})
98 | print("Case5 Response ->")
99 |
100 | print(response)
101 |
102 |
--------------------------------------------------------------------------------
/demos/module4/demo-retrieval-chroma.py:
--------------------------------------------------------------------------------
1 | from langchain.chains import RetrievalQA
2 | import chromadb
3 | from langchain_community.vectorstores import Chroma
4 | from langchain_community.llms import OCIGenAI
5 | from langchain_community.embeddings import OCIGenAIEmbeddings
6 |
7 | #In this demo we will explore using a RetrievalQA chain with a Chroma vectorstore to retrieve relevant documents and send them as context in a query.
8 |
9 | #Step 1 - setup OCI Generative AI llm
10 |
11 | # use default authN method API-key
12 | llm = OCIGenAI(
13 | model_id="cohere.command-light",
14 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
15 | compartment_id="<>",
16 | model_kwargs={"max_tokens":100}
17 | )
18 |
19 | #Step 2 - here we connect to a chromadb server. We need to run the chromadb server before we connect to it.
20 |
21 | client = chromadb.HttpClient(host="127.0.0.1")
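# (A local Chroma server is assumed to be listening on 127.0.0.1; with recent Chroma versions it can
#  usually be started with something like `chroma run --path ./chromadb`, though the exact command may
#  vary by version.)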
22 |
23 | #Step 3 - here we create embeddings using the "cohere.embed-english-v3.0" model.
24 |
25 | embeddings = OCIGenAIEmbeddings(
26 | model_id="cohere.embed-english-v3.0",
27 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
28 | compartment_id="<>",
29 | )
30 |
31 | #Step 4 - here we create a retriever that gets relevant documents (similar in meaning to a query)
32 |
33 | db = Chroma(client=client, embedding_function=embeddings)
34 |
35 | retv = db.as_retriever(search_type="similarity", search_kwargs={"k": 5})
36 |
37 | #Step 5 - here we can explore how documents similar to the query are returned, by printing the document metadata. This step is optional
38 |
39 | docs = retv.get_relevant_documents('Tell us which module is most relevant to LLMs and Generative AI')
40 |
41 | def pretty_print_docs(docs):
42 | print(
43 | f"\n{'-' * 100}\n".join(
44 | [f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
45 | )
46 | )
47 |
48 | pretty_print_docs(docs)
49 |
50 | for doc in docs:
51 | print(doc.metadata)
52 |
53 | #Step 6 - here we create a retrieval chain that takes the llm and retriever objects, and invoke it to get a response to our query
54 |
55 | chain = RetrievalQA.from_chain_type(llm=llm, retriever=retv,return_source_documents=True)
56 |
57 | response = chain.invoke("Tell us which module is most relevant to LLMs and Generative AI")
58 |
59 | print(response)
60 |
61 |
62 |
--------------------------------------------------------------------------------
/demos/module4/demo-retrieval-faiss.py:
--------------------------------------------------------------------------------
1 | from langchain.chains import RetrievalQA
2 | from langchain_community.vectorstores import FAISS
3 |
4 | from langchain_community.llms import OCIGenAI
5 | from langchain_community.embeddings import OCIGenAIEmbeddings
6 |
7 | #In this demo we will explore using a RetrievalQA chain to retrieve relevant documents and send these as context in a query.
8 | # We will use a FAISS vectorstore.
9 |
10 | #Step 1 - setup OCI Generative AI llm
11 |
12 | # use default authN method API-key
13 | llm = OCIGenAI(
14 | model_id="cohere.command-light",
15 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
16 | compartment_id="<>",
17 | model_kwargs={"max_tokens":100}
18 | )
19 |
20 | #Step 2 - here we create embeddings using the "cohere.embed-english-v3.0" model.
21 |
22 | embeddings = OCIGenAIEmbeddings(
23 | model_id="cohere.embed-english-v3.0",
24 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
25 | compartment_id="compartment_id="<>",
26 | )
27 |
28 | #Step 4 - here we load the index and create a retriever that gets relevant documents (similar in meaning to a query)
29 |
30 | db = FAISS.load_local("faiss_index", embeddings)
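# Note: newer langchain_community versions require opting in to pickle deserialization here, e.g.
# db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
# as done in demos/module4_new/demo-eval-model-v1.py.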
31 |
32 | #retv = db.as_retriever(search_kwargs={"k": 3})
33 |
34 | retv = db.as_retriever(search_type="similarity", search_kwargs={"k": 5})
35 |
36 | #Step 5 - here we can explore how documents similar to the query are returned, by printing the document metadata. This step is optional
37 |
38 | docs = retv.get_relevant_documents('Tell us which module is most relevant to LLMs and Generative AI')
39 |
40 | def pretty_print_docs(docs):
41 | print(
42 | f"\n{'-' * 100}\n".join(
43 | [f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
44 | )
45 | )
46 |
47 | pretty_print_docs(docs)
48 |
49 | for doc in docs:
50 | print(doc.metadata)
51 |
52 |
53 | #Step 6 - here we create a retrieval chain that takes the llm and retriever objects, and invoke it to get a response to our query
54 |
55 | chain = RetrievalQA.from_chain_type(llm=llm, retriever=retv,return_source_documents=True)
56 |
57 | response = chain.invoke("Tell us which module is relevant to LLMs and Generative AI")
58 |
59 | print(response)
60 |
61 |
62 |
63 |
--------------------------------------------------------------------------------
/demos/module4/demo-retrieval-memory-chroma-traces.py:
--------------------------------------------------------------------------------
1 | from langchain.memory import ConversationBufferMemory
2 | from langchain.chains import ConversationalRetrievalChain
3 | import chromadb
4 | from langchain_community.vectorstores import Chroma
5 | from chromadb.config import Settings
6 |
7 | from langchain_community.llms import OCIGenAI
8 | from langchain_community.embeddings import OCIGenAIEmbeddings
9 |
10 | import os
11 | from uuid import uuid4
12 |
13 | unique_id = uuid4().hex[0:8]
14 | os.environ["LANGCHAIN_TRACING_V2"] = "true"
15 | os.environ["LANGCHAIN_PROJECT"] = f"Test111 - {unique_id}"
16 | os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
17 | os.environ["LANGCHAIN_API_KEY"] = "<>" # Update to your API ke
18 |
19 | #In this demo we will explore using a ConversationalRetrievalChain with memory to retrieve relevant documents and send these as context in a query.
20 | # We will use a Chroma vectorstore.
21 |
22 |
23 | #Step 1 - setup OCI Generative AI llm
24 |
25 | # use default authN method API-key
26 | llm = OCIGenAI(
27 | model_id="cohere.command",
28 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
29 | compartment_id="<>",
30 | model_kwargs={"max_tokens":400}
31 | )
32 |
33 | #Step 2 - here we connect to a chromadb server. We need to run the chromadb server before we connect to it.
34 |
35 | client = chromadb.HttpClient(host="127.0.0.1",settings=Settings(allow_reset=True))
36 |
37 | #Step 3 - here we create embeddings using the "cohere.embed-english-v3.0" model.
38 |
39 | embeddings = OCIGenAIEmbeddings(
40 | model_id="cohere.embed-english-v3.0",
41 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
42 | compartment_id="<>",
43 | )
44 |
45 | #Step 4 - here we create a retriever that gets relevant documents (similar in meaning to a query)
46 |
47 | db = Chroma(client=client, embedding_function=embeddings)
48 |
49 | retv = db.as_retriever(search_type="similarity", search_kwargs={"k": 8})
50 |
51 | def pretty_print_docs(docs):
52 | print(
53 | f"\n{'-' * 100}\n".join(
54 | [f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
55 | )
56 | )
57 |
58 | #Step 5 - here we create a memory to remember chat messages.
59 |
60 | memory = ConversationBufferMemory(llm=llm, memory_key="chat_history", return_messages=True, output_key='answer')
61 |
62 |
63 | #Step 6 - here we create a chain that uses llm, retriever and memory.
64 |
65 | #You can also define the chain type as one of the four options: "stuff", "map_reduce", "refine", "map_rerank".
66 |
67 | qa = ConversationalRetrievalChain.from_llm(llm, retriever=retv, memory=memory, return_source_documents=True)
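#For example, to use the map_reduce strategy instead of the default stuff chain (a commented-out sketch, not used below):
#qa = ConversationalRetrievalChain.from_llm(llm, retriever=retv, memory=memory, chain_type="map_reduce", return_source_documents=True)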
68 |
69 | response = qa.invoke({"question": "Tell us about Oracle Cloud Infrastructure AI Foundations course"})
70 | print(memory.chat_memory.messages)
71 |
72 |
73 | response = qa.invoke({"question": "Which module of the course is relevant to the LLMs and Transformers"})
74 | print(memory.chat_memory.messages)
75 |
76 | print(response)
77 |
--------------------------------------------------------------------------------
/demos/module4/demo-runnable-parallel.py:
--------------------------------------------------------------------------------
1 | import ads
2 | from langchain_core.runnables import RunnableParallel
3 | from langchain_core.prompts import ChatPromptTemplate
4 |
5 | ads.set_auth(auth="api_key")
6 | compartment_id="<>"
7 |
8 | from ads.llm import GenerativeAI
9 |
10 | model = GenerativeAI(
11 | compartment_id=compartment_id,
12 | # Optionally you can specify keyword arguments for the OCI client, e.g. service_endpoint.
13 | client_kwargs={
14 | "service_endpoint": "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com"
15 | },
16 | )
17 |
18 | chain1 = ChatPromptTemplate.from_template("tell me a joke about {topic1}") | model
19 | chain2 = ChatPromptTemplate.from_template("write a short (2 line) poem about {topic2}") | model
20 |
21 | combined = RunnableParallel(joke=chain1, poem=chain2)
22 |
23 | response = combined.invoke({"topic1":"pig","topic2":"parrot"})
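# response is a dict keyed by the branch names, e.g. {"joke": "...", "poem": "..."}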
24 |
25 | print(response)
26 |
--------------------------------------------------------------------------------
/demos/module4/demo-sessions.py:
--------------------------------------------------------------------------------
1 | from langchain.memory import ConversationBufferMemory
2 | from langchain_community.chat_message_histories import StreamlitChatMessageHistory
3 | from langchain.chains import LLMChain
4 | from langchain.prompts import PromptTemplate
5 | from langchain_community.llms import OCIGenAI
6 |
7 |
8 | #In this demo we will explore using Streamlit session to store chat messages
9 |
10 |
11 | #Step 1 - setup OCI Generative AI llm
12 |
13 | # use default authN method API-key
14 | llm = OCIGenAI(
15 | model_id="cohere.command-light",
16 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
17 | compartment_id="<>",
18 | model_kwargs={"max_tokens":100}
19 | )
20 |
21 | #Step 2 - here we create a history with the key "chat_messages".
22 |
23 | #StreamlitChatMessageHistory will store messages in Streamlit session state at the specified key.
24 | #A given StreamlitChatMessageHistory will NOT be persisted or shared across user sessions.
25 |
26 | history = StreamlitChatMessageHistory(key="chat_messages")
27 |
28 | #Step 3 - here we create a memory object
29 |
30 | memory = ConversationBufferMemory(chat_memory=history)
31 |
32 | #Step 4 - here we create template and prompt to accept a question
33 |
34 | template = """You are an AI chatbot having a conversation with a human.
35 | Human: {human_input}
36 | AI: """
37 | prompt = PromptTemplate(input_variables=["human_input"], template=template)
38 |
39 | #Step 5 - here we create a chain object
40 |
41 | llm_chain = LLMChain(llm=llm, prompt=prompt, memory=memory)
42 |
43 | #Step 6 - here we use streamlit to print all messages in the memory, create a text input, and run the chain;
44 | #the question and response are automatically put in the StreamlitChatMessageHistory
45 |
46 | import streamlit as st
47 |
48 | st.title('🦜🔗 Welcome to the ChatBot')
49 | for msg in history.messages:
50 | st.chat_message(msg.type).write(msg.content)
51 |
52 | if x := st.chat_input():
53 | st.chat_message("human").write(x)
54 |
55 | # As usual, new messages are added to StreamlitChatMessageHistory when the Chain is called.
56 | response = llm_chain.run(x)
57 | st.chat_message("ai").write(response)
58 |
59 |
60 |
61 |
62 |
63 |
--------------------------------------------------------------------------------
/demos/module4/demo-streamlit.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 | from langchain_community.llms import OCIGenAI
3 | import oci
4 |
5 | #In this demo we will explore using Streamlit to input a question to llm and display the response
6 |
7 | #Step 1 - authenticate using "DEFAULT" profile
8 |
9 | compartment_id = "ocid1.compartment.oc1..aaaaaaaah3o77etbcfg2o25jxks2pucmyrz6veg26z5lgpx3q355nikleemq"
10 | CONFIG_PROFILE = "DEFAULT"
11 | config = oci.config.from_file('~/.oci/config', CONFIG_PROFILE)
12 |
13 | #Step 2 - setup OCI Generative AI llm
14 |
15 | # Service endpoint
16 | endpoint = "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com"
17 |
18 | # use default authN method API-key
19 | llm = OCIGenAI(
20 | model_id="cohere.command-light",
21 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
22 | compartment_id="ocid1.compartment.oc1..aaaaaaaah3o77etbcfg2o25jxks2pucmyrz6veg26z5lgpx3q355nikleemq",
23 | model_kwargs={"max_tokens":100}
24 | )
25 |
26 | #Step 3 - we define a function to return response
27 |
28 | def generate_response(input_text):
29 |     st.info(llm.invoke(input_text))
30 |
31 | #Step 4 - here we write a quick streamlit application that accepts text input (question) and
32 | # on clicking a 'Submit' button, call a function that generates the response
33 |
34 | st.title('🦜🔗 Welcome to the ChatBot')
35 | with st.form('my_form'):
36 | text = st.text_area('Enter text:', 'What are the three key pieces of advice for learning how to code?')
37 | submitted = st.form_submit_button('Submit')
38 | if submitted :
39 | generate_response(text)
40 |
--------------------------------------------------------------------------------
/demos/module4_new/LoadProperties.py:
--------------------------------------------------------------------------------
1 | class LoadProperties:
2 |
3 | def __init__(self):
4 |
5 | import json
6 | # reading the data from the file
7 | with open('config.txt') as f:
8 | data = f.read()
9 |
10 | js = json.loads(data)
11 |
12 | self.model_name = js["model_name"]
13 | self.endpoint = js["endpoint"]
14 | self.compartment_ocid = js["compartment_ocid"]
15 | self.embedding_model_name=js["embedding_model_name"]
16 | self.langchain_key = js["langchain_key"]
17 | self.langchain_endpoint = js["langchain_endpoint"]
18 |
19 | def getModelName(self):
20 | return self.model_name
21 |
22 | def getEndpoint(self):
23 | return self.endpoint
24 |
25 | def getCompartment(self):
26 | return self.compartment_ocid
27 |
28 | def getEmbeddingModelName(self):
29 | return self.embedding_model_name
30 |
31 | def getLangChainKey(self):
32 | return self.langchain_key
33 |
34 | def getlangChainEndpoint(self):
35 | return self.langchain_endpoint
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
--------------------------------------------------------------------------------
/demos/module4_new/config.txt:
--------------------------------------------------------------------------------
1 | {"model_name":"cohere.command-r-16k",
2 | "embedding_model_name":"cohere.embed-english-v3.0",
3 | "endpoint":"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
4 | "compartment_ocid":"",
5 | "langchain_endpoint":"https://api.smith.langchain.com",
6 | "langchain_key":""
7 | }
--------------------------------------------------------------------------------
/demos/module4_new/demo-RAG-Oracle23ai-Retrieval.py:
--------------------------------------------------------------------------------
1 | import oracledb
2 | from langchain_community.vectorstores.utils import DistanceStrategy
3 | from langchain_core.prompts import PromptTemplate
4 | from langchain_core.runnables import RunnablePassthrough
5 | from langchain_core.output_parsers import StrOutputParser
6 | from langchain_community.vectorstores.oraclevs import OracleVS
7 | from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
8 | from langchain_community.embeddings import OCIGenAIEmbeddings
9 | print("Successfully imported libraries and modules")
10 |
11 | #Declare username, password and dsn (data source name / database connection string)
12 | username = ""
13 | password = ""
14 | dsn = ''''''
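# (username, password and dsn are intentionally left blank here; a dsn would be a connect string for
#  your own 23ai instance, e.g. a hypothetical easy-connect string such as "localhost:1521/FREEPDB1")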
15 |
16 | # Connect to the database
17 | try:
18 | conn23c = oracledb.connect(user=username, password=password, dsn=dsn)
19 | print("Connection successful!")
20 | except Exception as e:
21 | print("Connection failed!")
22 |
23 | # Retrieval Step 1 - Build the llm , embed_model and prompt to query the document
24 | COMPARTMENT_OCID = ""
25 |
26 | llm = ChatOCIGenAI(
27 | model_id="meta.llama-3-70b-instruct",
28 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
29 | compartment_id=COMPARTMENT_OCID,
30 | model_kwargs={"temperature": 0.7, "max_tokens": 400},
31 | )
32 |
33 | embed_model = OCIGenAIEmbeddings(
34 | model_id="cohere.embed-english-v3.0",
35 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
36 | compartment_id=COMPARTMENT_OCID
37 | )
38 |
39 | # Set up the template for the questions and context, and instantiate the database retriever object
40 | template = """Answer the question based only on the following context:
41 | {context} Question: {question} """
42 | prompt = PromptTemplate.from_template(template)
43 |
44 |
45 | # Retrieval Step 2 - Create retriever without ingesting documents again.
46 |
47 | vs = OracleVS(embedding_function=embed_model, client=conn23c, table_name="MY_DEMO", distance_strategy=DistanceStrategy.DOT_PRODUCT)
48 |
49 | retriever = vs.as_retriever(search_type="similarity", search_kwargs={'k': 3})
50 |
51 | chain = (
52 | {"context": retriever, "question": RunnablePassthrough()}
53 | | prompt
54 | | llm
55 | | StrOutputParser()
56 | )
57 |
58 | user_question = ("Tell us about Module 4 of AI Foundations Certification course.")
59 |
60 | response = chain.invoke(user_question)
61 |
62 | print("User questions was ->", user_question)
63 | print("LLM response is->", response)
--------------------------------------------------------------------------------
/demos/module4_new/demo-RAG-oracle23ai-Embed.py:
--------------------------------------------------------------------------------
1 | from PyPDF2 import PdfReader
2 | import oracledb
3 | from langchain.text_splitter import CharacterTextSplitter
4 | from langchain_huggingface import HuggingFaceEmbeddings
5 | from langchain_community.vectorstores.oraclevs import OracleVS
6 | from langchain_community.embeddings import OCIGenAIEmbeddings
7 | from langchain_community.vectorstores.utils import DistanceStrategy
8 | from langchain_core.documents import BaseDocumentTransformer, Document
9 | print("Successfully imported libraries and modules")
10 |
11 | #Declare username, password and dsn (data source name / database connection string)
12 | username = ""
13 | password = ""
14 | dsn = ''''''
15 |
16 | # Connect to the database
17 | try:
18 | conn23c = oracledb.connect(user=username, password=password, dsn=dsn)
19 | print("Connection successful!")
20 | except Exception as e:
21 | print("Connection failed!")
22 |
23 | # RAG Step 1 - Load the document and create pdf reader object
24 |
25 | pdf = PdfReader('./pdf-docs/Oracle Cloud Infrastructure AI Foundations.pdf')
26 |
27 |
28 | # RAG Step 2 - Transform the document to text
29 |
30 | text=""
31 | for page in pdf.pages:
32 | text += page.extract_text()
33 | print("You have transformed the PDF document to text format")
34 |
35 | # RAG Step 3 - Chunk the text document into smaller chunks
36 | text_splitter = CharacterTextSplitter(separator=".",chunk_size=2000,chunk_overlap=100)
37 | chunks = text_splitter.split_text(text)
38 |
39 | # Function to format and add metadata to Oracle 23ai Vector Store
40 |
41 | def chunks_to_docs_wrapper(row: dict) -> Document:
42 | """
43 | Converts text into a Document object suitable for ingestion into Oracle Vector Store.
44 | - row (dict): A dictionary representing a row of data with keys for 'id', 'link', and 'text'.
45 | """
46 | metadata = {'id': row['id'], 'link': row['link']}
47 | return Document(page_content=row['text'], metadata=metadata)
48 |
49 | # RAG Step 4 - Create metadata wrapper to store additional information in the vector store
50 | """
51 | Converts a row from a DataFrame into a Document object suitable for ingestion into Oracle Vector Store.
52 | - row (dict): A dictionary representing a row of data with keys for 'id', 'link', and 'text'.
53 | """
54 | docs = [chunks_to_docs_wrapper({'id': str(page_num), 'link': f'Page {page_num}', 'text': text}) for page_num, text in enumerate(chunks)]
55 |
56 | COMPARTMENT_OCID = ""
57 |
58 | embed_model = OCIGenAIEmbeddings(
59 | model_id="cohere.embed-english-v3.0",
60 | service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
61 | compartment_id=COMPARTMENT_OCID
62 | )
63 |
64 | # RAG Step 5 - Using an embedding model, embed the chunks as vectors into Oracle Database 23ai.
65 |
66 | #model_4db = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
67 |
68 | # RAG Step 6 - Configure the vector store with the model, table name, and using the indicated distance strategy for the similarity search and vectorize the chunks
69 |
70 | knowledge_base = OracleVS.from_documents(docs, embed_model, client=conn23c, table_name="MY_DEMO", distance_strategy=DistanceStrategy.DOT_PRODUCT)
71 |
72 |
73 |
74 |
--------------------------------------------------------------------------------
/demos/module4_new/demo-chains-v1.py:
--------------------------------------------------------------------------------
1 | from langchain.prompts import ChatPromptTemplate
2 | from langchain.schema import StrOutputParser
3 | from langchain.chains import LLMChain
4 | from LoadProperties import LoadProperties
5 | from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
6 |
7 | #In this demo we will explore building a LangChain chain using both the legacy chain class and the declarative (LCEL) approach.
8 |
9 | #Step 1 - setup OCI Generative AI llm
10 |
11 | properties = LoadProperties()
12 |
13 | llm = ChatOCIGenAI(
14 | model_id=properties.getModelName(),
15 | service_endpoint=properties.getEndpoint(),
16 | compartment_id=properties.getCompartment(),
17 | model_kwargs={"max_tokens":200}
18 | )
19 |
20 | #Step 2 - use chat messge template and messages and pass {question}
21 |
22 | prompt = ChatPromptTemplate.from_messages([
23 | ("system", "You're a very knowledgeable scientist who provides accurate and eloquent answers to scientific questions."),
24 | ("human", "{question}")
25 | ])
26 |
27 | #Step 3 - create a chain using LLMChain class and invoke a chain to get a response.
28 | #legacy chain
29 |
30 | #chain = LLMChain(llm=llm, prompt=prompt, output_parser=StrOutputParser())
31 | #response = chain.invoke({"question":"What are basic elements of a matter"})
32 | #print("Response from legacy chain")
33 | #print(response)
34 |
35 | #Step 4 - here we use langchain expression language to compose a chain and invoke it.
36 | #lcel chain
37 |
38 | runnable = prompt | llm | StrOutputParser()
39 | response = runnable.invoke({"question": "What are basic elements of a matter"})
40 | print("Response from LECL Chain")
41 | print(response)
42 |
43 |
44 |
45 |
46 |
--------------------------------------------------------------------------------
/demos/module4_new/demo-chroma-create-v1.py:
--------------------------------------------------------------------------------
1 | from langchain_community.embeddings import OCIGenAIEmbeddings
2 | from langchain.text_splitter import RecursiveCharacterTextSplitter
3 | from langchain_community.document_loaders import PyPDFDirectoryLoader
4 | from langchain_community.vectorstores.chroma import Chroma
5 | from LoadProperties import LoadProperties
6 |
7 | #This demo creates Chroma Vector Store
8 |
9 | # Step 1 - load and split documents
10 |
11 | pdf_loader = PyPDFDirectoryLoader("./pdf-docs" )
12 | loaders = [pdf_loader]
13 |
14 | documents = []
15 | for loader in loaders:
16 | documents.extend(loader.load())
17 |
18 | text_splitter = RecursiveCharacterTextSplitter(chunk_size=2500, chunk_overlap=100)
19 | all_documents = text_splitter.split_documents(documents)
20 |
21 | print(f"Total number of documents: {len(all_documents)}")
22 |
23 | #Step 2 - setup OCI Generative AI llm
24 |
25 | properties = LoadProperties()
26 |
27 | embeddings = OCIGenAIEmbeddings(
28 | model_id=properties.getEmbeddingModelName(),
29 | service_endpoint=properties.getEndpoint(),
30 | compartment_id=properties.getCompartment(),
31 | model_kwargs={"truncate":True}
32 | )
33 |
34 | #Step 3 - since OCIGenAIEmbeddings accepts at most 96 documents per request, we will add documents in batches.
35 |
36 | # Set the batch size
37 | batch_size = 96
38 |
39 | # Calculate the number of batches
40 | num_batches = len(all_documents) // batch_size + (len(all_documents) % batch_size > 0)
41 |
42 | db = Chroma(embedding_function=embeddings , persist_directory="./chromadb")
43 | retv = db.as_retriever()
44 |
45 | # Iterate over batches
46 | for batch_num in range(num_batches):
47 | # Calculate start and end indices for the current batch
48 | start_index = batch_num * batch_size
49 | end_index = (batch_num + 1) * batch_size
50 | # Extract documents for the current batch
51 | batch_documents = all_documents[start_index:end_index]
52 |     # Add the current batch of documents to the vector store
53 | retv.add_documents(batch_documents)
54 | print(start_index, end_index)
55 |
56 | #Step 4 - here we persist the collection
57 | #Since Chroma 0.4.x the manual persistence method is no longer supported as docs are automatically persisted.
58 | #db.persist()
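#To reuse the persisted collection in a later session, the store can simply be re-opened against the same
#directory (a minimal sketch, assuming the same embeddings object and persist_directory):
#db = Chroma(embedding_function=embeddings, persist_directory="./chromadb")
#retv = db.as_retriever()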
59 |
60 |
61 |
62 |
63 |
64 |
--------------------------------------------------------------------------------
/demos/module4_new/demo-create-eval-dataset-v1.py:
--------------------------------------------------------------------------------
1 | import os
2 | from uuid import uuid4
3 | from langsmith import Client
4 | from LoadProperties import LoadProperties
5 |
6 | #In this demo we create dataset for model evaluation
7 |
8 | properties = LoadProperties()
9 |
10 | unique_id = uuid4().hex[0:8]
11 | os.environ["LANGCHAIN_TRACING_V2"] = "true"
12 | os.environ["LANGCHAIN_ENDPOINT"] = properties.getlangChainEndpoint()
13 | os.environ["LANGCHAIN_API_KEY"] = properties.getLangChainKey() # Update to your API key
14 |
15 | # create dataset for evaluation
16 | dataset_inputs = [
17 | "Tell us about Oracle Cloud Infrastructure AI Foundations Course and Certification",
18 | "Tell us which module in this course is relevant to Deep Learning.",
19 | "Tell us about which module is relevant to LLMs and Transformers",
20 | "Tell me about instructors of this course"
21 | # ... add more as desired
22 | ]
23 |
24 | # Outputs are provided to the evaluator, so it knows what to compare to
25 | # Outputs are optional but recommended.
26 | dataset_outputs = [
27 | {"must_mention": ["AI", "LLM"]},
28 | {"must_mention": ["CNN", "Neural Network"]},
29 | {"must_mention": ["Module 5", "Transformer", "LLM"]},
30 | {"must_mention": ["Hemant", "Himanshu", "Nick"]}
31 | ]
32 |
33 | client = Client()
34 | dataset_name = "AIFoundationsDS-112"
35 |
36 | # Storing inputs in a dataset lets us
37 | # run chains and LLMs over a shared set of examples.
38 | dataset = client.create_dataset(
39 | dataset_name=dataset_name,
40 | description="AI Foundations QA.",
41 | )
42 | client.create_examples(
43 | inputs=[{"question": q} for q in dataset_inputs],
44 | outputs=dataset_outputs,
45 | dataset_id=dataset.id,
46 | )
47 |
--------------------------------------------------------------------------------
/demos/module4_new/demo-environment.yml:
--------------------------------------------------------------------------------
1 | name: demo-11
2 | channels:
3 | - conda-forge
4 | - defaults
5 | dependencies:
6 | - bzip2=1.0.8=h6c40b1e_6
7 | - ca-certificates=2024.3.11=hecd8cb5_0
8 | - libffi=3.4.4=hecd8cb5_1
9 | - ncurses=6.4=hcec6c5f_0
10 | - openssl=3.0.14=h46256e1_0
11 | - pip=24.0=py311hecd8cb5_0
12 | - python=3.11.9=hf27a42d_0
13 | - readline=8.2=hca72f7f_0
14 | - setuptools=69.5.1=py311hecd8cb5_0
15 | - sqlite=3.45.3=h6c40b1e_0
16 | - tk=8.6.14=h4d00af3_0
17 | - wheel=0.43.0=py311hecd8cb5_0
18 | - xz=5.4.6=h6c40b1e_1
19 | - zlib=1.2.13=h4b97444_1
20 | - pip:
21 | - aiohttp==3.9.5
22 | - aiosignal==1.3.1
23 | - altair==5.3.0
24 | - annotated-types==0.7.0
25 | - anyio==4.4.0
26 | - asgiref==3.8.1
27 | - asteval==1.0.0
28 | - attrs==23.2.0
29 | - backoff==2.2.1
30 | - bcrypt==4.1.3
31 | - blinker==1.8.2
32 | - build==1.2.1
33 | - cachetools==5.3.3
34 | - cerberus==1.3.5
35 | - certifi==2024.6.2
36 | - cffi==1.16.0
37 | - charset-normalizer==3.3.2
38 | - chroma-hnswlib==0.7.3
39 | - chromadb==0.5.3
40 | - circuitbreaker==1.4.0
41 | - click==8.1.7
42 | - cloudpickle==3.0.0
43 | - coloredlogs==15.0.1
44 | - contourpy==1.2.1
45 | - cryptography==42.0.8
46 | - cycler==0.12.1
47 | - dataclasses-json==0.6.7
48 | - deprecated==1.2.14
49 | - dnspython==2.6.1
50 | - email-validator==2.2.0
51 | - faiss-cpu==1.8.0.post1
52 | - fastapi==0.111.0
53 | - fastapi-cli==0.0.4
54 | - filelock==3.15.4
55 | - flatbuffers==24.3.25
56 | - fonttools==4.53.0
57 | - frozenlist==1.4.1
58 | - fsspec==2024.6.1
59 | - gitdb==4.0.11
60 | - gitpython==3.1.43
61 | - google-auth==2.31.0
62 | - googleapis-common-protos==1.63.2
63 | - greenlet==3.0.3
64 | - grpcio==1.64.1
65 | - h11==0.14.0
66 | - httpcore==1.0.5
67 | - httptools==0.6.1
68 | - httpx==0.27.0
69 | - huggingface-hub==0.23.4
70 | - humanfriendly==10.0
71 | - idna==3.7
72 | - importlib-metadata==7.1.0
73 | - importlib-resources==6.4.0
74 | - inflection==0.5.1
75 | - jinja2==3.1.4
76 | - joblib==1.4.2
77 | - jsonpatch==1.33
78 | - jsonpointer==3.0.0
79 | - jsonschema==4.22.0
80 | - jsonschema-specifications==2023.12.1
81 | - kiwisolver==1.4.5
82 | - kubernetes==30.1.0
83 | - langchain==0.2.6
84 | - langchain-chroma==0.1.2
85 | - langchain-community==0.2.6
86 | - langchain-core==0.2.11
87 | - langchain-text-splitters==0.2.2
88 | - langsmith==0.1.83
89 | - markdown==3.6
90 | - markdown-it-py==3.0.0
91 | - markupsafe==2.1.5
92 | - marshmallow==3.21.3
93 | - matplotlib==3.8.4
94 | - mdurl==0.1.2
95 | - mmh3==4.1.0
96 | - monotonic==1.6
97 | - mpmath==1.3.0
98 | - multidict==6.0.5
99 | - mypy-extensions==1.0.0
100 | - numpy==1.26.4
101 | - oauthlib==3.2.2
102 | - oci==2.129.1
103 | - ocifs==1.3.1
104 | - onnxruntime==1.16.3
105 | - opentelemetry-api==1.25.0
106 | - opentelemetry-exporter-otlp-proto-common==1.25.0
107 | - opentelemetry-exporter-otlp-proto-grpc==1.25.0
108 | - opentelemetry-instrumentation==0.46b0
109 | - opentelemetry-instrumentation-asgi==0.46b0
110 | - opentelemetry-instrumentation-fastapi==0.46b0
111 | - opentelemetry-proto==1.25.0
112 | - opentelemetry-sdk==1.25.0
113 | - opentelemetry-semantic-conventions==0.46b0
114 | - opentelemetry-util-http==0.46b0
115 | - oracle-ads==2.11.14
116 | - orjson==3.10.6
117 | - overrides==7.7.0
118 | - packaging==24.1
119 | - pandas==2.2.2
120 | - pillow==10.4.0
121 | - posthog==3.5.0
122 | - protobuf==4.25.3
123 | - psutil==6.0.0
124 | - pyarrow==16.1.0
125 | - pyasn1==0.6.0
126 | - pyasn1-modules==0.4.0
127 | - pycparser==2.22
128 | - pydantic==2.8.0
129 | - pydantic-core==2.20.0
130 | - pydeck==0.9.1
131 | - pygments==2.18.0
132 | - pyopenssl==24.1.0
133 | - pyparsing==3.1.2
134 | - pypdf==4.2.0
135 | - pypika==0.48.9
136 | - pyproject-hooks==1.1.0
137 | - python-dateutil==2.9.0.post0
138 | - python-dotenv==1.0.1
139 | - python-jsonschema-objects==0.5.5
140 | - python-multipart==0.0.9
141 | - pytz==2024.1
142 | - pyyaml==6.0.1
143 | - referencing==0.35.1
144 | - requests==2.32.3
145 | - requests-oauthlib==2.0.0
146 | - rich==13.7.1
147 | - rpds-py==0.18.1
148 | - rsa==4.9
149 | - scikit-learn==1.5.0
150 | - scipy==1.14.0
151 | - shellingham==1.5.4
152 | - six==1.16.0
153 | - smmap==5.0.1
154 | - sniffio==1.3.1
155 | - sqlalchemy==2.0.31
156 | - starlette==0.37.2
157 | - streamlit==1.36.0
158 | - sympy==1.12.1
159 | - tabulate==0.9.0
160 | - tenacity==8.4.2
161 | - threadpoolctl==3.5.0
162 | - tokenizers==0.19.1
163 | - toml==0.10.2
164 | - toolz==0.12.1
165 | - tornado==6.4.1
166 | - tqdm==4.66.4
167 | - typer==0.12.3
168 | - typing-extensions==4.12.2
169 | - typing-inspect==0.9.0
170 | - tzdata==2024.1
171 | - ujson==5.10.0
172 | - urllib3==2.2.2
173 | - uvicorn==0.30.1
174 | - uvloop==0.19.0
175 | - watchfiles==0.22.0
176 | - websocket-client==1.8.0
177 | - websockets==12.0
178 | - wrapt==1.16.0
179 | - yarl==1.9.4
180 | - zipp==3.19.2
181 | prefix: /opt/anaconda3/envs/demo-11
182 |
--------------------------------------------------------------------------------
/demos/module4_new/demo-eval-model-v1.py:
--------------------------------------------------------------------------------
1 | import os
2 | from uuid import uuid4
3 | import langsmith
4 | from langchain import smith
5 | from langchain.smith import RunEvalConfig
6 |
7 | from langchain_community.vectorstores import FAISS
8 | from langchain_community.embeddings import OCIGenAIEmbeddings
9 | from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
10 | from langchain.chains import RetrievalQA
11 |
12 | from LoadProperties import LoadProperties
13 |
14 | #In this demo we evaluate the model using the dataset we created
15 |
16 | properties = LoadProperties()
17 |
18 | unique_id = uuid4().hex[0:8]
19 | os.environ["LANGCHAIN_TRACING_V2"] = "true"
20 | os.environ["LANGCHAIN_ENDPOINT"] = properties.getlangChainEndpoint()
21 | os.environ["LANGCHAIN_API_KEY"] = properties.getLangChainKey() # Update to your API ke
22 |
23 |
24 | #In this demo we will create a dataset for model evaluation.
25 |
26 | #Step 1 - create models
27 |
28 | # use default authN method API-key
29 | llm = ChatOCIGenAI(
30 | model_id=properties.getModelName(),
31 | service_endpoint=properties.getEndpoint(),
32 | compartment_id=properties.getCompartment(),
33 | model_kwargs={"max_tokens":400}
34 | )
35 |
36 | embeddings = OCIGenAIEmbeddings(
37 | model_id=properties.getEmbeddingModelName(),
38 | service_endpoint=properties.getEndpoint(),
39 | compartment_id=properties.getCompartment(),
40 | )
41 |
42 | # Step 2 - here we load the index and create a retriever that gets relevant documents (similar in meaning to a query)
43 |
44 | db = FAISS.load_local("faiss_index", embeddings,allow_dangerous_deserialization=True)
45 |
46 | retv = db.as_retriever(search_kwargs={"k": 8})
47 |
48 | #Step 3 - create chain
49 |
50 |
51 | chain = RetrievalQA.from_chain_type(llm=llm, retriever=retv)
52 |
53 | # Step 4 - Define the evaluators to apply
54 | #Default criteria are implemented for the following aspects: conciseness, relevance,
55 | # correctness, coherence, harmfulness, maliciousness, helpfulness, controversiality, misogyny, and criminality.
56 |
57 | eval_config = smith.RunEvalConfig(
58 | evaluators=[
59 | "cot_qa",
60 | RunEvalConfig.Criteria("relevance"),
61 | ],
62 | custom_evaluators=[],
63 | eval_llm=llm
64 | )
65 |
66 | client = langsmith.Client()
67 |
68 | # Step 5 - evaluate model
69 |
70 | chain_results = client.run_on_dataset(
71 | dataset_name="AIFoundationsDS-111",
72 | llm_or_chain_factory=chain,
73 | evaluation=eval_config,
74 | concurrency_level=5,
75 | verbose=True,
76 | )
77 |
--------------------------------------------------------------------------------
/demos/module4_new/demo-faiss-create-v1.py:
--------------------------------------------------------------------------------
1 | from langchain_community.embeddings import OCIGenAIEmbeddings
2 | from langchain_community.vectorstores import FAISS
3 | from langchain.text_splitter import RecursiveCharacterTextSplitter
4 | from langchain_community.document_loaders import PyPDFDirectoryLoader
5 | from LoadProperties import LoadProperties
6 |
7 | pdf_loader = PyPDFDirectoryLoader("./pdf-docs" )
8 | pages_dir = pdf_loader.load()
9 | #print(len(pages_dir))
10 |
11 | properties = LoadProperties()
12 |
13 | loaders = [pdf_loader]
14 | documents = []
15 | for loader in loaders:
16 | documents.extend(loader.load())
17 |
18 | text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
19 | all_documents = text_splitter.split_documents(documents)
20 |
21 | print(f"Total number of documents: {len(all_documents)}")
22 |
23 | #Step 1 - setup OCI Generative AI llm
24 |
25 |
26 | embeddings = OCIGenAIEmbeddings(
27 | model_id=properties.getEmbeddingModelName(),
28 | service_endpoint=properties.getEndpoint(),
29 | compartment_id=properties.getCompartment(),
30 | model_kwargs={"truncate":True}
31 | )
32 |
33 | #Step 2 - index documents and persist
34 |
35 | # Set the batch size
36 | batch_size = 96
37 |
38 | # Calculate the number of batches
39 | num_batches = len(all_documents) // batch_size + (len(all_documents) % batch_size > 0)
40 |
41 |
42 | texts = ["FAISS is an important library", "LangChain supports FAISS"]
43 | db = FAISS.from_texts(texts, embeddings)
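# (the two placeholder texts above just initialize the FAISS index; the real PDF chunks are added in batches below)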
44 | retv = db.as_retriever()
45 |
46 | # Iterate over batches
47 | for batch_num in range(num_batches):
48 | # Calculate start and end indices for the current batch
49 | start_index = batch_num * batch_size
50 | end_index = (batch_num + 1) * batch_size
51 | # Extract documents for the current batch
52 | batch_documents = all_documents[start_index:end_index]
53 |     # Add the current batch of documents to the vector store
54 | retv.add_documents(batch_documents)
55 | print(start_index, end_index)
56 |
57 |
58 | db.save_local("faiss_index")
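#The saved index is consumed by demo-eval-model-v1.py, which reloads it with
#FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)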
59 |
60 |
61 |
62 |
63 |
--------------------------------------------------------------------------------
/demos/module4_new/demo-memory-v1.py:
--------------------------------------------------------------------------------
1 | from langchain.memory.buffer import ConversationBufferMemory
2 | from langchain.memory import ConversationSummaryMemory
3 | from langchain.chains import LLMChain
4 | from langchain.prompts import (
5 | ChatPromptTemplate,
6 | HumanMessagePromptTemplate,
7 | SystemMessagePromptTemplate,
8 | )
9 | from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
10 | from LoadProperties import LoadProperties
11 |
12 | #In this demo we will explore using LangChain Memory to store chat history
13 |
14 | #Step 1 - setup OCI Generative AI llm
15 |
16 | properties = LoadProperties()
17 |
18 | # use default authN method API-key
19 | llm = ChatOCIGenAI(
20 | model_id=properties.getModelName(),
21 | service_endpoint=properties.getEndpoint(),
22 | compartment_id=properties.getCompartment(),
23 | model_kwargs={"max_tokens":100}
24 | )
25 |
26 | #Step 2 - here we create a Prompt
27 | prompt = ChatPromptTemplate(
28 | messages=[
29 | SystemMessagePromptTemplate.from_template(
30 | "You are a nice chatbot who explain in steps."
31 | ),
32 | HumanMessagePromptTemplate.from_template("{question}"),
33 | ]
34 | )
35 |
36 | #Step 3 - here we create a memory to remember our chat with the llm
37 |
38 | memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
39 |
40 | summary_memory = ConversationSummaryMemory(llm=llm , memory_key="chat_history")
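#Note: only summary_memory is wired into the chain below; the plain ConversationBufferMemory above is
#created for comparison and will stay empty when its messages are printed in Step 6.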
41 |
42 | #Step 4 - here we create a conversation chain using llm , prompt and memory
43 |
44 | conversation = LLMChain(llm=llm, prompt=prompt, verbose=True, memory=summary_memory)
45 |
46 | #Step 5 - here we invoke a chain. Notice that we just pass in the `question` variable - `chat_history` gets populated by memory
47 | conversation.invoke({"question": "What is the capital of India"})
48 |
49 | #Step 6 - here we print all the messages in the memory
50 |
51 | print(memory.chat_memory.messages)
52 | print(summary_memory.chat_memory.messages)
53 | print("Summary of the conversation is-->"+summary_memory.buffer)
54 |
55 | #Step 7 - here we ask another question
56 |
57 | conversation.invoke({"question": "what is oci data science certification?"})
58 |
59 | #Step 8 - here we print all the messages in the memory again and see that our last question and response are printed.
60 | print(memory.chat_memory.messages)
61 | print(summary_memory.chat_memory.messages)
62 | print("Summary of the conversation is-->"+summary_memory.buffer)
63 |
64 |
65 |
66 |
67 |
68 |
69 |
--------------------------------------------------------------------------------
/demos/module4_new/demo-oci-genai-ads-v1.py:
--------------------------------------------------------------------------------
1 | import ads
2 | from ads.llm import GenerativeAI
3 | from LoadProperties import LoadProperties
4 |
5 | ads.hello()
6 |
7 | ads.set_auth(auth="api_key")
8 |
9 | properties = LoadProperties()
10 |
11 | llm = GenerativeAI(
12 | compartment_id=properties.getCompartment(),
13 | # Optionally you can specify keyword arguments for the OCI client, e.g. service_endpoint.
14 | client_kwargs={
15 | "service_endpoint": properties.getEndpoint()
16 | },
17 | )
18 |
19 | response = llm.invoke("Translate the following sentence into French:\nHow are you?\n")
20 |
21 | print(response)
22 |
--------------------------------------------------------------------------------
/demos/module4_new/demo-ou-chatbot-chroma-final-v1.py:
--------------------------------------------------------------------------------
1 | import chromadb
2 | from langchain_community.vectorstores import Chroma
3 | from chromadb.config import Settings
4 |
5 | from langchain.memory import ConversationBufferMemory
6 | from langchain.chains import ConversationalRetrievalChain
7 | from langchain.retrievers import ContextualCompressionRetriever
8 | from langchain.retrievers.document_compressors import LLMChainExtractor
9 | from langchain.retrievers.document_compressors import CohereRerank
10 | import json
11 | #from langchain_community.llms import OCIGenAI
12 | from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
13 | from langchain_community.embeddings import OCIGenAIEmbeddings
14 | from LoadProperties import LoadProperties
15 |
16 | #In this demo we will explore using a ConversationalRetrievalChain to retrieve relevant documents and send these as context in a query.
17 | # We will use a Chroma vectorstore.
18 |
19 | #Step 1 - this will set up the chain, to be called later
20 |
21 | def create_chain():
22 | properties = LoadProperties()
23 | client = chromadb.HttpClient(host="127.0.0.1",settings=Settings(allow_reset=True))
24 |
25 | embeddings = OCIGenAIEmbeddings(
26 | model_id=properties.getEmbeddingModelName(),
27 | service_endpoint=properties.getEndpoint(),
28 | compartment_id=properties.getCompartment()
29 | )
30 | db = Chroma(client=client, embedding_function=embeddings)
31 | #retv = db.as_retriever(search_type="mmr", search_kwargs={"k": 7})
32 | retv = db.as_retriever(search_kwargs={"k": 8})
33 |
34 | llm = ChatOCIGenAI(
35 | model_id=properties.getModelName(),
36 | service_endpoint=properties.getEndpoint(),
37 | compartment_id=properties.getCompartment(),
38 | model_kwargs={"max_tokens":200}
39 | )
40 | memory = ConversationBufferMemory(llm=llm, memory_key="chat_history", return_messages=True, output_key='answer')
41 |
42 | qa = ConversationalRetrievalChain.from_llm(llm, retriever=retv , memory=memory,
43 | return_source_documents=True)
44 | return qa
45 |
46 | #Step 2 - create chain, here we create a ConversationalRetrievalChain.
47 |
48 | chain = create_chain()
49 |
50 | #Step 3 - here we declare a chat function
51 | def chat(user_message):
52 | # generate a prediction for a prompt
53 | bot_json = chain.invoke({"question": user_message})
54 | print(bot_json)
55 | return {"bot_response": bot_json}
56 |
57 | #Step 4 - here we setup Streamlit text input and pass input text to chat function.
58 | # chat function returns the response and we print it.
59 |
60 | if __name__ == "__main__":
61 | import streamlit as st
62 | st.subheader("OU Chatbot powered by OCI Generative AI Service")
63 | col1 , col2 = st.columns([4,1])
64 |
65 | user_input = st.chat_input()
66 | with col1:
67 | col1.subheader("------Ask me a question about OCI courses------")
68 | #col2.subheader("References")
69 | if "messages" not in st.session_state:
70 | st.session_state.messages = []
71 | if user_input:
72 | bot_response = chat(user_input)
73 | st.session_state.messages.append({"role" : "chatbot", "content" : bot_response})
74 | #st.write("OU Assistant Response: ", bot_response)
75 | for message in st.session_state.messages:
76 | st.chat_message("user")
77 | st.write("Question: ", message['content']['bot_response']['question'])
78 | st.chat_message("assistant")
79 | st.write("Answer: ", message['content']['bot_response']['answer'])
80 | #with col2:
81 | st.chat_message("assistant")
82 | for doc in message['content']['bot_response']['source_documents']:
83 | st.write("Reference: ", doc.metadata['source'] + " \n"+ "-page->"+str(doc.metadata['page']))
84 |
85 | #st.write("Reference: ", doc.metadata['source'] + " \n"+ "-page->"+str(doc.metadata['page']) +
86 | # " \n"+ "-relevance score->"+ str(doc.metadata['relevance_score'])
87 | # )
88 |
--------------------------------------------------------------------------------
/demos/module4_new/demo-ou-chatbot-chroma-final-v2.py:
--------------------------------------------------------------------------------
1 | import chromadb
2 | from langchain_community.vectorstores import Chroma
3 | from chromadb.config import Settings
4 |
5 | from langchain.memory import ConversationBufferMemory
6 | from langchain.chains import ConversationalRetrievalChain
7 | from langchain.retrievers import ContextualCompressionRetriever
8 | from langchain.retrievers.document_compressors import LLMChainExtractor
9 | from langchain.retrievers.document_compressors import CohereRerank
10 | import json
11 | #from langchain_community.llms import OCIGenAI
12 | from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
13 | from langchain_community.embeddings import OCIGenAIEmbeddings
14 | from LoadProperties import LoadProperties
15 |
16 | #In this demo we will explore using a ConversationalRetrievalChain to retrieve relevant documents and send these as context in a query.
17 | # We will use a Chroma vectorstore.
18 |
19 | #Step 1 - this will set up the chain, to be called later
20 |
21 | def create_chain():
22 | properties = LoadProperties()
23 | client = chromadb.HttpClient(host="127.0.0.1",settings=Settings(allow_reset=True))
24 | embeddings = OCIGenAIEmbeddings(
25 | model_id=properties.getEmbeddingModelName(),
26 | service_endpoint=properties.getEndpoint(),
27 | compartment_id=properties.getCompartment(),
28 | )
29 | db = Chroma(client=client, embedding_function=embeddings)
30 |     retv = db.as_retriever(search_type="mmr", search_kwargs={"k": 5})
31 |
32 | llm = ChatOCIGenAI(
33 | model_id=properties.getModelName(),
34 | service_endpoint=properties.getEndpoint(),
35 | compartment_id=properties.getCompartment(),
36 | model_kwargs={"max_tokens":200}
37 | )
38 | memory = ConversationBufferMemory(llm=llm, memory_key="chat_history", return_messages=True, output_key='answer')
39 | qa = ConversationalRetrievalChain.from_llm(llm, retriever=retv, memory=memory, return_source_documents=True)
40 | return qa
41 |
42 | #Step 2 - create chain, here we create a ConversationalRetrievalChain.
43 |
44 | chain = create_chain()
45 |
46 | #Step 3 - here we declare a chat function
47 | def chat(llm_chain, user_input):
48 | # generate a prediction for a prompt
49 | bot_json = llm_chain.invoke(user_input)
50 | print("bot json is ->", bot_json )
51 | return {"bot_response": bot_json}
52 |
53 | #Step 4 - here we setup Streamlit text input and pass input text to chat function.
54 | # chat function returns the response and we print it.
55 |
56 | if __name__ == "__main__":
57 | import streamlit as st
58 |
59 | st.subheader("Chatbot that answers your study questions")
60 | col1 , col2 = st.columns([4,1])
61 |
62 | def initialize_session_state():
63 | if "llm_chain" not in st.session_state:
64 | st.session_state["llm_chain"] = create_chain()
65 | llm_chain = st.session_state["llm_chain"]
66 | else:
67 | llm_chain = st.session_state["llm_chain"]
68 | return llm_chain
69 |
70 | user_input = st.chat_input()
71 | with col1:
72 | col1.subheader("------Ask me a question about science chapters------")
73 | #col2.subheader("References")
74 | if "messages" not in st.session_state:
75 | st.session_state.messages = []
76 | if user_input:
77 | llm_chain = initialize_session_state()
78 | bot_response = chat(llm_chain, user_input)
79 | print("bot_response->\n", bot_response)
80 | st.session_state.messages.append({"role" : "chatbot", "content" : bot_response})
81 | #st.write("OU Assistant Response: ", bot_response)
82 | for message in st.session_state.messages:
83 | st.chat_message("user")
84 | st.write("Question: ", message['content']['bot_response']['question'])
85 | st.chat_message("assistant")
86 | st.write("Answer: ", message['content']['bot_response']['answer'])
87 | #with col2:
88 | st.chat_message("assistant")
89 | for doc in message['content']['bot_response']['source_documents']:
90 | st.write("Reference: ", doc.metadata['source'] + " \n"+ "-page->"+str(doc.metadata['page']))
91 |
92 | #st.write("Reference: ", doc.metadata['source'] + " \n"+ "-page->"+str(doc.metadata['page']) +
93 | # " \n"+ "-relevance score->"+ str(doc.metadata['relevance_score'])
94 | # )
95 |
--------------------------------------------------------------------------------
/demos/module4_new/demo-ou-chatbot-chroma-v1.py:
--------------------------------------------------------------------------------
1 | from fastapi import FastAPI, Form
2 | import chromadb
3 | from langchain_community.vectorstores import Chroma
4 | from chromadb.config import Settings
5 |
6 | from langchain.memory import ConversationBufferMemory
7 | from langchain.chains import ConversationalRetrievalChain
8 | import json
9 | #from langchain_community.llms import OCIGenAI
10 | from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
11 | from langchain_community.embeddings import OCIGenAIEmbeddings
12 | from LoadProperties import LoadProperties
13 |
14 | #In this demo we will explore using a ConversationalRetrievalChain to retrieve relevant documents and send these as context in a query.
15 | # We will use a Chroma vectorstore.
16 |
17 | #Step 1 - load configuration properties (authentication uses the default API-key profile)
18 |
19 | properties = LoadProperties()
20 |
21 | #Step 2 - this will set up the chain, to be called later
22 |
23 | def create_chain():
24 | client = chromadb.HttpClient(host="127.0.0.1",settings=Settings(allow_reset=True))
25 | embeddings = OCIGenAIEmbeddings(
26 | model_id=properties.getEmbeddingModelName(),
27 | service_endpoint=properties.getEndpoint(),
28 | compartment_id=properties.getCompartment(),
29 | )
30 | db = Chroma(client=client, embedding_function=embeddings)
31 |     retv = db.as_retriever(search_type="mmr", search_kwargs={"k": 5})
32 |
33 | llm = ChatOCIGenAI(
34 | model_id=properties.getModelName(),
35 | service_endpoint=properties.getEndpoint(),
36 | compartment_id=properties.getCompartment(),
37 | model_kwargs={"max_tokens":200}
38 | )
39 | memory = ConversationBufferMemory(llm=llm, memory_key="chat_history", return_messages=True)
40 | qa = ConversationalRetrievalChain.from_llm(llm, retriever=retv,memory=memory)
41 | return qa
42 |
43 | #Step 3 - create chain, here we create a ConversationalRetrievalChain.
44 |
45 | chain = create_chain()
46 |
47 | #Step 4 - here we declare a chat function
48 | def chat(user_message: str = Form(...)):
49 | # generate a prediction for a prompt
50 | bot_json = chain.invoke({"question": user_message})
51 | return {"bot_response": bot_json}
52 |
53 | #Step 5 - here we setup Streamlit text input and pass input text to chat function.
54 | # chat function returns the response and we print it.
55 |
56 | if __name__ == "__main__":
57 | import streamlit as st
58 | st.title("Oracle University Chatbot")
59 | if "messages" not in st.session_state:
60 | st.session_state.messages = []
61 | user_input = st.chat_input()
62 | if user_input:
63 | bot_response = chat(user_input)
64 | st.session_state.messages.append({"role" : "chatbot", "content" : bot_response})
65 | #st.write("OU Assistant Response: ", bot_response)
66 | for message in st.session_state.messages:
67 | st.chat_message("user")
68 | st.write("Question: ", message['content']['bot_response']['question'])
69 | st.chat_message("assistant")
70 | st.write("Answer: ", message['content']['bot_response']['answer'])
71 |
--------------------------------------------------------------------------------
/demos/module4_new/demo-prompts-v1.py:
--------------------------------------------------------------------------------
1 | from langchain_core.prompts import PromptTemplate
2 | from langchain_core.prompts import ChatPromptTemplate
3 | from langchain_core.prompts import HumanMessagePromptTemplate
4 | from langchain_core.messages import HumanMessage, SystemMessage
5 |
6 | from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
7 |
8 | from LoadProperties import LoadProperties
9 |
10 | #langchain.debug = True
11 |
12 | #In this demo we will explore using LangChain Prompt templates
13 |
14 | #Step 1 - setup OCI Generative AI llm
15 |
16 | properties = LoadProperties()
17 |
18 | # use default authN method
19 | llm = ChatOCIGenAI(
20 | model_id=properties.getModelName(),
21 | service_endpoint=properties.getEndpoint(),
22 | compartment_id=properties.getCompartment(),
23 | model_kwargs={"max_tokens":200}
24 | )
25 |
26 | #Step 2 - invoke llm with a fixed text input
27 |
28 | response = llm.invoke("Tell me one fact about space", temperature=0.7)
29 | print("Case1 Response - > ")
30 | print(response.pretty_print())
31 |
32 | #Step 3 - Use a string prompt to accept text input. Here we create a template and declare the input variables {human_input} and {city}
33 |
34 | #String prompt
35 |
36 | template = """You are a chatbot having a conversation with a human.
37 | Human: {human_input} + {city}
38 | :"""
39 |
40 | #Step 4 - here we create a Prompt using the template
41 |
42 | prompt = PromptTemplate(input_variables=["human_input", "city"], template=template)
43 |
44 | prompt_val = prompt.invoke({"human_input":"Tell us in an exciting tone about", "city":"Las Vegas"})
45 | print("Prompt String is ->")
46 | print(prompt_val.to_string())
47 |
48 | #Step 5 - here we declare a chain that pipes the prompt into the llm
49 |
50 | chain = prompt | llm
51 |
52 | #Step 6 - Next we invoke a chain and provide input question
53 |
54 | response = chain.invoke({"human_input":"Tell us in an exciting tone about", "city":"Las Vegas"})
55 |
56 | #Step 7 - print the prompt and response from the llm
57 |
58 | print("Case2 Response - >")
59 | print(response.pretty_print())
60 |
61 |
62 | #Step 8 - Use a chat message prompt to accept text input. Here we create a chat template from system, ai and human messages
63 |
64 | prompt = ChatPromptTemplate.from_messages(
65 | [
66 | ("system", "You are a chatbot that explains in steps."),
67 | ("ai", "I shall explain in steps"),
68 | ("human", "{input}"),
69 | ]
70 | )
71 |
72 | chain = prompt | llm
73 | response = chain.invoke({"input": "What's the New York culture like?"})
74 | print("Case3 Response - > ")
75 | print(response.pretty_print())
76 |
77 | #Step 9 - another example with .from_template()
78 |
79 | prompt = ChatPromptTemplate.from_template("Tell me a joke about {animal}")
80 | chain1 = prompt | llm
81 | response = chain1.invoke({"animal": "zebra"})
82 | print("Case4 Response - > ")
83 | print(response.pretty_print())
84 |
85 | #another example
86 |
87 | chat_template = ChatPromptTemplate.from_messages(
88 | [
89 | SystemMessage(
90 | content=(
91 | "You are a helpful assistant that re-writes the user's text to "
92 | "sound more upbeat."
93 | )
94 | ),
95 | HumanMessagePromptTemplate.from_template("{text}"),
96 | ]
97 | )
98 |
99 | chain2 = chat_template | llm
100 | response = chain2.invoke({"text":"I don't like eating tasty things"})
101 | print("Case5 Response ->")
102 | print(response.pretty_print())
103 |
104 |
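The chains above stop at the llm; adding an output parser returns a plain string instead of a message object. A minimal, self-contained sketch of the prompt | llm | parser pipeline, using a stand-in LLM so it runs without OCI credentials (FakeListLLM and its canned response are assumptions for illustration only):

    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.output_parsers import StrOutputParser
    from langchain_community.llms import FakeListLLM

    prompt = ChatPromptTemplate.from_template("Tell me a joke about {animal}")
    llm = FakeListLLM(responses=["Why did the zebra cross the road?"])
    chain = prompt | llm | StrOutputParser()

    print(chain.invoke({"animal": "zebra"}))  # plain string output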
--------------------------------------------------------------------------------
/demos/module4_new/demo-retrieval-chroma-v1.py:
--------------------------------------------------------------------------------
1 | from langchain.chains import RetrievalQA
2 | import chromadb
3 | from langchain_community.vectorstores import Chroma
4 | from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
5 | from langchain_community.embeddings import OCIGenAIEmbeddings
6 | from LoadProperties import LoadProperties
7 |
8 | #In this demo we will retrieve documents and send these as context to the LLM.
9 |
10 | #Step 1 - setup OCI Generative AI llm
11 |
12 | properties = LoadProperties()
13 |
14 | # use default authN method API-key
15 | llm = ChatOCIGenAI(
16 | model_id=properties.getModelName(),
17 | service_endpoint=properties.getEndpoint(),
18 | compartment_id=properties.getCompartment(),
19 | model_kwargs={"max_tokens":100}
20 | )
21 |
22 | #Step 2 - here we connect to a ChromaDB server. The ChromaDB server must be running before we connect to it
23 |
24 | client = chromadb.HttpClient(host="127.0.0.1")
25 |
26 | #Step 3 - here we create embeddings using the embedding model named in the config file.
27 |
28 | embeddings = OCIGenAIEmbeddings(
29 | model_id=properties.getEmbeddingModelName(),
30 | service_endpoint=properties.getEndpoint(),
31 | compartment_id=properties.getCompartment(),
32 | )
33 |
34 | #Step 4 - here we create a retriever that gets relevant documents (similar in meaning to a query)
35 |
36 | db = Chroma(client=client, embedding_function=embeddings)
37 |
38 | retv = db.as_retriever(search_type="similarity", search_kwargs={"k": 5})
39 |
40 | #Step 5 - here we can explore the documents similar to the query by printing their metadata. This step is optional
41 |
42 | docs = retv.invoke('Tell us which module is most relevant to LLMs and Generative AI')
43 |
44 | def pretty_print_docs(docs):
45 | print(
46 | f"\n{'-' * 100}\n".join(
47 | [f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
48 | )
49 | )
50 |
51 | pretty_print_docs(docs)
52 |
53 | for doc in docs:
54 | print(doc.metadata)
55 |
56 | #Step 6 - here we create a retrieval chain that takes the llm and retriever objects, and invoke it to get a response to our query
57 |
58 | chain = RetrievalQA.from_chain_type(llm=llm, retriever=retv,return_source_documents=True)
59 |
60 | response = chain.invoke("Tell us which module is most relevant to LLMs and Generative AI")
61 |
62 | print(response)
63 |
64 |
65 |
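A minimal, self-contained version of the retriever setup above, using an in-memory Chroma collection and fake embeddings so it runs without a Chroma server or OCI credentials (both stand-ins, and the sample texts, are assumptions for illustration; results are arbitrary with fake vectors):

    from langchain_community.vectorstores import Chroma
    from langchain_community.embeddings import FakeEmbeddings

    texts = ["Module 1 covers AI basics.", "Module 5 covers LLMs and Generative AI."]
    db = Chroma.from_texts(texts, FakeEmbeddings(size=256))

    retv = db.as_retriever(search_type="similarity", search_kwargs={"k": 2})
    for d in retv.invoke("Which module covers LLMs?"):
        print(d.page_content)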
--------------------------------------------------------------------------------
/demos/module4_new/demo-retrieval-faiss-v1.py:
--------------------------------------------------------------------------------
1 | from langchain.chains import RetrievalQA
2 | from langchain_community.vectorstores import FAISS
3 |
4 | from langchain_community.embeddings import OCIGenAIEmbeddings
5 | from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
6 | from langchain_core.messages import HumanMessage
7 |
8 | from LoadProperties import LoadProperties
9 |
10 | #In this demo we will explore using a RetrievalQA chain to retrieve relevant documents and send these as context in a query.
11 | # We will use the FAISS vectorstore.
12 |
13 | #Step 1 - setup OCI Generative AI llm
14 |
15 | properties = LoadProperties()
16 |
17 | # use default authN method
18 | llm = ChatOCIGenAI(
19 | model_id=properties.getModelName(),
20 | service_endpoint=properties.getEndpoint(),
21 | compartment_id=properties.getCompartment(),
22 | model_kwargs={"max_tokens":200}
23 | )
24 |
25 | #Step 2 - here we create embeddings using the embedding model named in the config file.
26 |
27 | embeddings = OCIGenAIEmbeddings(
28 | model_id=properties.getEmbeddingModelName(),
29 | service_endpoint=properties.getEndpoint(),
30 | compartment_id=properties.getCompartment(),
31 | )
32 |
33 | #Step 3 - here we load the index and create a retriever that gets relevant documents (similar in meaning to a query)
34 |
35 | db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
36 |
37 | #retv = db.as_retriever(search_kwargs={"k": 3})
38 |
39 | retv = db.as_retriever(search_type="similarity", search_kwargs={"k": 5})
40 |
41 | #Step 4 - here we can explore the documents similar to the query by printing their metadata. This step is optional
42 |
43 | docs = retv.invoke('Tell us which module is most relevant to LLMs and Generative AI')
44 |
45 | def pretty_print_docs(docs):
46 | print(
47 | f"\n{'-' * 100}\n".join(
48 | [f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
49 | )
50 | )
51 |
52 | pretty_print_docs(docs)
53 |
54 | docs1 = []
55 |
56 | for doc in docs:
57 | #print(doc)
58 | docs1.append({"snippet": doc.page_content})
59 | #print(docs1)
60 |
61 |
62 | #Step 5 - here we create a retrieval chain that takes the llm and retriever objects, and invoke it to get a response to our query
63 |
64 | chain = RetrievalQA.from_chain_type(llm=llm, retriever=retv,return_source_documents=True)
65 |
66 | response = chain.invoke("Tell us which module is relevant to LLMs and Generative AI")
67 |
68 | # ChatOCIGenAI supports documents; the following (commented-out) code passes the retrieved documents directly to ChatOCIGenAI.
69 | #messages = [HumanMessage(content="Tell us which module of AI Foundations course is relevant to Transformers")]
70 | #response = llm.invoke(messages,documents=docs1)
71 |
72 | print(response)
73 |
74 |
75 |
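A minimal sketch of how a "faiss_index" folder like the one loaded above can be created and reloaded (fake embeddings stand in for OCIGenAIEmbeddings; the sample text, folder name, and the faiss-cpu dependency are assumptions for illustration):

    from langchain_community.vectorstores import FAISS
    from langchain_community.embeddings import FakeEmbeddings

    embeddings = FakeEmbeddings(size=256)
    db = FAISS.from_texts(["Module 5: Generative AI and LLM Foundations"], embeddings)
    db.save_local("faiss_index_demo")

    db2 = FAISS.load_local("faiss_index_demo", embeddings,
                           allow_dangerous_deserialization=True)
    print(db2.as_retriever(search_kwargs={"k": 1}).invoke("LLMs"))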
--------------------------------------------------------------------------------
/demos/module4_new/demo-retrieval-memory-chroma-traces-v1.py:
--------------------------------------------------------------------------------
1 | from langchain.memory import ConversationBufferMemory
2 | from langchain.chains import ConversationalRetrievalChain
3 | import chromadb
4 | from langchain_community.vectorstores import Chroma
5 | from chromadb.config import Settings
6 | from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
7 | from langchain_community.embeddings import OCIGenAIEmbeddings
8 | import os
9 | from uuid import uuid4
10 | from LoadProperties import LoadProperties
11 |
12 | properties = LoadProperties()
13 |
14 | unique_id = uuid4().hex[0:8]
15 | os.environ["LANGCHAIN_TRACING_V2"] = "true"
16 | os.environ["LANGCHAIN_PROJECT"] = f"Test111 - {unique_id}"
17 | os.environ["LANGCHAIN_ENDPOINT"] = properties.getlangChainEndpoint()
18 | os.environ["LANGCHAIN_API_KEY"] = properties.getLangChainKey()
19 |
20 | #In this demo we will explore using a ConversationalRetrievalChain to retrieve relevant documents and send these as context in a query.
21 | # We will use Chroma vectorstore.
22 |
23 | #Step 1 - setup OCI Generative AI llm
24 |
25 | # use default authN method API-key
26 | llm = ChatOCIGenAI(
27 | model_id=properties.getModelName(),
28 | service_endpoint=properties.getEndpoint(),
29 | compartment_id=properties.getCompartment(),
30 | model_kwargs={"max_tokens":400}
31 | )
32 |
33 | #Step 2 - here we connect to a ChromaDB server. The ChromaDB server must be running before we connect to it
34 |
35 | client = chromadb.HttpClient(host="127.0.0.1",settings=Settings(allow_reset=True))
36 |
37 | #Step 3 - here we create the embeddings model
38 |
39 | embeddings = OCIGenAIEmbeddings(
40 | model_id=properties.getEmbeddingModelName(),
41 | service_endpoint=properties.getEndpoint(),
42 | compartment_id=properties.getCompartment(),
43 | )
44 |
45 | #Step 4 - here we create a retriever that gets relevant documents (similar in meaning to a query)
46 |
47 | db = Chroma(client=client, embedding_function=embeddings)
48 |
49 | retv = db.as_retriever(search_type="similarity", search_kwargs={"k": 8})
50 |
51 | def pretty_print_docs(docs):
52 | print(
53 | f"\n{'-' * 100}\n".join(
54 | [f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
55 | )
56 | )
57 |
58 | #Step 5 - here we create a memory to remember chat messages.
59 |
60 | memory = ConversationBufferMemory(llm=llm, memory_key="chat_history", return_messages=True, output_key='answer')
61 |
62 |
63 | #Step 6 - here we create a chain that uses llm, retriever and memory.
64 |
65 | #You can also define the chain type as one of the four options: "stuff", "map_reduce", "refine", "map_rerank".
66 |
67 | qa = ConversationalRetrievalChain.from_llm(llm, retriever=retv, memory=memory, return_source_documents=True)
68 |
69 | response = qa.invoke({"question": "Tell us about Oracle Cloud Infrastructure AI Foundations course"})
70 | print(memory.chat_memory.messages)
71 |
72 |
73 | response = qa.invoke({"question": "Which module of the course is relevant to the LLMs and Transformers"})
74 | print(memory.chat_memory.messages)
75 |
76 | print(response)
77 |
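For reference, a tiny self-contained sketch of what ConversationBufferMemory stores (the example strings are assumptions): with return_messages=True the history is a list of HumanMessage/AIMessage objects, which is what the prints above display.

    from langchain.memory import ConversationBufferMemory

    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    memory.save_context({"input": "Tell us about the AI Foundations course"},
                        {"output": "It is an introductory OCI AI course."})
    print(memory.chat_memory.messages)  # [HumanMessage(...), AIMessage(...)]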
--------------------------------------------------------------------------------
/demos/module4_new/demo-retrieval-memory-faiss-v1.py:
--------------------------------------------------------------------------------
1 | from langchain.chains import RetrievalQA
2 | from langchain.schema import StrOutputParser
3 | from langchain_community.vectorstores import FAISS
4 | from langchain.memory import ConversationBufferMemory
5 | from langchain.chains import ConversationalRetrievalChain
6 | from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
7 | from langchain_community.embeddings import OCIGenAIEmbeddings
8 | import oci
9 | from LoadProperties import LoadProperties
10 |
11 | #In this demo we will explore using a ConversationalRetrievalChain to retrieve relevant documents and send these as context in a query.
12 | #We will use the FAISS vectorstore.
13 |
14 | # Step 1 - authenticate using "DEFAULT" profile
15 |
16 | properties = LoadProperties()
17 |
18 | # Step 2 - setup OCI Generative AI llm
19 |
20 | # use default authN method API-key
21 | llm = ChatOCIGenAI(
22 | model_id=properties.getModelName(),
23 | service_endpoint=properties.getEndpoint(),
24 | compartment_id=properties.getCompartment(),
25 | model_kwargs={"max_tokens": 100}
26 | )
27 |
28 | # Step 3 - here we create embeddings using the embedding model named in the config file.
29 |
30 | embeddings = OCIGenAIEmbeddings(
31 | model_id=properties.getEmbeddingModelName(),
32 | service_endpoint=properties.getEndpoint(),
33 | compartment_id=properties.getCompartment(),
34 | )
35 |
36 | # Step 4 - here we load the index and create a retriever that gets relevant documents (similar in meaning to a query)
37 |
38 | db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
39 |
40 | retv = db.as_retriever(search_kwargs={"k": 8})
41 |
42 |
43 | # Step 5 - here we can explore the documents similar to the query by printing their metadata. This step is optional
44 |
45 | docs = retv.invoke('Module 5: Generative AI and LLM Foundations')
46 |
47 | print(docs)
48 |
49 | for doc in docs:
50 | print(doc.metadata)
51 |
52 | # Step 6 - here we create a memory to remember chat messages.
53 |
54 | memory = ConversationBufferMemory(llm=llm, memory_key="chat_history", return_messages=True)
55 |
56 | # Step 7 - here we create a chain that uses llm, retriever and memory.
57 |
58 | # You can also define the chain type as one of the four options: "stuff", "map_reduce", "refine", "map_rerank".
59 |
60 | qa = ConversationalRetrievalChain.from_llm(llm, retriever=retv, memory=memory)
61 |
62 | # Step 8 - we ask question 1 and print the relevant documents' metadata and the chat messages.
63 | docs = retv.invoke("How many modules are there in AI Foundations Course ")
64 | print(len(docs))
65 |
66 | for doc in docs:
67 | print(doc.metadata)
68 |
69 | qa.invoke({"question": "Tell us about oracle cloud ai foundations Module 5"})
70 | print(memory.chat_memory.messages)
71 |
72 | # Step 9 - we ask question 2 and print the relevant documents' metadata and the chat messages.
73 | docs = retv.invoke("Tell us more about oracle cloud multicloud federation")
74 | print(len(docs))
75 |
76 | for doc in docs:
77 | print(doc.metadata)
78 |
79 | qa.invoke({"question": "Tell us more about oracle cloud multicloud federation"})
80 | print(memory.chat_memory.messages)
81 |
--------------------------------------------------------------------------------
/demos/module4_new/demo-runnable-parallel-v1.py:
--------------------------------------------------------------------------------
1 | import ads
2 | from langchain_core.runnables import RunnableParallel
3 | from langchain_core.prompts import ChatPromptTemplate
4 | from LoadProperties import LoadProperties
5 |
6 | #In this demo we show how 2 chains can be run in parallel.
7 | properties = LoadProperties()
8 |
9 | ads.set_auth(auth="api_key")
10 |
11 | from ads.llm import GenerativeAI
12 |
13 | model = GenerativeAI(
14 | compartment_id=properties.getCompartment(),
15 | # Optionally you can specify keyword arguments for the OCI client, e.g. service_endpoint.
16 | client_kwargs={
17 | "service_endpoint": properties.getEndpoint()
18 | },
19 | )
20 |
21 | chain1 = ChatPromptTemplate.from_template("tell me a joke about {topic1}") | model
22 | chain2 = ChatPromptTemplate.from_template("write a short (2 line) poem about {topic2}") | model
23 |
24 | combined = RunnableParallel(joke=chain1, poem=chain2)
25 |
26 | response = combined.invoke({"topic1":"pig","topic2":"parrot"})
27 |
28 | print(response)
29 |
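A self-contained sketch of the same RunnableParallel pattern with a stand-in LLM, so it runs without OCI or ADS credentials (FakeListLLM and its canned outputs are assumptions for illustration):

    from langchain_core.runnables import RunnableParallel
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_community.llms import FakeListLLM

    llm = FakeListLLM(responses=["a joke about a pig", "a short poem about a parrot"])
    chain1 = ChatPromptTemplate.from_template("tell me a joke about {topic1}") | llm
    chain2 = ChatPromptTemplate.from_template("write a short poem about {topic2}") | llm

    combined = RunnableParallel(joke=chain1, poem=chain2)
    print(combined.invoke({"topic1": "pig", "topic2": "parrot"}))  # {'joke': ..., 'poem': ...}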
--------------------------------------------------------------------------------
/demos/module4_new/demo-sessions-v1.py:
--------------------------------------------------------------------------------
1 | from langchain.memory import ConversationBufferMemory
2 | from langchain_community.chat_message_histories import StreamlitChatMessageHistory
3 | from langchain.chains import LLMChain
4 | from langchain.prompts import PromptTemplate
5 | from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
6 | from LoadProperties import LoadProperties
7 |
8 |
9 | #In this demo we will explore using Streamlit session state to store chat messages
10 |
11 |
12 | #Step 1 - setup OCI Generative AI llm
13 |
14 | properties = LoadProperties()
15 |
16 | # use default authN method API-key
17 | llm = ChatOCIGenAI(
18 | model_id=properties.getModelName(),
19 | service_endpoint=properties.getEndpoint(),
20 | compartment_id=properties.getCompartment(),
21 | model_kwargs={"max_tokens":100}
22 | )
23 |
24 | #Step 2 - here we create a history with the key "chat_messages".
25 |
26 | #StreamlitChatMessageHistory will store messages in Streamlit session state at the specified key=.
27 | #A given StreamlitChatMessageHistory will NOT be persisted or shared across user sessions.
28 |
29 | history = StreamlitChatMessageHistory(key="chat_messages")
30 |
31 | #Step 3 - here we create a memory object
32 |
33 | memory = ConversationBufferMemory(chat_memory=history)
34 |
35 | #Step 4 - here we create template and prompt to accept a question
36 |
37 | template = """You are an AI chatbot having a conversation with a human.
38 | Human: {human_input}
39 | AI: """
40 | prompt = PromptTemplate(input_variables=["human_input"], template=template)
41 |
42 | #Step 5 - here we create a chain object
43 |
44 | llm_chain = LLMChain(llm=llm, prompt=prompt, memory=memory)
45 |
46 | #Step 6 - here we use Streamlit to print all messages in the memory, create a text input, and run the chain;
47 | #the question and response are automatically added to the StreamlitChatMessageHistory
48 |
49 | import streamlit as st
50 |
51 | st.title('🦜🔗 Welcome to the ChatBot')
52 | for msg in history.messages:
53 | st.chat_message(msg.type).write(msg.content)
54 |
55 | if x := st.chat_input():
56 | st.chat_message("human").write(x)
57 |
58 | # As usual, new messages are added to StreamlitChatMessageHistory when the Chain is called.
59 | response = llm_chain.invoke(x)
60 | st.chat_message("ai").write(response["text"])
61 |
62 |
63 |
64 |
65 |
66 |
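A self-contained sketch of the LLMChain + memory wiring above, with a stand-in LLM and an in-process history instead of Streamlit session state (FakeListLLM and its canned reply are assumptions for illustration):

    from langchain.chains import LLMChain
    from langchain.memory import ConversationBufferMemory
    from langchain.prompts import PromptTemplate
    from langchain_community.llms import FakeListLLM

    template = "You are an AI chatbot having a conversation with a human.\nHuman: {human_input}\nAI: "
    prompt = PromptTemplate(input_variables=["human_input"], template=template)
    memory = ConversationBufferMemory()
    llm_chain = LLMChain(llm=FakeListLLM(responses=["Hello! How can I help?"]),
                         prompt=prompt, memory=memory)

    print(llm_chain.invoke("Hi there")["text"])
    print(memory.buffer)  # the exchange is kept in memory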
--------------------------------------------------------------------------------
/demos/module4_new/demo-streamlit-v1.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 | #from langchain_community.llms import OCIGenAI
3 | from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
4 | import oci
5 | from LoadProperties import LoadProperties
6 |
7 | #In this demo we will explore using Streamlit to send a question to the llm and display the response
8 |
9 | #Step 1 - setup OCI Generative AI llm
10 | properties = LoadProperties()
11 |
12 | # use default authN method API-key
13 | llm = ChatOCIGenAI(
14 | model_id=properties.getModelName(),
15 | service_endpoint=properties.getEndpoint(),
16 | compartment_id=properties.getCompartment(),
17 | model_kwargs={"max_tokens":100}
18 | )
19 |
20 | #Step 2 - we define a function that generates and displays the response
21 |
22 | def generate_response(input_text):
23 | st.info(llm.invoke(input_text).content)
24 |
25 | #Step 3 - here we write a quick Streamlit application that accepts a text input (question) and,
26 | # on clicking a 'Submit' button, calls a function that generates the response
27 |
28 | st.title('🦜🔗 Welcome to the ChatBot')
29 | with st.form('my_form'):
30 | text = st.text_area('Enter text:', 'What are the three key pieces of advice for learning how to code?')
31 | submitted = st.form_submit_button('Submit')
32 | if submitted :
33 | generate_response(text)
34 |
--------------------------------------------------------------------------------
/demos/module4_new/pdf-docs/oci-ai-foundations.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ou-developers/ou-generativeai-pro/d95e9e524101a071e400498c6b58bd3c762a7109/demos/module4_new/pdf-docs/oci-ai-foundations.pdf
--------------------------------------------------------------------------------
/demos/module5/oci-ai-foundations.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ou-developers/ou-generativeai-pro/d95e9e524101a071e400498c6b58bd3c762a7109/demos/module5/oci-ai-foundations.pdf
--------------------------------------------------------------------------------
/demos/module5/sql_sheet.sql:
--------------------------------------------------------------------------------
1 | -- ACL to let the user reach any host (host => '*'); it's not required for Oracle Base Database.
2 | begin
3 | -- Allow all hosts for HTTP/HTTP_PROXY
4 | dbms_network_acl_admin.append_host_ace(
5 | host =>'*',
6 | lower_port => 443,
7 | upper_port => 443,
8 | ace => xs$ace_type(
9 | privilege_list => xs$name_list('http', 'http_proxy'),
10 | principal_name => upper('admin'),
11 | principal_type => xs_acl.ptype_db)
12 | );
13 | end;
14 | /
15 |
16 |
17 | -- DBMS_CLOUD credentials
18 | -- Some examples are based on DBMS_CLOUD, that is included in Autonomous DB.
19 | -- If you need to install it (for example on Base Database) you can refer to: https://support.oracle.com/knowledge/Oracle%20Cloud/2748362_1.html
20 | begin
21 | DBMS_CLOUD.CREATE_CREDENTIAL (
22 | credential_name => 'xxxxxxxxxxxxx',
23 | user_ocid => 'xxxxxxxxxxxxx',
24 | tenancy_ocid => 'xxxxxxxxxxxxx',
25 | private_key => 'xxxxxxxxxxxxx',
26 | fingerprint => 'xxxxxxxxxxxxx'
27 | );
28 | end;
29 | /
30 |
31 |
32 | declare
33 | jo json_object_t;
34 | begin
35 | jo := json_object_t();
36 | jo.put('user_ocid','xxxxxxxxxxxxx');
37 | jo.put('tenancy_ocid','xxxxxxxxxxxxx');
38 | jo.put('compartment_ocid','xxxxxxxxxxxxx');
39 | jo.put('private_key','xxxxxxxxxxxxx');
40 | jo.put('fingerprint','xxxxxxxxxxxxx');
41 | dbms_vector.create_credential(
42 | credential_name => 'OCI_CRED',
43 | params => json(jo.to_string)
44 | );
45 | end;
46 | /
47 |
48 |
49 | SELECT
50 | dbms_vector.utl_to_embedding(
51 | 'hello',
52 | json('{
53 | "provider": "OCIGenAI",
54 | "credential_name": "OCI_CRED",
55 | "url": "https://inference.generativeai.eu-frankfurt-1.oci.oraclecloud.com/20231130/actions/embedText",
56 | "model": "cohere.embed-multilingual-v3.0"
57 | }')
58 | )
59 | FROM dual;
60 |
61 |
62 | CREATE TABLE ai_extracted_data AS
63 | SELECT
64 | j.chunk_id,
65 | j.chunk_offset,
66 | j.chunk_length,
67 | j.chunk_data
68 | FROM
69 | -- divide a blob into chunks (utl_to_chunks):
70 | (select * from dbms_vector_chain.utl_to_chunks(
71 | dbms_vector_chain.utl_to_text(
72 | to_blob(
73 | DBMS_CLOUD.GET_OBJECT('OCI_CRED_BUCKET', 'https://objectstorage.eu-frankfurt-1.oraclecloud.com/p/xxxxxxxxxxxxx/n/intoraclerohit/b/GenAI-Agents/o/faq.txt')
74 | )
75 | ), json('{"max":"200", "normalize":"all", "overlap":"20"}')
76 | )
77 | ),
78 | JSON_TABLE(column_value, '$'
79 | COLUMNS (
80 | chunk_id NUMBER PATH '$.chunk_id',
81 | chunk_offset NUMBER PATH '$.chunk_offset',
82 | chunk_length NUMBER PATH '$.chunk_length',
83 | chunk_data CLOB PATH '$.chunk_data'
84 | )
85 | ) j;
86 |
87 |
88 | select * from ai_extracted_data;
89 |
90 |
91 | select count(*) from ai_extracted_data;
92 |
93 |
94 | -- Create vector table from an existing table
95 | -- There is a quota limit for running the embedding model. For datasets with more than 400 records, we can repeatedly load the data or write a script to load data in batches.
96 | -- In the following table ai_extracted_data, chunk_id is the record id while chunk_data is the content column.
97 | create table ai_extracted_data_vector as (
98 | select chunk_id as docid, to_char(chunk_data) as body, dbms_vector.utl_to_embedding(
99 | chunk_data,
100 | json('{
101 | "provider": "OCIGenAI",
102 | "credential_name": "OCI_CRED",
103 | "url": "https://inference.generativeai.eu-frankfurt-1.oci.oraclecloud.com/20231130/actions/embedText",
104 | "model": "cohere.embed-multilingual-v3.0"
105 | }')
106 | ) as text_vec
107 | from ai_extracted_data
108 | where chunk_id <= 400
109 | );
110 |
111 |
112 | insert into ai_extracted_data_vector
113 | select chunk_id as docid, to_char(chunk_data) as body, dbms_vector.utl_to_embedding(
114 | chunk_data,
115 | json('{
116 | "provider": "OCIGenAI",
117 | "credential_name": "OCI_CRED",
118 | "url": "https://inference.generativeai.eu-frankfurt-1.oci.oraclecloud.com/20231130/actions/embedText",
119 | "model": "cohere.embed-multilingual-v3.0"
120 | }')
121 | ) as text_vec
122 | from ai_extracted_data
123 | where chunk_id > 400;
124 |
125 |
126 | select * from ai_extracted_data_vector;
127 |
128 |
129 | select count(*) from ai_extracted_data_vector;
130 |
131 |
132 | -- Create function from vector table
133 | -- When returning the results, rename (alias) the record ID as 'DOCID', the content column as 'BODY', and the VECTOR_DISTANCE between text_vec and query_vec as 'SCORE'. These 3 columns are required. If the vector table includes 'URL' and 'Title' columns, rename them (alias) as 'URL' and 'TITLE' respectively.
134 | create or replace FUNCTION retrieval_func_ai (
135 | p_query IN VARCHAR2,
136 | top_k IN NUMBER
137 | ) RETURN SYS_REFCURSOR IS
138 | v_results SYS_REFCURSOR;
139 | query_vec VECTOR;
140 | BEGIN
141 | query_vec := dbms_vector.utl_to_embedding(
142 | p_query,
143 | json('{
144 | "provider": "OCIGenAI",
145 | "credential_name": "OCI_CRED",
146 | "url": "https://inference.generativeai.eu-frankfurt-1.oci.oraclecloud.com/20231130/actions/embedText",
147 | "model": "cohere.embed-multilingual-v3.0"
148 | }')
149 | );
150 |
151 | OPEN v_results FOR
152 | SELECT DOCID, BODY, VECTOR_DISTANCE(text_vec, query_vec) as SCORE
153 | FROM ai_extracted_data_vector
154 | ORDER BY SCORE
155 | FETCH FIRST top_k ROWS ONLY;
156 |
157 | RETURN v_results;
158 | END;
159 |
160 |
161 | -- Run & check the function
162 | -- Display the DOCID and SCORE
163 | DECLARE
164 | v_results SYS_REFCURSOR;
165 | v_docid VARCHAR2(100);
166 | v_body VARCHAR2(4000);
167 | v_score NUMBER;
168 | p_query VARCHAR2(100) := 'Tell me about Oracle Free Tier Account?';
169 | top_k PLS_INTEGER := 10;
170 | BEGIN
171 | v_results := retrieval_func_ai(p_query, top_k);
172 |
173 | DBMS_OUTPUT.PUT_LINE('DOCID | BODY | SCORE');
174 | DBMS_OUTPUT.PUT_LINE('--------|------|------');
175 |
176 | LOOP
177 | FETCH v_results INTO v_docid, v_body, v_score;
178 | EXIT WHEN v_results%NOTFOUND;
179 |
180 | DBMS_OUTPUT.PUT_LINE(v_docid || ' | ' || v_body || ' | ' || v_score);
181 | END LOOP;
182 |
183 | CLOSE v_results;
184 | END;
185 |
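A hedged sketch of calling the retrieval_func_ai function above from Python with python-oracledb (connection details are placeholders; the function returns a SYS_REFCURSOR of DOCID, BODY, SCORE rows as defined above):

    import oracledb

    conn = oracledb.connect(user="admin", password="...", dsn="...")  # placeholders
    with conn.cursor() as cur:
        ref_cursor = cur.callfunc("retrieval_func_ai", oracledb.DB_TYPE_CURSOR,
                                  ["Tell me about Oracle Free Tier Account?", 10])
        for docid, body, score in ref_cursor:
            print(docid, score)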
--------------------------------------------------------------------------------
/demos/test_scripts/oracle23ai_rag.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import array
3 | import time
4 | import oci
5 | import os
6 | from dotenv import load_dotenv
7 | from PyPDF2 import PdfReader
8 | import oracledb
9 | from sentence_transformers import CrossEncoder
10 | from langchain.text_splitter import CharacterTextSplitter
11 | from langchain_huggingface import HuggingFaceEmbeddings
12 | from langchain_community.vectorstores.utils import DistanceStrategy
13 | from langchain_core.documents import BaseDocumentTransformer, Document
14 | from langchain_community.llms import OCIGenAI
15 | from langchain_community.llms import Cohere
16 | from langchain_core.prompts import PromptTemplate
17 | from langchain.chains import LLMChain
18 | from langchain_core.runnables import RunnablePassthrough
19 | from langchain_core.output_parsers import StrOutputParser
20 | from langchain_community.vectorstores import oraclevs
21 | from langchain_community.vectorstores.oraclevs import OracleVS
22 | from langchain_community.embeddings import OCIGenAIEmbeddings
23 | print("Successfully imported libraries and modules")
24 |
25 | # Function to format and add metadata to Oracle 23ai Vector Store
26 |
27 | def chunks_to_docs_wrapper(row: dict) -> Document:
28 | """
29 | Converts text into a Document object suitable for ingestion into Oracle Vector Store.
30 | - row (dict): A dictionary representing a row of data with keys for 'id', 'link', and 'text'.
31 | """
32 | metadata = {'id': str(row['id']), 'link': row['link']}
33 | print(metadata)
34 | return Document(page_content=row['text'], metadata=metadata)
35 | print("Successfully defined metadata wrapper")
36 |
37 | # Load environment variables
38 |
39 | load_dotenv()
40 | username = " "
41 | password = " "
42 | dsn = ''' '''
43 |
44 | COMPARTMENT_OCID = " "
45 | print("The database user name is:",username)
46 | print("Database connection information is:",dsn)
47 |
48 | # Connect to the database
49 |
50 | try:
51 | conn23c = oracledb.connect(user=username, password=password, dsn=dsn)
52 | print("Connection successful!")
53 | except Exception as e:
54 | print("Connection failed!", e)
55 |
56 |
57 | # RAG Step 1 - Load the document
58 |
59 | # creating a pdf reader object
60 | pdf = PdfReader('doc.pdf')
61 |
62 | # print number of pages in pdf file
63 | print("The number of pages in this document is ",len(pdf.pages))
64 | # print the first page
65 | print(pdf.pages[0].extract_text())
66 |
67 | # RAG Step 2 - Transform the document to text
68 |
69 | if pdf is not None:
70 | print("Transforming the PDF document to text...")
71 | text=""
72 | for page in pdf.pages:
73 | text += page.extract_text()
74 | print("You have transformed the PDF document to text format")
75 |
76 | # RAG Step 3 - Chunk the text document into smaller chunks
77 | text_splitter = CharacterTextSplitter(separator="\n",chunk_size=800,chunk_overlap=100,length_function=len)
78 | chunks = text_splitter.split_text(text)
79 | print(chunks[0])
80 |
81 | # Create metadata wrapper to store additional information in the vector store
82 | """
83 | Converts a row from a DataFrame into a Document object suitable for ingestion into Oracle Vector Store.
84 | - row (dict): A dictionary representing a row of data with keys for 'id', 'link', and 'text'.
85 | """
86 | docs = [chunks_to_docs_wrapper({'id': page_num, 'link': f'Page {page_num}', 'text': text}) for page_num, text in enumerate(chunks)]
87 | print("Created metadata wrapper with the chunks")
88 |
89 | # RAG Step 4 - Using an embedding model, embed the chunks as vectors into Oracle Database 23ai.
90 |
91 | # Initialize embedding model
92 |
93 | model_4db = OCIGenAIEmbeddings(model_id=" ",service_endpoint=" ",compartment_id=" ")
94 |
95 | print("check....Done")
96 | # Configure the vector store with the model, table name, and using the indicated distance strategy for the similarity search and vectorize the chunks
97 | s1time = time.time()
98 | knowledge_base = OracleVS.from_documents(docs, model_4db, client=conn23c, table_name="MY_DEMO", distance_strategy=DistanceStrategy.DOT_PRODUCT)
99 | s2time = time.time()
100 | print("check....Done")
101 | print( f"Vectorizing and inserting chunks duration: {round(s2time - s1time, 1)} sec.")
102 |
103 | # Take a moment to celebrate. You have successfully uploaded the document, transformed it to text, split into chunks, and embedded its vectors in Oracle Database 23ai
104 |
105 | print("Yay! You have successfully uploaded the document, transformed it to text, split into chunks, and embedded its vectors in Oracle Database 23ai")
106 |
107 | # RAG Step 5 - Build the prompt to query the document
108 |
109 | user_question = (" ")
110 | print ("The prompt to the LLM will be:",user_question)
111 |
112 |
113 | # Choice 1, Set the OCI GenAI LLM
114 | ENDPOINT = " "
115 | COMPARTMENT_OCID = COMPARTMENT_OCID
116 | print(ENDPOINT)
117 |
118 |
119 | cohere_api_key = " "
120 |
121 | llmOCI = Cohere(
122 | model="command",
123 | cohere_api_key=cohere_api_key,
124 | max_tokens=1000,
125 | temperature=0.7
126 | )
127 |
128 | # Set up the template for the questions and context, and instantiate the database retriever object
129 | template = """Answer the question based only on the following context:
130 | {context} Question: {user_question}"""
131 | prompt = PromptTemplate.from_template(template)
132 | retriever = knowledge_base.as_retriever()
133 |
134 | # RAG Steps 6 and 7 Chain the entire process together, retrieve the context, construct the prompt with the question and context, and pass to LLM for the response
135 |
136 | s5time = time.time()
137 | print("We are sending the prompt and RAG context to the LLM, wait a few seconds for the response...")
138 | chain = (
139 | {"context": retriever, "user_question": RunnablePassthrough()}
140 | | prompt
141 | | llmOCI
142 | | StrOutputParser()
143 | )
144 | response = chain.invoke(user_question)
145 | print(user_question)
146 | print(response)
147 | # Print timings for the RAG execution steps
148 |
149 | s6time = time.time()
150 | print("")
151 | print( f"Send user question and ranked chunks to LLM and get answer duration: {round(s6time - s5time, 1)} sec.")
152 |
153 | print("")
154 | print("Congratulations! You've completed your RAG application with AI Vector Search in Oracle Database 23ai using LangChain")
155 |
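The chunking step (RAG Step 3 above) can be tried in isolation; a self-contained sketch with the same splitter settings (the sample text is an assumption):

    from langchain.text_splitter import CharacterTextSplitter

    text = "Oracle Database 23ai adds AI Vector Search to the database.\n" * 50
    splitter = CharacterTextSplitter(separator="\n", chunk_size=800,
                                     chunk_overlap=100, length_function=len)
    chunks = splitter.split_text(text)
    print(len(chunks), len(chunks[0]))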
--------------------------------------------------------------------------------
/labs/LoadProperties.py:
--------------------------------------------------------------------------------
1 | class LoadProperties:
2 |
3 | def __init__(self):
4 |
5 | import json
6 | # reading the data from the file
7 | with open('config.txt') as f:
8 | data = f.read()
9 |
10 | js = json.loads(data)
11 |
12 | self.model_name = js["model_name"]
13 | self.endpoint = js["endpoint"]
14 | self.compartment_ocid = js["compartment_ocid"]
15 | self.embedding_model_name=js["embedding_model_name"]
16 |
17 | def getModelName(self):
18 | return self.model_name
19 |
20 | def getEndpoint(self):
21 | return self.endpoint
22 |
23 | def getCompartment(self):
24 | return self.compartment_ocid
25 |
26 | def getEmbeddingModelName(self):
27 | return self.embedding_model_name
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
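Usage sketch for the LoadProperties helper above; it expects a config.txt like the one shown below in the current working directory:

    from LoadProperties import LoadProperties

    properties = LoadProperties()
    print(properties.getModelName())       # e.g. cohere.command-r-08-2024
    print(properties.getEmbeddingModelName())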
--------------------------------------------------------------------------------
/labs/config.txt:
--------------------------------------------------------------------------------
1 | {"model_name":"cohere.command-r-08-2024",
2 | "embedding_model_name":"cohere.embed-english-v3.0",
3 | "endpoint":"https://inference.generativeai.eu-frankfurt-1.oci.oraclecloud.com",
4 | "compartment_ocid":""
5 | }
6 |
--------------------------------------------------------------------------------
/labs/init-genailabs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Define a log file for capturing all output
4 | LOGFILE=/var/log/cloud-init-output.log
5 | exec > >(tee -a $LOGFILE) 2>&1
6 |
7 | # Marker file to ensure the script only runs once
8 | MARKER_FILE="/home/opc/.init_done"
9 |
10 | # Check if the marker file exists
11 | if [ -f "$MARKER_FILE" ]; then
12 | echo "Init script has already been run. Exiting."
13 | exit 0
14 | fi
15 |
16 | echo "===== Starting Cloud-Init Script ====="
17 |
18 | # Expand the boot volume
19 | echo "Expanding boot volume..."
20 | sudo /usr/libexec/oci-growfs -y
21 |
22 | # Enable ol8_addons and install necessary development tools
23 | echo "Installing required packages..."
24 | sudo dnf config-manager --set-enabled ol8_addons
25 | sudo dnf install -y podman git libffi-devel bzip2-devel ncurses-devel readline-devel wget make gcc zlib-devel openssl-devel
26 |
27 | # Install the latest SQLite from source
28 | echo "Installing latest SQLite..."
29 | cd /tmp
30 | wget https://www.sqlite.org/2023/sqlite-autoconf-3430000.tar.gz
31 | tar -xvzf sqlite-autoconf-3430000.tar.gz
32 | cd sqlite-autoconf-3430000
33 | ./configure --prefix=/usr/local
34 | make
35 | sudo make install
36 |
37 | # Verify the installation of SQLite
38 | echo "SQLite version:"
39 | /usr/local/bin/sqlite3 --version
40 |
41 | # Ensure the correct version is in the path and globally
42 | export PATH="/usr/local/bin:$PATH"
43 | export LD_LIBRARY_PATH="/usr/local/lib:$LD_LIBRARY_PATH"
44 | echo 'export PATH="/usr/local/bin:$PATH"' >> /home/opc/.bashrc
45 | echo 'export LD_LIBRARY_PATH="/usr/local/lib:$LD_LIBRARY_PATH"' >> /home/opc/.bashrc
46 |
47 | # Set environment variables to link the newly installed SQLite with Python build globally
48 | echo 'export CFLAGS="-I/usr/local/include"' >> /home/opc/.bashrc
49 | echo 'export LDFLAGS="-L/usr/local/lib"' >> /home/opc/.bashrc
50 |
51 | # Source the updated ~/.bashrc to apply changes globally
52 | source /home/opc/.bashrc
53 |
54 | # Create a persistent volume directory for Oracle data
55 | echo "Creating Oracle data directory..."
56 | sudo mkdir -p /home/opc/oradata
57 | echo "Setting up permissions for the Oracle data directory..."
58 | sudo chown -R 54321:54321 /home/opc/oradata
59 | sudo chmod -R 755 /home/opc/oradata
60 |
61 | # Run the Oracle Database Free Edition container
62 | echo "Running Oracle Database container..."
63 | sudo podman run -d \
64 | --name 23ai \
65 | --network=host \
66 | -e ORACLE_PWD=database123 \
67 | -v /home/opc/oradata:/opt/oracle/oradata:z \
68 | container-registry.oracle.com/database/free:latest
69 |
70 | # Wait for Oracle Container to start
71 | echo "Waiting for Oracle container to initialize..."
72 | sleep 10
73 |
74 | # Check if the listener is up and if the freepdb1 service is registered
75 | echo "Checking if service freepdb1 is registered with the listener..."
76 | while ! sudo podman exec 23ai bash -c "lsnrctl status | grep -q freepdb1"; do
77 | echo "Waiting for freepdb1 service to be registered with the listener..."
78 | sleep 30
79 | done
80 | echo "freepdb1 service is registered with the listener."
81 |
82 | # Retry loop for Oracle login with error detection
83 | MAX_RETRIES=5
84 | RETRY_COUNT=0
85 | DELAY=10
86 |
87 | while true; do
88 | OUTPUT=$(sudo podman exec 23ai bash -c "sqlplus -S sys/database123@localhost:1521/freepdb1 as sysdba <> $HOME/.bashrc
152 | export PYENV_ROOT="\$HOME/.pyenv"
153 | [[ -d "\$PYENV_ROOT/bin" ]] && export PATH="\$PYENV_ROOT/bin:\$PATH"
154 | eval "\$(pyenv init --path)"
155 | eval "\$(pyenv init -)"
156 | eval "\$(pyenv virtualenv-init -)"
157 | EOF
158 |
159 | # Ensure .bashrc is sourced on login
160 | cat << EOF >> $HOME/.bash_profile
161 | if [ -f ~/.bashrc ]; then
162 | source ~/.bashrc
163 | fi
164 | EOF
165 |
166 | # Source the updated ~/.bashrc to apply pyenv changes
167 | source $HOME/.bashrc
168 |
169 | # Export PATH to ensure pyenv is correctly initialized
170 | export PATH="$PYENV_ROOT/bin:$PATH"
171 |
172 | # Install Python 3.11.9 using pyenv with the correct SQLite version linked
173 | CFLAGS="-I/usr/local/include" LDFLAGS="-L/usr/local/lib" LD_LIBRARY_PATH="/usr/local/lib" pyenv install 3.11.9
174 |
175 | # Rehash pyenv to update shims
176 | pyenv rehash
177 |
178 | # Set up vectors directory and Python 3.11.9 environment
179 | mkdir -p $HOME/labs
180 | cd $HOME/labs
181 | pyenv local 3.11.9
182 |
183 | # Rehash again to ensure shims are up to date
184 | pyenv rehash
185 |
186 | # Verify Python version in the labs directory
187 | python --version
188 |
189 | # Adding the PYTHONPATH for correct installation and look up for the libraries
190 | export PYTHONPATH=$HOME/.pyenv/versions/3.11.9/lib/python3.11/site-packages:$PYTHONPATH
191 |
192 | # Install required Python packages
193 | $HOME/.pyenv/versions/3.11.9/bin/pip install --no-cache-dir oci==2.129.1 oracledb sentence-transformers langchain==0.2.6 langchain-community==0.2.6 langchain-chroma==0.1.2 langchain-core==0.2.11 langchain-text-splitters==0.2.2 langsmith==0.1.83 pypdf==4.2.0 streamlit==1.36.0 python-multipart==0.0.9 chroma-hnswlib==0.7.3 chromadb==0.5.3 torch==2.5.0
194 |
195 | # Download the model during script execution
196 | python -c "from sentence_transformers import SentenceTransformer; SentenceTransformer('all-MiniLM-L12-v2')"
197 |
198 | # Install JupyterLab
199 | pip install --user jupyterlab
200 |
201 | # Install OCI CLI
202 | echo "Installing OCI CLI..."
203 | curl -L https://raw.githubusercontent.com/oracle/oci-cli/master/scripts/install/install.sh -o install.sh
204 | chmod +x install.sh
205 | ./install.sh --accept-all-defaults
206 |
207 | # Verify the installation
208 | echo "Verifying OCI CLI installation..."
209 | oci --version || { echo "OCI CLI installation failed."; exit 1; }
210 |
211 | # Ensure all the binaries are added to PATH
212 | echo 'export PATH=$PATH:$HOME/.local/bin' >> $HOME/.bashrc
213 | source $HOME/.bashrc
214 |
215 | # Copy files from the git repo labs folder to the labs directory in the instance
216 | echo "Copying files from the 'labs' folder in the OU Git repository to the existing labs directory..."
217 | REPO_URL="https://github.com/ou-developers/ou-generativeai-pro.git"
218 | FINAL_DIR="$HOME/labs" # Existing directory on your instance
219 |
220 | # Initialize a new git repository
221 | git init
222 |
223 | # Add the remote repository
224 | git remote add origin $REPO_URL
225 |
226 | # Enable sparse-checkout and specify the folder to download
227 | git config core.sparseCheckout true
228 | echo "labs/*" >> .git/info/sparse-checkout
229 |
230 | # Pull only the specified folder into the existing directory
231 | git pull origin main # Replace 'main' with the correct branch name if necessary
232 |
233 | # Move the contents of the 'labs' subfolder to the root of FINAL_DIR, if necessary
234 | mv labs/* . 2>/dev/null || true # Move files if 'labs' folder exists
235 |
236 | # Remove any remaining empty 'labs' directory and .git folder
237 | rm -rf .git labs
238 |
239 | echo "Files successfully downloaded to $FINAL_DIR"
240 |
241 | EOF_OPC
242 |
243 | # Create the marker file to indicate the script has been run
244 | touch "$MARKER_FILE"
245 |
246 | echo "===== Cloud-Init Script Completed Successfully ====="
247 | exit 0
248 |
--------------------------------------------------------------------------------
/labs/pdf-docs/oci-ai-foundations.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ou-developers/ou-generativeai-pro/d95e9e524101a071e400498c6b58bd3c762a7109/labs/pdf-docs/oci-ai-foundations.pdf
--------------------------------------------------------------------------------