├── .DS_Store
├── assets
│   ├── logo.png
│   ├── main_page.png
│   └── processed_logo.png
├── src
│   ├── static
│   │   ├── style.css
│   │   ├── script.js
│   │   ├── about.css
│   │   ├── about.html
│   │   ├── note_ai.css
│   │   ├── static_feature_card.html
│   │   ├── links.html
│   │   ├── sidebar.css
│   │   ├── main_chat_input.css
│   │   ├── secondpage.css
│   │   └── mian_area.css
│   ├── utils
│   │   ├── constants.py
│   │   └── paths.py
│   ├── config
│   │   └── config_loader.py
│   ├── models
│   │   └── model_loader.py
│   └── app.py
├── docs
│   ├── install_ollama.txt
│   ├── install_sqlite3.txt
│   ├── install_c_compiler_llama_cpp.txt
│   ├── raw_requirements.txt
│   ├── setup.md
│   └── langchain_tools.md
├── .gitignore
├── app
│   └── style.css
├── raw_requirements.txt
├── README.md
└── requirements.txt
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kuilenren/AI_Agent_Chatbot_Synapse/main/.DS_Store
--------------------------------------------------------------------------------
/assets/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kuilenren/AI_Agent_Chatbot_Synapse/main/assets/logo.png
--------------------------------------------------------------------------------
/assets/main_page.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kuilenren/AI_Agent_Chatbot_Synapse/main/assets/main_page.png
--------------------------------------------------------------------------------
/assets/processed_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kuilenren/AI_Agent_Chatbot_Synapse/main/assets/processed_logo.png
--------------------------------------------------------------------------------
/src/static/style.css:
--------------------------------------------------------------------------------
1 | /* static/style.css */
2 | body {
3 | background-color: #f0f2f6;
4 | }
5 |
6 | h1 {
7 | color: #3a3a3a;
8 | text-align: center;
9 | font-family: 'Helvetica', sans-serif;
10 | }
11 |
--------------------------------------------------------------------------------
/docs/install_ollama.txt:
--------------------------------------------------------------------------------
1 | curl -fsSL https://ollama.com/install.sh | sh
2 |
3 | # ollama 0.4.1 -- current version at time of writing
4 | # langchain-ollama -- 0.1.3
5 | # https://github.com/langchain-ai/langchain/commits/master/libs/partners/ollama/langchain_ollama/chat_models.py
6 |
7 | ollama run llama3.1:8b-instruct-q8_0
8 |
9 | /bye
--------------------------------------------------------------------------------
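A quick way to smoke-test the model pulled above from Python is the langchain-ollama package referenced in the notes. A minimal sketch, assuming `ollama serve` is running on the default localhost:11434 and `pip install langchain-ollama` has been done:

    # Minimal smoke test for the model pulled above.
    from langchain_ollama import ChatOllama

    llm = ChatOllama(model="llama3.1:8b-instruct-q8_0", temperature=0)
    response = llm.invoke("Reply with the single word: ready")
    print(response.content)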
/src/static/script.js:
--------------------------------------------------------------------------------
1 | // static/script.js
2 | document.addEventListener("DOMContentLoaded", function () {
3 | const heading = document.querySelector("h1");
4 | if (heading) {
5 | heading.addEventListener("click", () => {
6 | alert("You clicked the heading!");
7 | });
8 | }
9 | });
10 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .venv_agent
2 | *.db
3 | *.db-journal
4 | *.db-wal
5 | *.db-shm
6 | */credentials.yaml
7 | */df.csv
8 | *.gguf
9 | *.ggml
10 | *.toml
11 | *.sqlite3
12 | *.parquet
13 | *.feather
14 | backup_sentence_transformer
15 | all-MiniLM-L6-v2
16 | nomic-embed-text-v2-moe
17 | *.safetensors
18 | snowflake-arctic-embed-l-v2.0
19 | __pycache__
20 | *.pkl
21 | *.index
22 | *.json
23 | *.xlsx
24 | *.csv
25 | *temp
--------------------------------------------------------------------------------
/docs/install_sqlite3.txt:
--------------------------------------------------------------------------------
1 | # install SQLite version 3.47.0 on a Linux VM:
2 | sudo apt update
3 | sudo apt install build-essential wget
4 | wget https://www.sqlite.org/2024/sqlite-autoconf-3470000.tar.gz
5 | tar xvf sqlite-autoconf-3470000.tar.gz
6 | cd sqlite-autoconf-3470000
7 | ./configure
8 | make
9 | sudo make install
10 | sqlite3 --version
11 | cd ..
12 | rm -rf sqlite-autoconf-3470000
13 | rm sqlite-autoconf-3470000.tar.gz
--------------------------------------------------------------------------------
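Note that Python's sqlite3 module links against its own copy of the library, so it will not necessarily pick up the build just compiled into /usr/local. A quick check of what Python actually sees:

    # Reports the SQLite library version linked into Python's sqlite3 module;
    # this can differ from the `sqlite3 --version` CLI built above.
    import sqlite3
    print(sqlite3.sqlite_version)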
/src/static/about.css:
--------------------------------------------------------------------------------
1 | .about-section-divider {
2 | height: 1.5px;
3 | background: linear-gradient(90deg, rgba(139, 148, 148, 0.01) 0%, rgba(139, 148, 148, 0.1) 50%, rgba(139, 148, 148, 0.01) 100%);
4 | margin: 15px 0;
5 | }
6 |
7 | .about-footer {
8 | font-size: 0.9rem;
9 | color: #666 !important;
10 | font-style: italic;
11 | line-height: 1.4;
12 | }
13 |
14 | .about-footer p {
15 | margin-bottom: 8px;
16 | color: #666 !important;
17 | }
--------------------------------------------------------------------------------
/src/utils/constants.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, List
2 | import os
3 |
4 | TASK_OPTIONS: List[str] = [
5 | "Auto Selection",
6 | "Smart Search Agent",
7 | "Productivity Assistant",
8 | "Data Analysis Agent"
9 | ]
10 |
11 | DEFAULT_MODEL_PARAMS: Dict[str, Any] = {
12 | "temperature": 0.1, #0.0,
13 | "top_k": 20, #15,
14 | "top_p": 0.4, #0.5,
15 | "repeat_penalty": 1.3,
16 | "n_ctx": 8192,
17 | "n_batch": 512,
18 | "n_gpu_layers": -1,
19 | "n_threads":os.cpu_count()
20 | }
--------------------------------------------------------------------------------
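DEFAULT_MODEL_PARAMS mixes llama-cpp-python constructor options (n_ctx, n_batch, n_gpu_layers, n_threads) with per-call sampling options (temperature, top_k, top_p, repeat_penalty), so a consumer has to split them. A minimal sketch of that split; the GGUF path is a placeholder, not a file from this repo:

    # Sketch: splitting DEFAULT_MODEL_PARAMS between the Llama constructor
    # and a completion call. The model path is hypothetical.
    from llama_cpp import Llama
    from utils.constants import DEFAULT_MODEL_PARAMS as P

    llm = Llama(
        model_path="models/some-model.gguf",   # placeholder
        n_ctx=P["n_ctx"],
        n_batch=P["n_batch"],
        n_gpu_layers=P["n_gpu_layers"],
        n_threads=P["n_threads"],
    )
    out = llm(
        "Q: What is 2 + 2? A:",
        temperature=P["temperature"],
        top_k=P["top_k"],
        top_p=P["top_p"],
        repeat_penalty=P["repeat_penalty"],
        max_tokens=16,
    )
    print(out["choices"][0]["text"])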
/docs/install_c_compiler_llama_cpp.txt:
--------------------------------------------------------------------------------
1 | sudo apt-get update
2 | sudo apt-get install build-essential g++ clang
3 | sudo apt-get install build-essential cmake python3-dev
4 |
5 | # sudo find / -name nvcc
6 |
7 | export CUDACXX=/usr/local/cuda-12.5/bin/nvcc
8 | export PATH=/usr/local/cuda-12.5/bin:$PATH
9 | CMAKE_ARGS="-DGGML_CUDA=on -DCUDA_PATH=/usr/local/cuda-12.5 -DCUDAToolkit_ROOT=/usr/local/cuda-12.5 -DCUDAToolkit_INCLUDE_DIR=/usr/local/cuda-12.5/include -DCUDAToolkit_LIBRARY_DIR=/usr/local/cuda-12.5/lib64" FORCE_CMAKE=1 pip install llama-cpp-python==0.3.7 --no-cache-dir --force-reinstall
10 |
11 | # -DCMAKE_CUDA_ARCHITECTURES=80
--------------------------------------------------------------------------------
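After the CUDA build above finishes, a quick hedged sanity check that the wheel imports and offloads layers (the GGUF path is a placeholder; with verbose=True, llama.cpp logs lines like "offloaded ... layers to GPU" on stderr when the CUDA backend is active):

    # Smoke test for the CUDA-enabled llama-cpp-python build installed above.
    import llama_cpp
    print(llama_cpp.__version__)              # expect 0.3.7 per the pin above

    llm = llama_cpp.Llama(
        model_path="models/some-model.gguf",  # placeholder path
        n_gpu_layers=-1,                      # offload all layers if CUDA works
        verbose=True,                         # watch stderr for GPU offload lines
    )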
/app/style.css:
--------------------------------------------------------------------------------
1 | @import url('https://fonts.googleapis.com/css2?family=Lato:wght@300;400;700&display=swap');
2 |
3 | html, body, [class*="css"], h5, h4, h3, p, span, .sidebar-text, .sidebar-subtext, .logo-text, code.st-emotion-cache-1drfmnm, .st-bm, .st-ak, .st-bn, .st-bo, .st-bp, .st-ar, .st-bq, .st-br, .st-bs, .st-emotion-cache-sy3zga, .stCode,
4 | .stTextInput, .stTextArea, .stButton, .stSelectbox,
5 | .stMultiSelect, .stNumber, .stMarkdownContainer {
6 | font-family: 'Lato', sans-serif !important;
7 | /* padding: -0.3px 0px; negative padding is invalid CSS and was ignored */
8 | color: #ffffff;
9 | }
10 |
11 | .stSidebar {
12 | background-color: #202222;
13 | }
14 |
--------------------------------------------------------------------------------
/src/static/about.html:
--------------------------------------------------------------------------------
1 | [HTML markup lost in extraction; visible text: "Explore AI Agents to generate responses tailored to your needs."]
--------------------------------------------------------------------------------
/raw_requirements.txt:
--------------------------------------------------------------------------------
1 | # Ensure vs_BuildTools.exe is installed with CMake checked and the system PATH set up
2 | langchain_community
3 | tf-keras
4 | langchain_huggingface
5 | langchain_experimental
6 | langchain_ollama
7 | faiss-cpu
8 | torch
9 | tensorflow
10 | accelerate
11 | peft
12 | bitsandbytes
13 | setuptools
14 | wheel
15 | cmake
16 | transformers
17 | sentence_transformers
18 | pyprojroot
19 | huggingface_hub
20 | # llama-cpp-python==0.2.90
21 | llama-cpp-python
22 | llamacpp
23 | pandas
24 | numpy
25 | tabulate
26 | pymysql
27 | langchain_groq
28 | langchain
29 | langchain_openai
30 | plotly
31 | nbformat
32 | seaborn
33 | streamlit
34 | streamlit_authenticator
35 | langgraph
36 | IPython
37 | openpyxl
38 |
--------------------------------------------------------------------------------
/docs/raw_requirements.txt:
--------------------------------------------------------------------------------
1 | # Ensure vs_BuildTools.exe is installed with CMake checked and the system PATH set up
2 | langchain_community
3 | tf-keras
4 | langchain_huggingface
5 | langchain_experimental
6 | langchain_ollama
7 | faiss-cpu
8 | torch
9 | tensorflow
10 | accelerate
11 | peft
12 | bitsandbytes
13 | setuptools
14 | wheel
15 | cmake
16 | transformers
17 | sentence_transformers
18 | pyprojroot
19 | huggingface_hub
20 | # tensorflow (duplicate of line 9)
21 | # llama-cpp-python -> install as per /docs/install_c_compiler_llama_cpp.txt
22 | # llamacpp
23 | pandas
24 | numpy
25 | tabulate
26 | pymysql
27 | langchain_groq
28 | langchain==0.2.14
29 | langchain_openai
30 | plotly==5.24.0
31 | nbformat
32 | seaborn
33 | streamlit
34 | langgraph
35 | IPython
36 | openpyxl
--------------------------------------------------------------------------------
/docs/setup.md:
--------------------------------------------------------------------------------
1 | # sudo apt install python3.12-venv
2 | python3.12 -m venv .venv_agent
3 | source .venv_agent/bin/activate
4 |
5 |
6 | # Manually install Ollama (macOS, via Homebrew)
7 | brew install ollama
8 | brew services start ollama
9 |
10 | # update ollama
11 | brew update
12 | brew upgrade ollama
13 |
14 | # Mac
15 | CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install -U llama-cpp-python --no-cache-dir
16 |
17 |
18 | # Windows
19 | $env:CMAKE_ARGS = "-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS"
20 | pip install llama-cpp-python
21 |
22 |
23 | pip install -r raw_requirements.txt
24 | pip freeze > requirements.txt
25 |
26 | ollama ps
27 |
28 |
29 | streamlit run src/app.py
30 |
31 |
32 |
33 | ----
34 |
35 | ollama -v
36 | ollama version is 0.6.4
37 |
38 | pip show ollama
39 | Name: ollama
40 | Version: 0.4.7
41 | Summary: The official Python client for Ollama.
--------------------------------------------------------------------------------
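A quick hedged check from Python that the Ollama server started above is reachable, using the ollama client shown in the `pip show ollama` output:

    # Verifies the Python client can reach the running Ollama server;
    # raises a connection error if `ollama serve` is not up.
    import ollama
    print(ollama.list())   # enumerates locally available models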
/src/static/note_ai.css:
--------------------------------------------------------------------------------
1 | /*Sidebar Note*/
2 | .note-ai {
3 | position: fixed;
4 | bottom: 0;
5 | left: 20px;
6 | font-size: 0.85rem;
7 | color: #666;
8 | background: #191a1a !important;
9 | border-top: 1px solid rgba(221, 221, 221, 0.3);
10 | z-index: 999;
11 | padding: 0.7rem 1rem;
12 | width: 280px; /* Match sidebar width */
13 | box-shadow: 0 -2px 8px rgba(0, 0, 0, 0.05);
14 | backdrop-filter: blur(8px);
15 | border-radius: 8px 8px 0 0;
16 | box-shadow: 2px 2px 2px #2f747a;
17 | transition: all 0.3s ease;
18 | }
19 |
20 | /* Adjust for expanded sidebar */
21 | section[data-testid="stSidebar"][aria-expanded="true"] .note-ai {
22 | width: 280px;
23 | }
24 |
25 | @media screen and (max-width: 768px) {
26 | .note-ai {
27 | width: calc(85vw - 2rem); /* Match mobile sidebar width */
28 | font-size: 0.8rem;
29 | padding: 0.6rem 1rem;
30 | }
31 | }
32 |
33 |
34 |
--------------------------------------------------------------------------------
/src/static/static_feature_card.html:
--------------------------------------------------------------------------------
1 | [HTML markup lost in extraction; visible text of the three feature cards:]
2 | 🌐 Smart Search Agent: Search the web in real time, summarize topics, and gather up-to-date information.
3 | 🗂️ Productivity Assistant: Automate daily tasks like email, calendar, and to-do lists.
4 | 📊 Data Analysis Agent: Analyze large datasets, generate visualizations, and provide insights.
--------------------------------------------------------------------------------
/src/static/links.html:
--------------------------------------------------------------------------------
1 | [HTML markup lost in extraction; no visible text]
--------------------------------------------------------------------------------
/src/utils/paths.py:
--------------------------------------------------------------------------------
1 | # utils/paths.py
2 | import os
3 | from pathlib import Path
4 | from typing import Dict, Union
5 |
6 | class PathManager:
7 | def __init__(self):
8 | # Base paths
9 | self.APP_DIR = Path("/Users/rajmaharajwala/raj-maharajwala-github/ai_agent")
10 | self.ASSETS_DIR = self.APP_DIR / "assets"
11 | self.MODELS_DIR = self.APP_DIR / "models"
12 | self.VECTORDB_DIR = self.APP_DIR / "index_vectordb" / "productivity"
13 |
14 | @property
15 | def model_paths(self) -> Dict[str, Union[Path, str]]:  # "OLLAMA" maps to a model tag string, not a Path
16 | """Returns a dictionary of model paths"""
17 | return {
18 | "EMBEDDINGS": self.MODELS_DIR / "snowflake-arctic-embed-l-v2.0",
19 | "OLLAMA": "qwq:32b-q8_0"
20 | }
21 |
22 | @property
23 | def vectordb(self) -> Dict[str, Path]:
24 | """Returns a dictionary of vectordb paths"""
25 | return {
26 | "CSV_PATH": self.VECTORDB_DIR / "sql_examples_productivity.csv",
27 | "INDEX_PATH": self.VECTORDB_DIR / "sql-example-faiss-productivity-index-L2.index",
28 | "METADATA_DIR": self.VECTORDB_DIR / "sql-example-productivity-metadata.pkl"
29 | }
30 |
31 | @property
32 | def asset_paths(self) -> Dict[str, Path]:
33 | """Returns a dictionary of asset paths"""
34 | return {
35 | "LOGO": self.ASSETS_DIR / "logo.png",
36 | "CSS": self.ASSETS_DIR / "styles.css",
37 | # "CONFIG": self.ASSETS_DIR / "config.yaml"
38 | }
--------------------------------------------------------------------------------
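A minimal usage sketch for PathManager; note that APP_DIR is hard-coded to one machine, so these paths only resolve there (deriving it via pyprojroot, already in the requirements, would be the portable alternative):

    # Sketch: consuming PathManager. All paths derive from the hard-coded APP_DIR.
    from utils.paths import PathManager

    pm = PathManager()
    print(pm.model_paths["EMBEDDINGS"])      # local embedding model directory
    print(pm.vectordb["INDEX_PATH"])         # FAISS index file for SQL examples
    print(pm.asset_paths["LOGO"].exists())   # False anywhere but the original machine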
/README.md:
--------------------------------------------------------------------------------
1 | # Synapse Workflows
2 |
3 | ## Overview
4 | Synapse Workflows is a platform that allows users to interact with specialized AI agents through natural language. The platform offers three main types of intelligent agents designed to assist with different tasks:
5 |
6 | ## UI Chatbot using Streamlit, HTML, CSS, and JavaScript
7 | 
8 |
9 | 1. **Smart Search Agent** - Searches the web in real-time, summarizes topics, and gathers up-to-date information.
10 |
11 | 2. **Productivity Assistant** - Automates daily tasks including email management, calendar organization, and to-do lists.
12 |
13 | 3. **Data Analysis Agent** - Analyzes large datasets, generates visualizations, and provides insights.
14 |
15 | ## Features
16 | - Intuitive natural language interface
17 | - Multiple specialized AI agents for different use cases
18 | - Real-time web search capabilities
19 | - Task automation for productivity
20 | - Data analysis and visualization tools
21 |
22 | ## Getting Started
23 | Users can begin by selecting one of the three available agents from the main interface or by typing queries directly into the "Ask Anything..." input field.
24 |
25 | ## Notes
26 | - The platform indicates that AI workflows may occasionally produce unexpected results.
27 | - The interface includes a "Deploy" button in the upper right for deployment options.
28 |
29 | ## Support
30 | For more detailed information, users can access the documentation section through the link provided at the bottom of the interface.
--------------------------------------------------------------------------------
/src/config/config_loader.py:
--------------------------------------------------------------------------------
1 | # src/config/config_loader.py
2 | import os
3 | import yaml
4 | import streamlit_authenticator as stauth
5 | from typing import Dict, List, Optional
6 |
7 | class ConfigLoader:
8 | def __init__(self, config_dir: str = '/mnt/data/ai_chatbot/code.acrivon/AP3-Chatbot/config'):
9 | self.config_dir = config_dir
10 | self.configs = {}
11 | self.authenticator = None
12 |
13 | def load_config(self, yaml_files: List[str]) -> Dict:
14 | """Load multiple YAML configuration files."""
15 | for yaml_file in yaml_files:
16 | config_path = os.path.join(self.config_dir, f'{yaml_file}.yaml')
17 | try:
18 | with open(config_path, 'r') as file:
19 | self.configs[yaml_file] = yaml.safe_load(file)
20 | except Exception as e:
21 | print(f"Error loading config {yaml_file}: {e}")
22 | self.configs[yaml_file] = {}
23 | return self.configs
24 |
25 | def get_config(self, config_name: str) -> Dict:
26 | """Get specific configuration by name."""
27 | return self.configs.get(config_name, {})
28 |
29 | def init_authentication(self) -> Optional[stauth.Authenticate]:
30 | """Initialize authentication with proper configuration"""
31 | try:
32 | config_path = os.path.join(self.config_dir, 'auth.yaml')
33 | if not os.path.exists(config_path):
34 | raise FileNotFoundError(f"Auth config not found at {config_path}")
35 |
36 | with open(config_path, 'r') as file:
37 | config = yaml.safe_load(file)
38 |
39 | # Initialize authenticator with required parameters
40 | authenticator = stauth.Authenticate(
41 | credentials=config['credentials'],
42 | cookie_name=config['cookie']['name'],
43 | cookie_key=config['cookie']['key'],
44 | cookie_expiry_days=config['cookie']['expiry_days'],
45 | auto_hash=True
46 | )
47 | return authenticator
48 |
49 | except Exception as e:
50 | print(f"Authentication initialization error: {e}")
51 | return None
--------------------------------------------------------------------------------
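A usage sketch for ConfigLoader; the YAML file names below are hypothetical examples, and only auth.yaml is specifically required by init_authentication:

    # Sketch: loading configs and the authenticator. "model" and "auth" are
    # hypothetical names; load_config maps each to <config_dir>/<name>.yaml.
    from config.config_loader import ConfigLoader

    loader = ConfigLoader(config_dir="config")
    configs = loader.load_config(["model", "auth"])
    print(loader.get_config("model"))              # {} if the file failed to load

    authenticator = loader.init_authentication()   # None on any failure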
/docs/langchain_tools.md:
--------------------------------------------------------------------------------
1 | ## Search
2 |
3 | The following tools execute online searches in some shape or form:
4 | - [Bing Search](https://python.langchain.com/docs/integrations/tools/bing_search/)
5 | - [Brave Search](https://python.langchain.com/docs/integrations/tools/brave_search/)
6 | - [DuckDuckGo Search](https://python.langchain.com/docs/integrations/tools/ddg/)
7 | - [Exa Search](https://python.langchain.com/docs/integrations/tools/exa_search/)
8 | - [Google Search](https://python.langchain.com/docs/integrations/tools/google_search/)
9 | - [Google Serper](https://python.langchain.com/docs/integrations/tools/google_serper/)
10 | - [Jina Search](https://python.langchain.com/docs/integrations/tools/jina_search/)
11 | - [Mojeek Search](https://python.langchain.com/docs/integrations/tools/mojeek_search/)
12 | - [SearchApi](https://python.langchain.com/docs/integrations/tools/searchapi/)
13 | - [SearxNG Search](https://python.langchain.com/docs/integrations/tools/searx_search/)
14 | - [SerpAPI](https://python.langchain.com/docs/integrations/tools/serpapi/)
15 | - [Tavily Search](https://python.langchain.com/docs/integrations/tools/tavily_search/)
16 | - [You.com Search](https://python.langchain.com/docs/integrations/tools/you/)
17 |
18 | ## Code Interpreter
19 |
20 | The following tools can be used as code interpreters:
21 | - [Azure Container Apps dynamic sessions](https://python.langchain.com/docs/integrations/tools/azure_dynamic_sessions/)
22 | - [Bearly Code Interpreter](https://python.langchain.com/docs/integrations/tools/bearly/)
23 | - [Riza Code Interpreter](https://python.langchain.com/docs/integrations/tools/riza/)
24 |
25 | ## Productivity
26 |
27 | The following tools can be used to automate tasks in productivity tools:
28 | - [Github Toolkit](https://python.langchain.com/docs/integrations/tools/github/)
29 | - [Gitlab Toolkit](https://python.langchain.com/docs/integrations/tools/gitlab/)
30 | - [Gmail Toolkit](https://python.langchain.com/docs/integrations/tools/gmail/)
31 | - [Infobip Tool](https://python.langchain.com/docs/integrations/tools/infobip/)
32 | - [Jira Toolkit](https://python.langchain.com/docs/integrations/tools/jira/)
33 |   - [rate limits](https://developer.atlassian.com/cloud/jira/platform/rate-limiting/)
34 | - [Office365 Toolkit](https://python.langchain.com/docs/integrations/tools/office365/)
35 |   - [rate limits](https://learn.microsoft.com/en-us/graph/throttling-limits)
36 | - [Slack Toolkit](https://python.langchain.com/docs/integrations/tools/slack/)
37 | - [Twilio Tool](https://python.langchain.com/docs/integrations/tools/twilio/)
38 |   - [pay-as-you-go pricing](https://www.twilio.com/en-us/pricing)
39 |
40 | ## Web Browsing
41 |
42 | The following tools can be used to automate tasks in web browsers:
43 | - [AgentQL Toolkit](https://python.langchain.com/docs/integrations/tools/agentql/)
44 | - [Hyperbrowser Browser Agent Tools](https://python.langchain.com/docs/integrations/tools/hyperbrowser_browser_agent_tools/)
45 | - [Hyperbrowser Web Scraping Tools](https://python.langchain.com/docs/integrations/tools/hyperbrowser_web_scraping_tools/)
46 | - [MultiOn Toolkit](https://python.langchain.com/docs/integrations/tools/multion/)
47 | - [PlayWright Browser Toolkit](https://python.langchain.com/docs/integrations/tools/playwright/)
48 | - [Requests Toolkit](https://python.langchain.com/docs/integrations/tools/requests/)
49 |
50 | ## Database
51 |
52 | The following tools can be used to automate tasks in databases:
53 | - [Cassandra Database Toolkit](https://python.langchain.com/docs/integrations/tools/cassandra_database/)
54 | - [SQLDatabase Toolkit](https://python.langchain.com/docs/integrations/tools/sql_database/)
55 | - [Spark SQL Toolkit](https://python.langchain.com/docs/integrations/tools/spark_sql/)
56 |
57 | ## Finance
58 |
59 | The following tools can be used to execute financial transactions such as payments, purchases, and more:
60 | - [GOAT](https://python.langchain.com/docs/integrations/tools/goat/)
--------------------------------------------------------------------------------
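A minimal sketch of wiring one of the Search entries above into code. DuckDuckGo is the only one that needs no API key; it assumes `pip install duckduckgo-search` alongside langchain-community:

    # Sketch: the DuckDuckGo tool from the Search list above.
    from langchain_community.tools import DuckDuckGoSearchRun

    search = DuckDuckGoSearchRun()
    print(search.invoke("latest stable Streamlit release"))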
/requirements.txt:
--------------------------------------------------------------------------------
1 | absl-py==2.2.2
2 | accelerate==1.6.0
3 | aiohappyeyeballs==2.6.1
4 | aiohttp==3.11.18
5 | aiosignal==1.3.2
6 | altair==5.5.0
7 | annotated-types==0.7.0
8 | anyio==4.9.0
9 | asttokens==3.0.0
10 | astunparse==1.6.3
11 | attrs==25.3.0
12 | bcrypt==4.3.0
13 | bitsandbytes==0.42.0
14 | blinker==1.9.0
15 | cachetools==5.5.2
16 | captcha==0.7.1
17 | certifi==2025.1.31
18 | cffi==1.17.1
19 | charset-normalizer==3.4.1
20 | click==8.1.8
21 | cmake==4.0.0
22 | contourpy==1.3.2
23 | cryptography==44.0.2
24 | cycler==0.12.1
25 | dataclasses-json==0.6.7
26 | decorator==5.2.1
27 | diskcache==5.6.3
28 | distro==1.9.0
29 | et_xmlfile==2.0.0
30 | executing==2.2.0
31 | extra-streamlit-components==0.1.80
32 | faiss-cpu==1.10.0
33 | fastjsonschema==2.21.1
34 | filelock==3.18.0
35 | flatbuffers==25.2.10
36 | fonttools==4.57.0
37 | frozenlist==1.6.0
38 | fsspec==2025.3.2
39 | gast==0.6.0
40 | gitdb==4.0.12
41 | GitPython==3.1.44
42 | google-pasta==0.2.0
43 | groq==0.23.0
44 | grpcio==1.71.0
45 | h11==0.14.0
46 | h5py==3.13.0
47 | httpcore==1.0.8
48 | httpx==0.28.1
49 | httpx-sse==0.4.0
50 | huggingface-hub==0.30.2
51 | idna==3.10
52 | ipython==9.1.0
53 | ipython_pygments_lexers==1.1.1
54 | jedi==0.19.2
55 | Jinja2==3.1.6
56 | jiter==0.9.0
57 | joblib==1.4.2
58 | jsonpatch==1.33
59 | jsonpointer==3.0.0
60 | jsonschema==4.23.0
61 | jsonschema-specifications==2024.10.1
62 | jupyter_core==5.7.2
63 | keras==3.9.2
64 | kiwisolver==1.4.8
65 | langchain==0.3.24
66 | langchain-community==0.3.22
67 | langchain-core==0.3.55
68 | langchain-experimental==0.3.4
69 | langchain-groq==0.3.2
70 | langchain-huggingface==0.1.2
71 | langchain-ollama==0.3.2
72 | langchain-openai==0.3.14
73 | langchain-text-splitters==0.3.8
74 | langgraph==0.3.31
75 | langgraph-checkpoint==2.0.24
76 | langgraph-prebuilt==0.1.8
77 | langgraph-sdk==0.1.63
78 | langsmith==0.3.33
79 | libclang==18.1.1
80 | llama_cpp_python==0.3.8
81 | llamacpp==0.1.14
82 | Markdown==3.8
83 | markdown-it-py==3.0.0
84 | MarkupSafe==3.0.2
85 | marshmallow==3.26.1
86 | matplotlib==3.10.1
87 | matplotlib-inline==0.1.7
88 | mdurl==0.1.2
89 | ml_dtypes==0.5.1
90 | mpmath==1.3.0
91 | multidict==6.4.3
92 | mypy_extensions==1.1.0
93 | namex==0.0.9
94 | narwhals==1.35.0
95 | nbformat==5.10.4
96 | networkx==3.4.2
97 | numpy==2.1.3
98 | ollama==0.4.8
99 | openai==1.75.0
100 | openpyxl==3.1.5
101 | opt_einsum==3.4.0
102 | optree==0.15.0
103 | orjson==3.10.16
104 | ormsgpack==1.9.1
105 | packaging==24.2
106 | pandas==2.2.3
107 | parso==0.8.4
108 | peft==0.15.2
109 | pexpect==4.9.0
110 | pillow==11.2.1
111 | platformdirs==4.3.7
112 | plotly==6.0.1
113 | prompt_toolkit==3.0.51
114 | propcache==0.3.1
115 | protobuf==5.29.4
116 | psutil==7.0.0
117 | ptyprocess==0.7.0
118 | pure_eval==0.2.3
119 | pyarrow==19.0.1
120 | pycparser==2.22
121 | pydantic==2.11.3
122 | pydantic-settings==2.9.1
123 | pydantic_core==2.33.1
124 | pydeck==0.9.1
125 | Pygments==2.19.1
126 | PyJWT==2.10.1
127 | PyMySQL==1.1.1
128 | pyparsing==3.2.3
129 | pyprojroot==0.3.0
130 | python-dateutil==2.9.0.post0
131 | python-dotenv==1.1.0
132 | pytz==2025.2
133 | PyYAML==6.0.2
134 | referencing==0.36.2
135 | regex==2024.11.6
136 | requests==2.32.3
137 | requests-toolbelt==1.0.0
138 | rich==14.0.0
139 | rpds-py==0.24.0
140 | safetensors==0.5.3
141 | scikit-learn==1.6.1
142 | scipy==1.15.2
143 | seaborn==0.13.2
144 | sentence-transformers==4.1.0
145 | setuptools==79.0.0
146 | six==1.17.0
147 | smmap==5.0.2
148 | sniffio==1.3.1
149 | SQLAlchemy==2.0.40
150 | stack-data==0.6.3
151 | streamlit==1.44.1
152 | streamlit-authenticator==0.4.2
153 | sympy==1.13.1
154 | tabulate==0.9.0
155 | tenacity==9.1.2
156 | tensorboard==2.19.0
157 | tensorboard-data-server==0.7.2
158 | tensorflow==2.19.0
159 | termcolor==3.0.1
160 | tf_keras==2.19.0
161 | threadpoolctl==3.6.0
162 | tiktoken==0.9.0
163 | tokenizers==0.21.1
164 | toml==0.10.2
165 | torch==2.6.0
166 | tornado==6.4.2
167 | tqdm==4.67.1
168 | traitlets==5.14.3
169 | transformers==4.51.3
170 | typing-inspect==0.9.0
171 | typing-inspection==0.4.0
172 | typing_extensions==4.13.2
173 | tzdata==2025.2
174 | urllib3==2.4.0
175 | wcwidth==0.2.13
176 | Werkzeug==3.1.3
177 | wheel==0.45.1
178 | wrapt==1.17.2
179 | xxhash==3.5.0
180 | yarl==1.20.0
181 | zstandard==0.23.0
182 |
--------------------------------------------------------------------------------
/src/models/model_loader.py:
--------------------------------------------------------------------------------
1 | from llama_cpp import Llama
2 | # from ollama_functions_custom import OllamaFunctions
3 | from typing import Any, Dict, Optional
4 | from utils.paths import PathManager # CUSTOM_CLASS
5 | from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
6 | from langchain_openai import ChatOpenAI
7 | # from langchain_groq import ChatGroq
8 | from langchain_ollama import ChatOllama
9 | from langchain_ollama.llms import OllamaLLM
10 | import torch
11 | import os
12 |
13 | class ModelLoader:
14 | def __init__(self, configs: Dict):
15 | self.configs = configs
16 | self.path_manager = PathManager()
17 | self.model_paths = self.path_manager.model_paths
18 | def load_model(self, task: str) -> Any:
19 | """Load appropriate model based on task and parameters."""
20 |
21 | if task == 'Productivity Assistant':
22 | return self._load_reasoning(
23 | model_path = str(self.model_paths["OLLAMA_Deepseek-R1"]),
24 | base_url = 'http://127.0.0.1:11434'
25 | # **model_params
26 | )
27 |
28 | elif task == 'Auto Selection':
29 | return self._load_default_ollama(
30 | # base_url=self.configs['model']['end_point_local'],
31 | model_path = str(self.model_paths["OLLAMA_QWEN"]),
32 | base_url = 'http://127.0.0.1:11434'
33 | )
34 |
35 | return self._load_default_ollama(
36 | # base_url=self.configs['model']['end_point_local'],
37 | model_path = str(self.model_paths["OLLAMA_QWEN"]),
38 | base_url = 'http://127.0.0.1:11434'
39 | # **model_params
40 | )
41 |
42 | def _load_reasoning(self, model_path: str, base_url: str) -> OllamaLLM: # **kwargs
43 | os.environ["CUDA_VISIBLE_DEVICES"] = "0"
44 | if torch.cuda.is_available():
45 | torch.cuda.empty_cache()
46 |
47 | return OllamaLLM(
48 | base_url=base_url,
49 | model=model_path,
50 | top_k=20,
51 | top_p=0.4,
52 | temperature=0.0,
53 | num_ctx=29184,
54 | verbose=True,
55 | callbacks=[StreamingStdOutCallbackHandler()],
56 | streaming=True,
57 | seed=-1,
58 | num_gpu=-2, # Explicitly use one device GPU (Total - 1)
59 | keep_alive=-1,
60 | f16=True,
61 | # mirostat=1, # Added adaptive sampling for better quality
62 | # mirostat_tau=4.0, # Conservative tau value for factual responses
63 | # mirostat_eta=0.1 # Learning rate for mirostat
64 | )
65 |
66 | def _load_default_ollama(self, model_path: str, base_url: str) -> ChatOllama: # , **kwargs
67 | os.environ["CUDA_VISIBLE_DEVICES"] = "1"
68 | return ChatOllama(
69 | base_url=base_url,
70 | model=model_path,
71 | top_k=30, # Increased from 20 to consider more token candidates
72 | top_p=0.4,
73 | temperature=0.03, # Reduced slightly for more deterministic/factual responses
74 | num_ctx=15000,#19184, # Increased context window to maximum supported
75 | verbose=False,
76 | format='json',
77 | num_gpu=-2, # Explicitly use one device GPU (Total - 1)
78 | keep_alive=-1,
79 | seed=42, # Fixed seed for reproducibility
80 | f16=True,
81 | mirostat=1, # Added adaptive sampling for better quality
82 | mirostat_tau=4.0, # Conservative tau value for factual responses
83 | mirostat_eta=0.1 # Learning rate for mirostat
84 | )
85 |
86 | ###############
87 | """
88 | sudo systemctl stop ollama
89 | sudo nano /etc/systemd/system/ollama.service
90 | [Service]
91 | Environment="OLLAMA_FLASH_ATTENTION=1"
92 | Environment="OLLAMA_KV_CACHE_TYPE=q8_0"
93 | Environment="OLLAMA_MAX_LOADED_MODELS=3"
94 | Environment="OLLAMA_NUM_PARALLEL=4"
95 | Environment="OLLAMA_NUMA=1"
96 | # Environment="OLLAMA_KEEP_ALIVE=-1"
97 | sudo systemctl daemon-reload
98 | sudo systemctl restart ollama
99 | ---
100 | torch.cuda.empty_cache()
101 | ---
102 | sudo journalctl -u ollama.service | grep "kv_cache"
103 | ---
104 | sudo systemctl stop ollama
105 | ---
106 | OLLAMA_KEEP_ALIVE=-1 OLLAMA_FLASH_ATTENTION=true OLLAMA_KV_CACHE_TYPE=q8_0 OLLAMA_MAX_LOADED_MODELS=3 ollama serve
107 | (or)
108 | export OLLAMA_MAX_LOADED_MODELS=3
109 | export OLLAMA_NUM_PARALLEL=4
110 | export OLLAMA_FLASH_ATTENTION=true
111 | export OLLAMA_KV_CACHE_TYPE=q8_0
112 | ollama serve
113 | ---
114 | nvidia-smi -l 1
115 | ---
116 | """
117 | #####
--------------------------------------------------------------------------------
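A usage sketch for ModelLoader. Two assumptions to flag: an Ollama server must be listening on localhost:11434, and load_model looks up "OLLAMA_Deepseek-R1" / "OLLAMA_QWEN" keys that the PathManager.model_paths shown earlier does not define (it only defines "OLLAMA"), so those keys would need to be added first:

    # Sketch: driving ModelLoader, under the assumptions noted above.
    from models.model_loader import ModelLoader

    loader = ModelLoader(configs={})                   # configs unused by the code paths shown
    llm = loader.load_model("Productivity Assistant")  # -> OllamaLLM (reasoning path)
    print(llm.invoke("Summarize: grocery run at 5pm")) # OllamaLLM.invoke returns a string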
/src/static/sidebar.css:
--------------------------------------------------------------------------------
1 | @keyframes rotate-y {
2 | 0% { transform: rotateY(0deg); }
3 | 100% { transform: rotateY(360deg); }
4 | }
5 |
6 | @keyframes rotate-z {
7 | 0% { transform: rotateZ(0deg); }
8 | 100% { transform: rotateZ(360deg); }
9 | }
10 |
11 | .rotating-logo {
12 | width: 60px;
13 | height: 64px;
14 | transform-style: preserve-3d;
15 | z-index: 9999;
16 | overflow: hidden;
17 | display: inline-block;
18 | vertical-align: middle;
19 | margin: -60px 5px 0 10px;
20 | transition: transform 0.3s ease, filter 0.3s ease;
21 | border-radius: 12px;
22 | }
23 |
24 | .rotating-logo:hover {
25 | animation: rotate-z 8s infinite cubic-bezier(0.68, -0.55, 0.265, 1.55);
26 | filter: drop-shadow(0px 4px 8px rgba(0, 0, 0, 0.15));
27 | transform: scale(1.05);
28 | }
29 |
30 | .sidebar-text {
31 | color: #e0e0e0;
32 | margin-left: -18px;
33 | text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.3);
34 | font-size: clamp(1.4rem, 1.2vw + 0.8rem, 1.7rem);
35 | font-weight: 700;
36 | padding: 0.6em 0.8em;
37 | display: inline-block;
38 | vertical-align: middle;
39 | margin-top: -68px;
40 | white-space: nowrap;
41 | overflow: hidden;
42 | transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
43 | text-overflow: ellipsis;
44 | will-change: transform;
45 | text-shadow: 1px 1px 2px rgba(255, 255, 255, 0.8);
46 | letter-spacing: -0.02em;
47 | }
48 |
49 | section[data-testid="stSidebar"][aria-expanded="true"] {
50 | position: relative;
51 | height: 100vh;
52 | overflow: hidden;
53 | min-width: 280px;
54 | max-width: 320px;
55 | background: #191a1a;
56 | box-shadow: 0 4px 20px rgba(0, 0, 0, 0.2);
57 | border-right: 1px solid rgba(255, 255, 255, 0.05);
58 | }
59 |
60 | .sidebar-text:hover {
61 | color: #16a085;
62 | transform: translateX(3px);
63 | }
64 |
65 | .sidebar-subtext {
66 | display: flex;
67 | justify-content: left;
68 | align-items: left;
69 | color: #9ca3af;
70 | font-size: calc(0.8em + 0.2vw);
71 | margin: -15px 0 -30px 15px;
72 | padding: 0.8rem;
73 | width: calc(100% - 1rem);
74 | white-space: nowrap;
75 | font-weight: 500;
76 | letter-spacing: 0.02em;
77 | text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.3);
78 | transition: all 0.3s ease;
79 | }
80 |
81 | .sidebar-subtext:hover {
82 | color: #16a085;
83 | transform: translateX(3px);
84 | }
85 |
86 | div[data-testid="stSidebarUserContent"] {
87 | height: calc(100vh - 60px);
88 | display: flex;
89 | flex-direction: column;
90 | padding: 1rem 0.5rem;
91 | }
92 |
93 | section[data-testid="stSidebar"] div.stButton {
94 | display: flex;
95 | position: fixed;
96 | bottom: 0;
97 | justify-content: flex-end;
98 | padding: 1rem;
99 | z-index: 1;
100 | background: linear-gradient(to top, rgba(32,34,34,0.95) 0%, rgba(32,34,34,0) 100%);
101 | }
102 |
103 | section[data-testid="stSidebar"] div.stButton > button {
104 | width: 110px;
105 | bottom: 15px;
106 | left: auto;
107 | border-radius: 8px;
108 | transition: all 0.2s ease;
109 | background: #2a2d2d;
110 | color: #e0e0e0;
111 | border: 1px solid rgba(255, 255, 255, 0.1);
112 | }
113 |
114 | section[data-testid="stSidebar"] div.stButton > button:hover {
115 | transform: translateY(-2px);
116 | background: #323535;
117 | border-color: rgba(255, 255, 255, 0.2);
118 | }
119 |
120 | .sidebar-footer {
121 | display: flex;
122 | position: fixed;
123 | bottom: 0;
124 | width: 100%;
125 | background-color: transparent;
126 | color: #94a3b8;
127 | z-index: 999;
128 | padding: 1rem;
129 | font-size: 0.9em;
130 | }
131 |
132 | .sidebar-divider {
133 | margin: 5px 0;
134 | border: 0;
135 | height: 1px;
136 | background: linear-gradient(to right, rgba(255,255,255,0.02) 0%, rgba(255,255,255,0.1) 50%, rgba(255,255,255,0.02) 100%);
137 | }
138 |
139 | .logo-container {
140 | display: flex;
141 | align-items: center;
142 | justify-content: flex-start;
143 | margin: 15px 0 0 0;
144 | padding: 0.5rem;
145 | background: transparent;
146 | border-radius: 12px;
147 | backdrop-filter: blur(10px);
148 | border: transparent;
149 | }
150 |
151 | .streamlit-expanderHeader {
152 | font-size: 1.2em;
153 | font-weight: 600;
154 | color: #e0e0e0;
155 | transition: color 0.2s ease;
156 | }
157 |
158 | .streamlit-expanderHeader:hover {
159 | color: #16a085;
160 | }
161 |
162 | .streamlit-expander {
163 | width: 100% !important;
164 | margin-left: 50px !important;
165 | min-width: 200px;
166 | max-width: 280px;
167 | border-radius: 8px;
168 | border: 1px solid rgba(255, 255, 255, 0.05);
169 | margin: 0.5rem 0;
170 | }
171 |
172 | .stSelectbox > div > div {
173 | background-color: rgba(42, 50, 49, 0.7) !important;
174 | }
175 |
176 | .stSelectbox [data-baseweb="select"] > div {
177 | margin-left: 10px !important;
178 | margin-top: -10px !important;
179 | min-width: 200px;
180 | max-width: 280px;
181 | border: 1px solid darkgrey !important;
182 | border-right: 5px solid #2f747a !important;
183 | }
184 |
185 | @media screen and (max-width: 768px) {
186 | [data-testid="stSidebar"][aria-expanded="true"] {
187 | min-width: 85vw;
188 | max-width: 85vw;
189 | }
190 |
191 | .sidebar-text {
192 | font-size: calc(1.2em + 0.5vw);
193 | }
194 |
195 | .logo-container {
196 | margin: 10px 0 0 0;
197 | margin-top: 100px;
198 | }
199 | }
200 |
201 |
202 |
203 |
--------------------------------------------------------------------------------
/src/static/main_chat_input.css:
--------------------------------------------------------------------------------
1 | /* main_chat_input.css */
2 |
3 | /* Chat input textarea hover/focus styles */
4 | .stChatInput textarea, div[data-testid="stChatInput"] textarea {
5 | caret-color: #e0e0e0 !important;
6 | }
7 |
8 |
9 | /* Style the main chat input container */
10 | .stChatInput, div[data-testid="stChatInput"] {
11 | height: auto !important;
12 | min-height: 150px !important;
13 | padding: 15px !important;
14 | border-radius: 36px !important;
15 | background-color: #292c2c !important;
16 | border: 1px solid rgba(255, 255, 255, 0.072) !important;
17 | box-shadow: 0 2px 8px rgba(0, 0, 0, 0.2) !important;
18 | margin-top: -50px;
19 | }
20 |
21 |
22 | /* Style the inner container - new in 1.42.2 */
23 | .st-emotion-cache-10nh6ap, div[data-testid="stChatInput"] > div {
24 | display: flex !important;
25 | flex-direction: column !important;
26 | width: 100% !important;
27 | border: none !important;
28 | box-shadow: none !important;
29 | background-color: transparent !important;
30 | width: 95% !important;
31 | max-width: 100% !important;
32 | height: auto !important;
33 | min-height: 150px !important;
34 | position: relative !important;
35 | }
36 |
37 | .st-emotion-cache-nbth8s {
38 | position: absolute !important;
39 | left: 10px !important;
40 | bottom: 20px !important;
41 | z-index: 10 !important;
42 | order: 2 !important;
43 | }
44 |
45 | /* Reposition the file upload button container */
46 | [data-testid="stChatInputFileUploadButton"] {
47 | border-radius: 50% !important;
48 | padding-left: 9px !important;
49 | padding-bottom: 1px !important;
50 | align-self: flex-end !important;
51 | /* margin-left: 679px !important; */
52 | background-color: transparent !important;
53 | box-shadow: -0.5px 1px 0px rgba(0,0,0,0.1) !important;
54 | font-weight: 500 !important;
55 | font-size: 14px !important;
56 | cursor: pointer !important;
57 | transition: all 0.3s ease !important;
58 | width: 40px !important;
59 | height: 40px !important;
60 | left: -7px !important;
61 | bottom: -20px !important;
62 | position: absolute !important;
63 | z-index: 10 !important;
64 | }
65 |
66 | [data-testid="stChatInputFileUploadButton"] svg {
67 | fill: #8c9494 !important;
68 | }
69 |
70 | [data-testid="stChatInputFileUploadButton"]:hover {
71 | background-color: #c4d4d4 !important;
72 | color: #94bcbc !important;
73 | box-shadow: 0px 4px 12px rgba(0, 0, 0, 0.3) !important;
74 | border-color: rgba(255, 255, 255, 0.2) !important;
75 | animation: pulse 0.6s ease-out !important;
76 | }
77 |
78 | [data-testid="stChatInputFileUploadButton"]:hover, [data-testid="stChatInputFileUploadButton"]:hover svg {
79 | fill: #212121 !important;
80 | }
81 |
82 | @keyframes pulse {
83 | 0% { transform: scale(1); }
84 | 50% { transform: scale(1.1); }
85 | 100% { transform: scale(1); }
86 | }
87 |
88 | /* Reposition line next 2 file upload button */
89 | .st-emotion-cache-g39xc6 {
90 |
91 | margin-left: 29px !important;
92 | margin-right: 0px !important;
93 | border: none !important;
94 | position: absolute !important;
95 | bottom: 5px !important;
96 | background-color: transparent !important;
97 | z-index: 10 !important;
98 | order: 2 !important;
99 | min-height: 140px !important;
100 | }
101 |
102 | /* Style the textarea container */
103 | .stChatInput [data-baseweb="textarea"] {
104 | border: none !important;
105 | box-shadow: none !important;
106 | background-color: transparent !important;
107 | width: 95% !important;
108 | max-width: 100% !important;
109 | order: 1 !important;
110 | }
111 |
112 | /* Style the actual textarea element */
113 | [data-testid="stChatInputTextArea"] {
114 | min-height: 145px !important;
115 | max-height: 280px !important;
116 | width: 100% !important;
117 | max-width: 100% !important;
118 | overflow-y: auto !important;
119 | overflow-x: hidden !important;
120 | padding: 12px !important;
121 | padding-left: 35px !important; /* Make room for the attachment button */
122 | font-size: 20px !important;
123 | line-height: 1.5 !important;
124 | box-shadow: none !important;
125 | word-wrap: break-word !important;
126 | white-space: pre-wrap !important;
127 | color: #e0e0e0 !important;
128 | background-color: transparent !important;
129 | }
130 |
131 | [data-testid="stChatInputTextArea"]::placeholder {
132 | color: rgba(224, 225, 225, 0.856) !important;
133 | }
134 |
135 | /* Style the base input container */
136 | [data-baseweb="base-input"] {
137 | border: none !important;
138 | width: 100% !important;
139 | max-width: 96% !important;
140 | box-shadow: none !important;
141 | background-color: transparent !important;
142 | min-height: 110px !important;
143 | max-height: 280px !important;
144 | }
145 |
146 | /* Style the button container - new in 1.42.2 */
147 | .st-emotion-cache-sey4o0 {
148 | align-self: flex-end !important;
149 | z-index: 5 !important;
150 |
151 | }
152 |
153 | /* Adjust the send button position */
154 | [data-testid="stChatInputSubmitButton"] {
155 | border-radius: 50% !important;
156 | align-self: flex-end !important;
157 | margin-left: 10px !important;
158 | color: #e0e0e0 !important;
159 | background-color: #2f747a !important;
160 | margin-bottom: 1px !important;
161 | box-shadow: 0 2px 8px rgba(0, 0, 0, 0.2) !important;
162 | border: 1px solid rgba(255, 255, 255, 0.1) !important;
163 | font-weight: 500 !important;
164 | font-size: 14px !important;
165 | cursor: pointer !important;
166 | transition: all 0.3s ease !important;
167 | }
168 |
169 | [data-testid="stChatInputSubmitButton"]:hover {
170 | background-color: #68abac !important;
171 | color: #212121 !important;
172 | box-shadow: 0px 4px 12px rgba(0, 0, 0, 0.3) !important;
173 | border-color: rgba(255, 255, 255, 0.2) !important;
174 | animation: pulse 0.6s ease-out !important;
175 | }
176 |
177 | @keyframes pulse {
178 | 0% { transform: scale(1); }
179 | 50% { transform: scale(1.1); }
180 | 100% { transform: scale(1); }
181 | }
--------------------------------------------------------------------------------
/src/static/secondpage.css:
--------------------------------------------------------------------------------
1 | /* Custom styling for inline code */
2 | [data-testid="stChatMessage"] .assistant-title-start code {
3 | background-color: #f0f0f0;
4 | vertical-align: middle;
5 | font-family: monospace;
6 | font-size: 0.65em;
7 | color: #6f8695;
8 | font-weight: 500;
9 | }
10 | [data-testid="stChatMessage"] .assistant-title code {
11 | background-color: #f0f0f0;
12 | vertical-align: middle;
13 | font-family: monospace;
14 | font-size: 0.65em;
15 | color: #6f8695;
16 | font-weight: 500;
17 | }
18 | [data-testid="stChatMessage"] .assistant-title-start {
19 | color: #CC7900;
20 | margin-top: -10px;
21 | }
22 | [data-testid="stChatMessage"] .cyan-title-start {
23 | color: #22808e;
24 | margin-top: -10px;
25 | }
26 | [data-testid="stChatMessage"] .cyan-title-start code {
27 | color: #e83e8c;
28 | margin-top: -10px;
29 | }
30 | [data-testid="stChatMessage"] .assistant-title {
31 | color: #CC7900;
32 | margin-top: 0px;
33 | }
34 | [data-testid="stChatMessage"] .human-heading {
35 | color: #FF9800;
36 | }
37 | [data-testid="stChatMessage"] .ai-heading {
38 | color: #2196F3;
39 | }
40 | [data-testid="stChatMessage"] .tool-heading {
41 | color: #FF9800;
42 | }
43 | [data-testid="stChatMessage"] .expander-title {
44 | color: #673AB7;
45 | }
46 | [data-testid="stChatMessage"] .stop-button {
47 | background-color: #ff4b4b;
48 | color: white;
49 | border: none;
50 | border-radius: 50%;
51 | width: 40px;
52 | height: 40px;
53 | font-size: 20px;
54 | cursor: pointer;
55 | display: flex;
56 | align-items: center;
57 | justify-content: center;
58 | transition: background-color 0.3s;
59 | }
60 | .stop-button:hover {
61 | background-color: #ff0000;
62 | }
63 |
64 |
65 | [data-testid="stChatMessage"] {
66 | max-width: 100%; /* Use a percentage of the viewport width */
67 | width: auto; /* Let the width be determined by content */
68 | margin-left: 4vw;
69 | margin-right: 5vw;
70 | overflow-wrap: break-word; /* Break long words to prevent overflow */
71 | word-wrap: break-word; /* Legacy support for smaller browsers */
72 | overflow: hidden; /* Hide overflowed text */
73 | }
74 |
75 | .block-container {
76 | padding-top: 1.5rem;
77 | padding-bottom: 0rem;
78 | padding-left: 0.5rem;
79 | padding-right: 1.5em;
80 | }
81 | .main .block-container {
82 | max-width: 100%;
83 | width: 100%;
84 | padding: 0rem 0rem;
85 | }
86 | .stApp {
87 | margin: 0 auto;
88 | }
89 |
90 | /* For user messages
91 | .st-emotion-cache-janbn0 {
92 | flex-direction: row-reverse;
93 | text-align: right;
94 | }*/
95 | .stChatInput textarea, div[data-testid="stChatInput"] textarea {
96 | caret-color: #e0e0e0 !important;
97 | }
98 |
99 | [data-testid="stButton"] button {
100 | position: fixed;
101 | bottom: 55px;
102 | font-size: 35px;
103 | height: 38px;
104 | width: 38px;
105 | margin-right: -62px;
106 | display: flex;
107 | align-items: center;
108 | justify-content: center;
109 | transition: all 5s ease-out; /* Smooth transitions */
110 | background-color: #2a2c2c;
111 | z-index: 999; /* Ensure button stays on top */
112 | }
113 |
114 | [data-testid="stButton"] button:hover {
115 | position: fixed;
116 | transition: all 5s ease-out; /* Smooth transitions */
117 | left: 10px;
118 | bottom: 30px;
119 | background-color: #2a2c2c;
120 | }
121 |
122 | .stChatInput, div[data-testid="stChatInput"] {
123 | height: auto !important;
124 | min-height: 40px !important;
125 | padding: 15px !important;
126 | border-radius: 12px !important;
127 | background-color: #2a2c2c !important;
128 | box-shadow: 0 1px 5px rgba(1,0,0,0.3) !important;
129 | margin-top: -20px;
130 | margin-bottom: -25px;
131 | }
132 |
133 | [data-testid="stChatInputTextArea"]::placeholder {
134 | color: rgba(224, 225, 225, 0.856) !important;
135 | }
136 |
137 | [data-testid="stChatInputFileUploadButton"] svg {
138 | fill: #8c9494 !important;
139 | }
140 |
141 | /* Style the inner container - new in 1.42.2 */
142 | .st-emotion-cache-10nh6ap, div[data-testid="stChatInput"] > div {
143 | display: flex !important;
144 | flex-direction: column !important;
145 | width: 100% !important;
146 | border: none !important;
147 | box-shadow: none !important;
148 | background-color: transparent !important;
149 | width: 95% !important;
150 | max-width: 100% !important;
151 | height: auto !important;
152 | min-height: 40px !important;
153 | position: relative !important;
154 | }
155 |
156 | .st-emotion-cache-nbth8s {
157 | position: absolute !important;
158 | left: 10px !important;
159 | bottom: 20px !important;
160 | z-index: 10 !important;
161 | order: 2 !important;
162 | }
163 |
164 | /* Reposition the file upload button container */
165 | [data-testid="stChatInputFileUploadButton"] {
166 | margin-right: 10px !important;
167 | border: none !important;
168 | position: absolute !important;
169 | bottom: -16px !important;
170 | z-index: 10 !important;
171 | order: 2 !important;
172 | }
173 |
174 | /* Reposition line next 2 file upload button */
175 | .st-emotion-cache-g39xc6 {
176 | margin-left: 29px !important;
177 | margin-right: 0px !important;
178 | border: none !important;
179 | position: absolute !important;
180 | bottom: -20px !important;
181 | background-color: transparent !important;
182 | z-index: 10 !important;
183 | order: 2 !important;
184 | min-height: 40px !important;
185 | }
186 |
187 | /* Style the textarea container */
188 | .stChatInput [data-baseweb="textarea"] {
189 | border: none !important;
190 | box-shadow: none !important;
191 | background-color: transparent !important;
192 | width: 95% !important;
193 | max-width: 100% !important;
194 | order: 1 !important;
195 | }
196 |
197 | /* Style the actual textarea element */
198 | [data-testid="stChatInputTextArea"] {
199 | min-height: 40px !important;
200 | max-height: 100px !important;
201 | width: 100% !important;
202 | max-width: 100% !important;
203 | overflow-y: auto !important;
204 | overflow-x: hidden !important;
205 | padding: 12px !important;
206 | padding-left: 35px !important; /* Make room for the attachment button */
207 | font-size: 16px !important;
208 | line-height: 1.5 !important;
209 | box-shadow: none !important;
210 | word-wrap: break-word !important;
211 | white-space: pre-wrap !important;
212 | }
213 |
214 | /* Style the base input container */
215 | [data-baseweb="base-input"] {
216 | border: none !important;
217 | width: 100% !important;
218 | max-width: 96% !important;
219 | box-shadow: none !important;
220 | background-color: transparent !important;
221 | min-height: 10px !important;
222 | max-height: 100px !important;
223 | }
224 |
225 | /* Style the button container - new in 1.42.2 */
226 | .st-emotion-cache-sey4o0 {
227 | align-self: flex-end !important;
228 | margin-top: -30px !important;
229 | z-index: 5 !important;
230 | }
231 |
232 | /* Adjust the send button position */
233 | [data-testid="stChatInputSubmitButton"] {
234 | align-self: flex-end !important;
235 | margin-bottom: -27px !important;
236 | box-shadow: 0 1px 0px rgba(0,0,0,0.1) !important;
237 | }
--------------------------------------------------------------------------------
/src/static/mian_area.css:
--------------------------------------------------------------------------------
1 | /* main_area.css */
2 |
3 | /* Global font and styling improvements */
4 | body {
5 | font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif !important;
6 | -webkit-font-smoothing: antialiased;
7 | -moz-osx-font-smoothing: grayscale;
8 | color: #1A1A1A;
9 | }
10 |
11 | /* Main container styling */
12 | .main .block-container {
13 | max-width: 1200px;
14 | margin: 0 auto;
15 | padding: 1rem 2rem;
16 | height: 100vh;
17 | overflow-y: auto;
18 | }
19 |
20 | /* Perplexity-inspired clean chat container */
21 | .chat-container {
22 | display: flex;
23 | flex-direction: column;
24 | background-color: transparent;
25 | border-radius: 10px;
26 | box-shadow: 0 4px 12px rgba(0, 0, 0, 0.05);
27 | margin-top: 0px;
28 | overflow: hidden;
29 | }
30 |
31 | /* Message styling with Perplexity-inspired clean look */
32 | .message {
33 | padding: 1.25rem;
34 | margin: 0.75rem 0;
35 | border-radius: 12px;
36 | width: 100%;
37 | line-height: 1.6;
38 | position: relative;
39 | animation: fadeIn 0.3s ease-in-out;
40 | background-color: #2a2d2d;
41 | border: 1px solid rgba(255, 255, 255, 0.1);
42 | }
43 |
44 | @keyframes fadeIn {
45 | from { opacity: 0; transform: translateY(10px); }
46 | to { opacity: 1; transform: translateY(0); }
47 | }
48 |
49 | .user-message {
50 | background-color: #292c2c;
51 | color: #e0e0e0;
52 | }
53 |
54 | .ai-message {
55 | background-color: #2a2d2d;
56 | color: #e0e0e0;
57 | border: 1px solid rgba(255, 255, 255, 0.1);
58 | box-shadow: 0 2px 8px rgba(0, 0, 0, 0.2);
59 | }
60 |
61 | /* Perplexity-inspired empty chat state */
62 | .empty-chat-container {
63 | display: flex;
64 | flex-direction: column;
65 | justify-content: center;
66 | align-items: center;
67 | padding: 1rem 1rem;
68 | text-align: center;
69 | margin-top: -4em;
70 | animation: fadeInUp 0.6s ease-out;
71 | }
72 |
73 | @keyframes fadeInUp {
74 | from { opacity: 0; transform: translateY(20px); }
75 | to { opacity: 1; transform: translateY(0); }
76 | }
77 |
78 | .welcome-text {
79 | font-size: 1.8rem;
80 | font-weight: 600;
81 | color: #9ca1af !important;
82 | margin-top: -2.5em;
83 | margin-bottom: 1.5rem;
84 | }
85 |
86 | .welcome-subtext {
87 | font-size: 1.1rem;
88 | color: #8c9494 !important;
89 | max-width: 600px;
90 | margin-bottom: 3rem;
91 | }
92 |
93 | /* Custom styling for inline code */
94 | .welcome-subtext code {
95 | background-color: #323535;
96 | padding: 2px 5px;
97 | border-radius: 4px;
98 | font-family: monospace;
99 | font-size: 0.9em;
100 | color: #e83e8c;
101 | font-weight: 500;
102 | }
103 |
104 | /* Enhanced chat input container */
105 | .chat-input-container {
106 | width: 80%;
107 | max-width: 800px;
108 | margin: 0 auto;
109 | transition: all 0.3s ease;
110 | }
111 |
112 | /* Compact feature cards with pastel colors */
113 | .feature-cards-container {
114 | display: flex;
115 | justify-content: center;
116 | gap: 16px;
117 | margin: 1rem 0;
118 | flex-wrap: wrap;
119 | }
120 |
121 | .feature-card {
122 | background: #2a2d2d;
123 | border-radius: 12px;
124 | padding: 1rem;
125 | width: 220px;
126 | box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2);
127 | text-align: center;
128 | transition: all 0.3s ease;
129 | border: 1px solid rgba(255, 255, 255, 0.1);
130 | position: relative;
131 | overflow: hidden;
132 | }
133 |
134 | /* Enhanced background colors for feature cards */
135 | .feature-card:nth-child(1) {
136 | background: #2a2d2d;
137 | border-bottom: 4px solid #2f747a;
138 | margin-bottom: 1rem;
139 | }
140 |
141 | .feature-card:nth-child(2) {
142 | background: #2a2d2d;
143 | border-bottom: 4px solid #2f747a;
144 | margin-bottom: 1rem;
145 | }
146 |
147 | .feature-card:nth-child(3) {
148 | background: #2a2d2d;
149 | border-bottom: 4px solid #2f747a;
150 | margin-bottom: 1rem;
151 | }
152 |
153 | .feature-card:hover {
154 | transform: translateY(-6px) scale(1.02);
155 | box-shadow: 0 10px 25px rgba(0, 0, 0, 0.06);
156 | }
157 |
158 | /* Add subtle card decoration */
159 | .feature-card::after {
160 | content: '';
161 | position: absolute;
162 | top: 0;
163 | right: 0;
164 | width: 60px;
165 | height: 60px;
166 | background: rgba(255, 255, 255, 0.1);
167 | border-radius: 0 0 0 60px;
168 | opacity: 0;
169 | transition: all 0.3s ease;
170 | }
171 |
172 | .feature-card:hover::after {
173 | opacity: 1;
174 | }
175 |
176 | .feature-icon {
177 | font-size: 1.8rem;
178 | margin-bottom: 0.8rem;
179 | background: #191a1a;
180 | width: 50px;
181 | height: 50px;
182 | display: flex;
183 | align-items: center;
184 | justify-content: center;
185 | border-radius: 50%;
186 | margin-left: auto;
187 | margin-right: auto;
188 | box-shadow: 0 3px 8px rgba(0, 0, 0, 0.06);
189 | }
190 |
191 | .feature-title {
192 | font-weight: 600;
193 | font-size: 1.1rem;
194 | margin-bottom: 0.75rem;
195 | color: #e0e0e0ea;
196 | }
197 |
198 | .feature-description {
199 | font-size: 0.9rem;
200 | color: #9ca3af;
201 | line-height: 1.5;
202 | }
203 |
204 | /* Enhanced example section */
205 | .examples-container {
206 | background-color: #2a2d2d;
207 | border-radius: 12px;
208 | padding: 1.2rem;
209 | margin: 1rem auto;
210 | max-width: 800px;
211 | border: 1px solid rgba(255, 255, 255, 0.1);
212 | box-shadow: 0 2px 8px rgba(0, 0, 0, 0.2);
213 | }
214 |
215 | /* Perplexity-inspired heading styles */
216 | .section-heading {
217 | font-size: 1.25rem;
218 | font-weight: 600;
219 | color: #1A1A1A;
220 | margin-bottom: 1rem;
221 | letter-spacing: -0.01em;
222 | }
223 |
224 | /* Responsive adjustments */
225 | @media screen and (max-width: 992px) {
226 | .chat-input-container, .empty-chat-container, .chat-container, .examples-container {
227 | max-width: 95%;
228 | }
229 | }
230 |
231 | @media screen and (max-width: 768px) {
232 | .welcome-text {
233 | font-size: 1.5rem;
234 | }
235 |
236 | .feature-cards-container {
237 | flex-direction: column;
238 | align-items: center;
239 | }
240 |
241 | .feature-card {
242 | width: 100%;
243 | max-width: 280px;
244 | }
245 | }
246 |
247 | /* Perplexity-inspired footer */
248 | .app-footer {
249 | display: flex;
250 | justify-content: center;
251 | padding: 1rem 0;
252 | margin-top: 0.75rem;
253 | color: #ad9b8d;
254 | font-size: 0.875rem;
255 | }
256 |
257 | .footer-link {
258 | color: #ad9b8d;
259 | margin: 0 0.5rem;
260 | text-decoration: none;
261 | transition: color 0.2s ease;
262 | }
263 |
264 | .footer-link:hover {
265 | color: #22808e;
266 | text-decoration: underline;
267 | }
268 |
269 | /* WELCOME TEXT */
270 |
271 | .welcome-about {
272 | font-size: 1rem;
273 | color: #9ca3af;
274 | margin-top: -1.5em;
275 | margin-bottom: -5em;
276 | }
277 |
278 | .logo-container-main {
279 | display: flex;
280 | align-items: center;
281 | justify-content: center;
282 | margin: 5px auto -10px auto;
283 | }
284 |
285 | .image-main {
286 | width: 96%; /* Reduced size to fit inline */
287 | height: 96%; /* Reduced size to fit inline */
288 | transform-style: preserve-3d;
289 | z-index: 9999;
290 | overflow: hidden;
291 | display: inline-block;
292 | vertical-align: middle;
293 | margin-top: 2%;
294 | margin-bottom: -5%;
295 | margin-right: -10px;
296 | }
297 |
298 | /* This rule targets the sidebar when its aria-expanded attribute is false */
299 | [data-testid="stSidebar"][aria-expanded="false"] .note-ai {
300 | display: none;
301 | }
--------------------------------------------------------------------------------
/src/app.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 | from langchain_core.messages import (
3 | AIMessage,
4 | BaseMessage,
5 | HumanMessage,
6 | ToolMessage,
7 | )
8 | from langchain_core.prompts import ChatPromptTemplate
9 | from langchain_core.runnables import RunnableLambda, RunnableWithFallbacks
10 | from langchain_core.tools import tool
11 | from langchain_community.agent_toolkits import SQLDatabaseToolkit
12 | from langchain_community.document_loaders import PyPDFLoader
13 | from langchain_community.embeddings import HuggingFaceEmbeddings
14 | from langchain_community.utilities import SQLDatabase
15 | from langchain_community.vectorstores import FAISS
16 | from langgraph.graph import END, StateGraph, START
17 | from langgraph.graph.message import AnyMessage, add_messages
18 | from langgraph.prebuilt import ToolNode
19 | from config.config_loader import ConfigLoader # CUSTOM CLASS
20 | from models.model_loader import ModelLoader # CUSTOM CLASS
21 | from utils.constants import TASK_OPTIONS #, DEFAULT_MODEL_PARAMS # CUSTOM CLASS/PARAMS
22 | from utils.paths import PathManager # CUSTOM CLASS
23 | from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
24 | from IPython.display import Image, display
25 | from langchain.text_splitter import RecursiveCharacterTextSplitter
26 | from langchain.chains import ConversationalRetrievalChain
27 | from langchain_core.pydantic_v1 import BaseModel, Field
28 | from langchain_core.runnables import RunnablePassthrough
29 | # from ollama_functions_custom import OllamaFunctions
30 | from pydantic import BaseModel
31 | from sentence_transformers import SentenceTransformer
32 | from tabulate import tabulate
33 | from typing import (
34 | Annotated,
35 | Dict,
36 | List,
37 | Literal,
38 | Optional,
39 | TypedDict,
40 | )
41 | import base64
42 | import contextlib
43 | import faiss
44 | import gc
45 | import getpass
46 | import io
47 | import json
48 | import matplotlib.pyplot as plt
49 | import numpy as np
50 | import os
51 | import pandas as pd
52 | import pickle
53 | import plotly.express as px
54 | import plotly.graph_objects as go
55 | import plotly.io as pio
56 | import polars as pl
57 | import pyarrow.feather as feather
58 | import pyarrow.parquet as pq
59 | import re
60 | import sys
61 | import tempfile
62 | import textwrap
63 | import threading
64 | import time
65 | import torch
66 | import traceback
67 | import yaml
68 | # from sql_utility import list_tables_tool
69 | # from langchain_community.tools.sql_database.tool import QuerySQLDataBaseTool, InfoSQLDatabaseTool, ListSQLDatabaseTool, QuerySQLCheckerTool
70 | # from tqdm.autonotebook import tqdm, trange
71 | # from langchain_groq import ChatGroq
72 | # from langchain_ollama import ChatOllama
73 | # from langchain_community.chat_models import ChatLlamaCpp
74 | # from langchain_core.runnables import RunnablePassthrough
75 |
76 | # Define global variables at the module level
77 | # os.environ["POLARS_MAX_THREADS"] = "1"
78 |
79 | # PAUSE
80 | Pause_productivity_Analysis = True
81 | model_embeddings, llm_agent = None, None
82 |
83 | # LLMs
84 |
85 | if 'second_task' not in st.session_state:
86 | st.session_state.second_task = False
87 |
88 |
89 | @st.cache_resource(show_spinner="Loading models...")
90 | def load_models():
91 | """
92 | Load models
93 | """
94 | # --- Progress Indicators ---
95 | progress_bar = st.progress(0)
96 | progress_text = st.empty()
97 |
98 | # --- Load LLMs ---
99 | progress_text.text("Loading LLMs...")
100 | print("Loading LLMs...") # Debug print
101 |
102 | gc.collect()
103 | torch.cuda.empty_cache()
104 |
105 | progress_bar.progress(25)
106 |
107 | print('---')
108 | progress_bar.progress(50)
109 |
110 | print('---')
111 | progress_bar.progress(100)
112 |
113 | # --- Cleanup ---
114 | progress_text.empty()
115 | progress_bar.empty()
116 |
117 | print("--- Finished load_models ---") # Debug print
118 |
119 | model_embeddings, llm_agent = None, None
120 | return model_embeddings, llm_agent
121 |
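# The stub above returns (None, None); a minimal sketch of a concrete loader,
# assuming a local Ollama server and a sentence-transformers embedding model
# (the model names here are illustrative, not pinned by this repo):
#
# def load_models_example():
#     from langchain_ollama import ChatOllama  # optional dependency, commented out in the imports above
#     embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
#     llm = ChatOllama(model="llama3.1:8b-instruct-q8_0", temperature=0)
#     return embeddings, llm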
122 | def initialize_session_state():
123 | if 'stop_streaming' not in st.session_state:
124 | st.session_state.stop_streaming = False
125 | if "messages" not in st.session_state:
126 | st.session_state.messages = []
127 | if "statistics" not in st.session_state:
128 | st.session_state.statistics = []
129 | if "context" not in st.session_state:
130 | st.session_state.context = ""
131 |
132 | def setup_sidebar():
133 | with open("./assets/logo.png", "rb") as image_file:
134 | base64_image = base64.b64encode(image_file.read()).decode("utf-8")
135 |
136 | with open("./src/static/about.css") as f:
137 | st.markdown(f"", unsafe_allow_html=True)
138 |
139 | with open("./src/static/sidebar.css") as f:
140 | st.sidebar.markdown(f"", unsafe_allow_html=True)
141 |
142 | logo_and_text = f"""
143 |
144 |

145 |
146 |
147 | """
148 | st.sidebar.markdown(logo_and_text, unsafe_allow_html=True)
149 | st.sidebar.markdown("", unsafe_allow_html=True)
150 | st.sidebar.markdown("", unsafe_allow_html=True)
151 |
152 | return st.sidebar.selectbox(options = TASK_OPTIONS, label='', label_visibility="collapsed")
153 |
154 | def set_about_section():
155 | with st.sidebar.expander("About"):
156 | with open("./src/static/about.css") as f:
157 | st.markdown(f"", unsafe_allow_html=True)
158 | with open("./src/static/about.html") as f:
159 | st.markdown(f"{f.read()}", unsafe_allow_html=True)
160 |
161 | with open("./src/static/note_ai.css") as f:
162 | st.markdown(f"", unsafe_allow_html=True)
163 | st.sidebar.markdown("""Occasionally, AI workflows may produce unexpected results.
""", unsafe_allow_html=True)
164 |
165 | def main():
166 | try:
167 | st.set_page_config(
168 | page_title="productivity Chatbot | Acrivon Phosphoproteomics",
169 | page_icon="./assets/logo.png",
170 | layout="wide",
171 | initial_sidebar_state="expanded"
172 | )
173 | except Exception:
174 | pass
175 |
176 | if 'inner_auto' not in st.session_state:
177 | st.session_state.inner_auto = False
178 |
179 | print("--- Starting main function ---")
180 | load_success = False
181 | try:
182 | print("Calling load_models_dataframe...")
183 | model_embeddings, llm_agent = load_models() # Ensure globals are set
184 | load_success = True
185 | except Exception as e:
186 | st.error(f"Fatal error during resource loading: {e}")
187 | st.exception(e) # Show full traceback in Streamlit for detailed debugging
188 | print(f"Exception during load_models_dataframe call: {e}") # Debug
189 | st.stop() # Stop the Streamlit app execution
190 |
191 | with open("./app/style.css") as css:
192 | st.markdown(f'<style>{css.read()}</style>', unsafe_allow_html=True)
193 |
194 | # Read and encode the logo image
195 | with open("./assets/logo.png", "rb") as f:
196 | logo_base64 = base64.b64encode(f.read()).decode()
197 |
198 | path_manager = PathManager()
199 |
200 | try:
201 | initialize_session_state()
202 | selected_task = setup_sidebar()
203 |
204 | with open("./src/static/mian_area.css") as f:
205 | st.markdown(f"", unsafe_allow_html=True)
206 | set_about_section()
207 |
208 | if 'first_question' not in st.session_state or len(st.session_state.first_question) == 0:
209 | with open("./src/static/main_chat_input.css") as f:
210 | st.markdown(f"", unsafe_allow_html=True)
211 | col1, col2, col3 = st.columns([1, 16, 1])
212 | files_exist = False
213 | with col2:
214 | st.markdown("""
215 |
216 |
Welcome to Synapse Workflows
217 |
218 | Interact with powerful AI Agent Workflows using natural language. Try running
219 | Smart Search Agent, Productivity Assistant, or Data Analysis Agent.
220 | Ready to explore the world of intelligent agents?
221 |
222 |
223 | """, unsafe_allow_html=True)
224 |
225 | if selected_task == "Smart Search Agent":
226 | total_input = st.chat_input(
227 | key="initial_query",
228 | placeholder="Search or Ask anything...",
229 | )
230 |
231 | user_input = total_input
232 | elif selected_task == "Auto Selection":
233 | total_input = st.chat_input(
234 | key="initial_query",
235 | accept_file = "multiple",
236 | file_type = ["pdf", "jpg", "jpeg", "png", "doc", "docx"],
237 | placeholder = (
238 | "Ask anything...\n\nNot sure which agent you need? "
239 | "Type your question or upload files, and Synapse will auto-select the best workflow for you!"
240 | ),
241 | )
242 |
243 | if total_input:
244 | if hasattr(total_input, 'text') and total_input.text:
245 | user_input = total_input.text
246 | if hasattr(total_input, 'files') and total_input.files:
247 | files_collected = total_input.files
248 | files_exist = True
249 | else:
250 | total_input = st.chat_input(
251 | key="initial_query",
252 | accept_file = "multiple",
253 | file_type = ["pdf", "jpg", "jpeg", "png", "doc", "docx"],
254 | placeholder="Ask Anything...",
255 | )
256 |
257 | if total_input:
258 | if hasattr(total_input, 'text') and total_input.text:
259 | user_input = total_input.text
260 | if hasattr(total_input, 'files') and total_input.files:
261 | files_collected = total_input.files
262 | files_exist = True
263 |
264 | if total_input:
265 | if 'first_question' not in st.session_state:
266 | st.session_state.first_question = []
267 |
268 | if not files_exist:
269 | st.session_state.first_question.append({"role": "initializer", "content": user_input, "file": None})
270 | else:
271 | st.session_state.first_question.append({"role": "initializer", "content": user_input, "file": files_collected})
272 |
273 | st.rerun()
274 |
275 | # Load custom HTML layout
276 | with open("./src/static/static_feature_card.html", "r", encoding="utf-8") as f:
277 | html_content = f.read()
278 | st.markdown(html_content, unsafe_allow_html=True)
279 |
280 | from streamlit_extras.bottom_container import bottom
281 |
282 | with bottom():
283 | with open("./src/static/links.html", "r", encoding="utf-8") as f:
284 | html_content = f.read()
285 | st.markdown(
286 | f"""
287 |
288 | {html_content}
289 |
290 | """,
291 | unsafe_allow_html=True
292 | )
293 |
294 | else:
295 | task = selected_task
296 | if st.session_state.get('global_task') != "Inner Auto Selection":
297 | st.session_state.global_task = selected_task
298 | # "Inner Auto Selection" persists across reruns once the supervisor
299 | # has routed the conversation
300 |
301 | if 'stop_streaming' not in st.session_state:
302 | st.session_state.stop_streaming = False
303 |
304 | if "messages" not in st.session_state:
305 | st.session_state.messages = []
306 | st.session_state.context = ""
307 |
308 | from PIL import Image
309 | def process_avatar(image_path, output_size=200):
310 | try:
311 | img = Image.open(image_path)
312 | if img.mode != "RGBA":
313 | img = img.convert("RGBA")
314 |
315 | # Create a square canvas with transparency
316 | canvas = Image.new("RGBA", (output_size, output_size), (0, 0, 0, 0))
317 |
318 | # Maintain aspect ratio
319 | width, height = img.size
320 | ratio = min(output_size / width, output_size / height)
321 | new_size = (int(width * ratio), int(height * ratio))
322 |
323 | # Resize the image while maintaining aspect ratio
324 | resized = img.resize(new_size, Image.LANCZOS)
325 |
326 | # Center the resized image on the canvas
327 | x = (output_size - new_size[0]) // 2
328 | y = (output_size - new_size[1]) // 2
329 | canvas.paste(resized, (x, y), resized) # Use the resized image as a mask for transparency
330 |
331 | processed_path = os.path.join("./assets", "processed_" + os.path.basename(image_path))
332 | os.makedirs(os.path.dirname(processed_path), exist_ok=True)
333 | canvas.save(processed_path)
334 |
335 | return processed_path
336 |
337 | except FileNotFoundError:
338 | print(f"Avatar image not found at: {image_path}")
339 | return None
340 | except Exception as e:
341 | print(f"An error occurred: {e}")
342 | return None
343 | assistant_avatar = process_avatar("./assets/logo.png")
344 | user_avatar = "./assets/logo.png"
345 |
346 | # color_scheme = {
347 | # 'assistant-title': '#CC7900',
348 | # 'human_heading': '#FF9800', # Green for Human Message
349 | # 'ai_heading': '#2196F3', # Blue for AI Message
350 | # 'tool_heading': '#FF9800', # Orange for Tool Message
351 | # 'expander_title': '#673AB7', # Deep Purple for Expander
352 | # 'cyan-title-start': '#22808e', # Use magenta
353 | # 'cyan-title-start-code': '#e83e8c', #
354 | # }
355 | # CSS to style the headings and the stop button, e.g. .stop-button {color: ...; cursor: pointer; margin-left: 10px;}
356 |
357 | with open("./src/static/secondpage.css") as f:
358 | st.markdown(f"", unsafe_allow_html=True)
359 |
360 | st.markdown("""
361 |
440 | """, unsafe_allow_html=True)
441 |
442 |
443 | # Process uploaded PDFs
444 | def process_pdf(pdf_list, chunk_size=1000, chunk_overlap=100):
445 | total_chunks = 0
446 | page_content_lengths = []
447 | with st.spinner("📚 Processing your PDFs... Please wait while we extract all that knowledge!"):
448 | for file in pdf_list:
449 | with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp:
450 | tmp.write(file.getvalue())
451 | time.sleep(1)
452 | tmp_path = tmp.name
453 |
454 | loader = PyPDFLoader(tmp_path)
455 | documents = loader.load()
456 |
457 | os.unlink(tmp_path)
458 |
459 | text_splitter = RecursiveCharacterTextSplitter(
460 | chunk_size=chunk_size, # use the function parameters rather than hardcoded values
461 | chunk_overlap=chunk_overlap
462 | )
463 | chunks = text_splitter.split_documents(documents)
464 | page_content_lengths.extend([len(chunk.page_content) for chunk in chunks])
465 |
466 | embeddings = HuggingFaceEmbeddings(model_name=str(path_manager.model_paths["EMBEDDINGS_qwen"])) # session embedding model
467 | # print(">>>>", str(path_manager.model_paths["EMBEDDINGS_qwen"]))
468 |
469 | # Create or update vector store
470 | if st.session_state.vectorstore:
471 | st.session_state.vectorstore.add_documents(chunks)
472 | else:
473 | st.session_state.vectorstore = FAISS.from_documents(chunks, embeddings)
474 |
475 | total_chunks += len(chunks)
476 |
477 | fun_facts = [
478 | "Fun fact: The average business document contains only 20% truly unique content.",
479 | "AI Tidbit: These embeddings help your AI understand meaning, not just words!",
480 | "Tech note: Vector databases are how AIs can 'remember' your documents.",
481 | "Curious? Each 'chunk' is like a small puzzle piece of your document's knowledge!"
482 | ]
483 | import random
484 |
485 | st.session_state.pdf_processed = {
486 | "num_chunks": total_chunks,
487 | "fun_fact": random.choice(fun_facts),
488 | "num_documents": len(pdf_list),
489 | "chunk_sizes": page_content_lengths if total_chunks > 0 else []
490 | }
491 |
492 | return total_chunks
493 |
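# process_pdf above follows the standard load -> split -> embed -> index flow;
# a minimal standalone sketch of the same pattern (helper name and argument
# values are illustrative):
#
# def index_pdf(path, embeddings):
#     docs = PyPDFLoader(path).load()  # one Document per PDF page
#     splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
#     chunks = splitter.split_documents(docs)  # overlap keeps context across chunk boundaries
#     return FAISS.from_documents(chunks, embeddings)  # in-memory vector index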
494 | def supervisor_node(user_input: str):
495 | class Router(BaseModel):
496 | """Worker to route to next."""
497 | next: Literal["smart_search", "productivity_assistant", "data_analysis_agent"]
498 |
499 | members = ["smart_search", "productivity_assistant", "data_analysis_agent"]
500 |
501 | system_prompt = f"""You are a supervisor tasked with managing a conversation between the following workers: {members}.
502 | Given the user request, determine which worker should handle it next.
503 |
504 | Return ONLY a JSON object with a single field 'next' that must be one of: "smart_search" or "productivity_assistant" or "data_analysis_agent".
505 |
506 | Example response format:
507 | {{
508 | "next": "productivity_assistant"
509 | }}
510 |
511 | Routing criteria with detailed examples:
512 | - Route to "smart_search" when the query involves finding information, answering factual questions, or researching topics:
513 | * "What is the latest research on large language models?"
514 | * "Find information about climate change impacts"
515 | * "Who invented the internet?"
516 | * "What are the symptoms of COVID-19?"
517 | * "Search for recent advancements in renewable energy"
518 | * "Find tutorials on Python programming"
519 | * "What happened in world news today?"
520 | * "Look up the definition of quantum computing"
521 |
522 | - Route to "productivity_assistant" when the query involves personal organization, scheduling, reminders, or task management:
523 | * "Create a to-do list for my project"
524 | * "Schedule a meeting for tomorrow at 2pm"
525 | * "Remind me to call John in 3 hours"
526 | * "Draft an email to my team about the upcoming deadline"
527 | * "Help me organize my work schedule"
528 | * "Create a shopping list for dinner ingredients"
529 | * "Set up a daily reminder for medication"
530 | * "Plan my study schedule for next week"
531 |
532 | - Route to "data_analysis_agent" when the query involves analyzing, visualizing, or processing data:
533 | * "Analyze this CSV file of sales data"
534 | * "Create a chart showing revenue trends"
535 | * "Calculate the mean and standard deviation of these numbers"
536 | * "Compare these two datasets and find correlations"
537 | * "Visualize this data as a histogram"
538 | * "Perform sentiment analysis on these customer reviews"
539 | * "Extract insights from this survey data"
540 | * "Identify patterns in this time series data"
541 | * "Generate a report based on these quarterly figures"
542 |
543 | When in doubt about which agent to route to, consider:
544 | - If the query is primarily about finding or retrieving information, use "smart_search"
545 | - If the query is about personal task management or organization, use "productivity_assistant"
546 | - If the query involves working with or making sense of data, use "data_analysis_agent"
547 | """
548 |
549 | from langchain_core.output_parsers import JsonOutputParser
550 | from langchain_core.prompts import PromptTemplate
551 |
552 | parser = JsonOutputParser(pydantic_object=Router)
553 | prompt = PromptTemplate(
554 | template="Answer the user query.\n{system_prompt}\n{format_instructions}\nUser: {input}\n",
555 | input_variables=["system_prompt","input"],
556 | partial_variables={"format_instructions": parser.get_format_instructions()},
557 | )
558 |
559 | try:
560 | router_chain = prompt | llm_router | parser
561 | response = router_chain.invoke({"system_prompt":system_prompt, "input": user_input})
562 | print("Structured response:", response)
563 | return response.get('next', 'smart_search') # Default to smart_search instead of literature
564 | except Exception as e:
565 | print(f"Structured output parsing error: {e}")
566 | # Default to smart_search instead of literature if there's an error
567 | return Router(next="smart_search").next
568 |
569 | # from langchain_core.output_parsers import JsonOutputParser
570 | # prompt = ChatPromptTemplate.from_messages([
571 | # ("system", system_prompt),
572 | # ("user", "{input}")],
573 | # partial_variables={"format_instructions": parser.get_format_instructions()})
574 |
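# A self-contained sketch of the structured-routing pattern used in
# supervisor_node, with the LLM stubbed out so the parser can be exercised
# without a model (the stub and its canned reply are illustrative only):
#
# from langchain_core.output_parsers import JsonOutputParser
# from langchain_core.runnables import RunnableLambda
#
# class RouteChoice(BaseModel):
#     next: Literal["smart_search", "productivity_assistant", "data_analysis_agent"]
#
# parser = JsonOutputParser(pydantic_object=RouteChoice)
# fake_llm = RunnableLambda(lambda _prompt: '{"next": "smart_search"}')  # stand-in for llm_router
# assert (fake_llm | parser).invoke("any prompt")["next"] == "smart_search"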
575 | #inner_auto
576 | if st.session_state.global_task in ["Auto Selection", "Inner Auto Selection"]:
577 | user_input = None
578 | # col1, col2 = st.columns([0.75, 13]) - THIS WILL STOP AUTO-SCROLL
579 | if st.session_state.first_question and st.session_state.first_question[-1]["role"] == "initializer" and len(st.session_state.first_question) % 2 == 1:
580 | question = st.session_state.first_question[-1]["content"]
581 | else:
582 | question = None
583 |
584 | # if st.session_state.second_task:
585 | try:
586 | user_input = None
587 | total_input = st.chat_input(
588 | key="inner_auto_selection",
589 | accept_file = "multiple",
590 | file_type = ["pdf", "jpg", "jpeg", "png", "doc", "docx"],
591 | placeholder="Enter your query for either Smart Web Search or Productivity Agent or Data Analysis Agent...",
592 | )
593 | if total_input:
594 | if hasattr(total_input, 'text') and total_input.text:
595 | user_input = total_input.text
596 | else:
597 | user_input = "dummy"
598 | if hasattr(total_input, 'files') and total_input.files:
599 | files_collected = total_input.files
600 | try:
601 | chunk_count = process_pdf(files_collected)
602 | print(">>>>>>Embedding")
603 | print(chunk_count)
604 | except Exception:
605 | pass
606 | else:
607 | files_collected = None
608 |
609 | try:
610 | print("new input")
611 | print(user_input)
612 | if not user_input:
613 | user_input = question
614 | if user_input:
615 | print(f'============ {user_input}')
616 | route_ans = supervisor_node(user_input)
617 | print(f'=========== {route_ans}')
618 | try:
619 | task = "Productivity Assistant" if route_ans == "literature" or files_collected else "sql agent"
620 | except:
621 | task = "Productivity Assistant" if route_ans == "literature" else "sql agent"
622 |
623 | print(">>>> Inner Auto Selection Final result:", route_ans)
624 | st.session_state.inner_auto = True
625 | except Exception as e:
626 | route_ans = "literature"
627 | print(f"Error Inner/Auto Selection: {e}")
628 | task = "Productivity Assistant"
629 | except Exception as e:
630 | print(f"Error Inner/Auto Selection: {e}")
631 | st.error(f"Error Inner/Auto Selection: {e}")
632 | route_ans = "literature"
633 | task = "Productivity Assistant"
634 |
635 | if task == "Productivity Assistant":
636 | # user_input = st.chat_input("Enter your Biomedical Queries for Coherent literature response...")
637 | stop_button = st.button("▣", key="stop_button", help="Stop the current task")
638 | try:
639 | if pdf_content:
640 | pass
641 | except UnboundLocalError:
642 | pdf_content = None
643 |
644 | try:
645 | if files_collected:
646 | pass
647 | except UnboundLocalError:
648 | files_collected = None
649 |
650 | if "vectorstore" not in st.session_state:
651 | st.session_state.vectorstore = None
652 |
653 | if (not st.session_state.first_question or st.session_state.inner_auto) and ('user_input' in locals() or 'user_input' in globals()):#new
654 | print("inner_loop")
655 | print(user_input)
656 | pass
657 | elif st.session_state.first_question and st.session_state.first_question[-1]["role"] == "initializer" and len(st.session_state.first_question) % 2 == 1:
658 | print("first_question")
659 | user_input = st.session_state.first_question[-1]["content"]
660 | files_collected = st.session_state.first_question[-1]["file"]
661 | if files_collected:
662 | chunk_count = process_pdf(files_collected)
663 | st.session_state.first_question[-1]["role"] = "processed"
664 | if st.session_state.global_task not in ["Auto Selection", "Inner Auto Selection"]:#new
665 | placeholder_ip = st.chat_input(
666 | key="literature_query",
667 | accept_file = "multiple",
668 | file_type = ["pdf", "jpg", "jpeg", "png", "doc", "docx"],
669 | placeholder="Enter your query for general-purpose AI model...",
670 | )
671 | else:
672 | print("> not first_question")
673 | if st.session_state.global_task not in ["Auto Selection", "Inner Auto Selection"]:#new
674 | user_input = None
675 | total_input = st.chat_input(
676 | key="literature_query_continue",
677 | accept_file = "multiple",
678 | file_type = ["pdf", "jpg", "jpeg", "png", "doc", "docx"],
679 | placeholder="Enter your query for general-purpose AI model...",
680 | )
681 |
682 | if total_input:
683 | if hasattr(total_input, 'text') and total_input.text:
684 | user_input = total_input.text
685 | else:
686 | user_input = "dummy"
687 | if hasattr(total_input, 'files') and total_input.files:
688 | files_collected = total_input.files
689 | try:
690 | chunk_count = process_pdf(files_collected)
691 | print(">>>>>>Embedding")
692 | print(chunk_count)
693 | except Exception:
694 | pass
695 | else:
696 | files_collected = None
697 | try:
698 | if user_input:
699 | st.session_state.messages.append({"role": "user", "content": user_input})
700 |
701 | assistant_avatar = process_avatar("./assets/logo.png") # Original in assets folder
702 | user_avatar = "./assets/logo.png"
703 |
704 | with st.chat_message("user", avatar=user_avatar):
705 | st.markdown(user_input)
706 |
707 | with st.chat_message("assistant", avatar=assistant_avatar):
708 | # First display PDF processing information if available
709 | if hasattr(st.session_state, "pdf_processed"):
710 | pdf_info = st.session_state.pdf_processed
711 | st.write(f"✅ **Successfully processed your PDF into {pdf_info['num_chunks']} knowledge chunks!**")
712 | with st.expander("📚 **Chunks Visualization** - gte-Qwen2-1.5B-instruct", expanded=False):
713 | st.write(f":bulb: *{pdf_info['fun_fact']}*")
714 | if pdf_info['num_chunks'] > 0:
715 | st.write(f"✅ **Added your PDF into VectorDB which might contains other documents of the session.**")
716 | st.write("Here's how your document was divided:")
717 | st.bar_chart(pdf_info['chunk_sizes'])
718 | st.markdown("---")
719 | heading_relevant_chunk_placeholder = st.empty()
720 | relevant_chunk_placeholder = st.empty()
721 |
722 | # Clear the PDF processed info to avoid showing it again
723 | del st.session_state.pdf_processed
724 |
725 | st.session_state["final_output"] = ""
726 | st.session_state["is_expanded"] = False
727 | st.session_state["full_response"] = ""
728 | st.session_state.stop_streaming = False
729 | start_time = time.time()
730 | # Render expander only during processing
731 | # if not st.session_state.processing_done:
732 | full_response = ""
733 |
734 | with st.expander(":bulb: **Hide Reasoning** - Model XYZ", expanded=True):
735 | thinking_placeholder = st.empty()
736 | final_placeholder = st.empty()
737 |
738 | if files_collected and st.session_state.vectorstore:
739 | # the vector store already carries its embedding function, so no separate embeddings object is needed here
740 |
741 | if any(keyword in user_input.lower() for keyword in ["abstract", "conclusion", "introduction"]):
742 | relevant_docs = st.session_state.vectorstore.max_marginal_relevance_search(user_input, k=8,
743 | fetch_k=20, # Fetch more documents to consider before selecting top 8
744 | lambda_mult=0.7 # Balance between relevance and diversity
745 | )
746 | res = ""
747 | heading_relevant_chunk_placeholder.markdown(f"Best {len(relevant_docs)} Relevant Chunks:", unsafe_allow_html=True)
748 | for rank, doc in enumerate(relevant_docs, start=1):
749 | content = doc.page_content
750 | res += f"**Rank {rank}:**\n\n{content}\n\n---\n\n"
751 | relevant_chunk_placeholder.markdown(res)
752 | elif any(keyword in user_input.lower() for keyword in ["summarize", "summarise", "summary", "summarization", "explain", "simplify", "simple term"]):
753 | relevant_docs = st.session_state.vectorstore.max_marginal_relevance_search(user_input, k=20, # Number of documents to return
754 | fetch_k=40, # Fetch more documents, then select most diverse k
755 | lambda_mult=0.6 # 60% relevance with 40% diversity (lower is more diverse)
756 | )
757 | res = ""
758 | heading_relevant_chunk_placeholder.markdown(f"Best {len(relevant_docs)} Relevant Chunks:", unsafe_allow_html=True)
759 |
760 | for rank, doc in enumerate(relevant_docs, start=1):
761 | content = doc.page_content
762 | res += f"**Rank {rank}:**\n\n{content}\n\n---\n\n"
763 | relevant_chunk_placeholder.markdown(res)
764 | else:
765 | relevant_docs = st.session_state.vectorstore.max_marginal_relevance_search(user_input, k=5,
766 | fetch_k=15, # Fetch more documents for better selection
767 | lambda_mult=0.7 # 0.7 balances relevance with diversity (higher = more relevance)
768 | )
769 | res = ""
770 | heading_relevant_chunk_placeholder.markdown(f"Best {len(relevant_docs)} Relevant Chunks:", unsafe_allow_html=True)
771 | for rank, doc in enumerate(relevant_docs, start=1):
772 | content = doc.page_content
773 | res += f"**Rank {rank}:**\n\n{content}\n\n---\n\n"
774 | relevant_chunk_placeholder.markdown(res)
775 |
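# Retrieval parameters above: k = chunks returned, fetch_k = candidates pulled
# before MMR re-ranking, lambda_mult = relevance (toward 1.0) vs. diversity
# (toward 0.0); broad "summarize"-style queries therefore use a larger k and a
# lower lambda_mult than targeted section lookups.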
776 |
777 | pdf_context = "\n\n".join([doc.page_content for doc in relevant_docs])
778 | st.session_state.pdf_context = pdf_context
779 | else:
780 | # pdf_context = "" # Ensure it's an empty string when no file is uploaded
781 | pass
782 |
783 |
784 | # Streaming logic
785 | in_thinking = True
786 | raw_question = user_input
787 | user_input += f"""Follow all the rules. **Note:** Always use single backticks `keyword` to highlight important keywords.(e.g. `AI Agents`, `LangGraph`, `1999`)
788 | **Rules:**
789 | - Always include atleast one Table since information can be better presented Factually using **Table** try to include **Table** for compact facts.
790 | - If required use triple dash like ('---') to seperate different sections for better mark-down layout
791 | - Use single or triple backticks `keyword` to highlight important keywords for easy reading and appeal.
792 | - First go through the reasoning steps using appropriate opening and closing tags like ... Actual Response.
793 | """
794 | try:
795 | # Get pdf_context from session state instead of assuming it exists as a local variable
796 | pdf_context = st.session_state.get('pdf_context', '')
797 | if pdf_context:
798 | prompt = get_prompt(user_input, st.session_state.context, pdf_context, pdf=True)
799 | else:
800 | prompt = get_prompt(user_input, st.session_state.context)
801 | except Exception as e:
802 | st.error(f"Error processing PDF context: {e}")
803 | prompt = get_prompt(user_input, st.session_state.context)
804 |
805 | gc.collect()
806 |
807 | torch.cuda.empty_cache()
808 | device_id = 0
809 | # torch.cuda.set_device(device_id)
810 |
811 | # Reset the accumulated reasoning text for this turn
812 | st.session_state.full_reasoning = ""
813 |
814 |
815 |
816 | # for chunk in llm_literature.stream(prompt):#, max_tokens=8000, top_k=top_k, top_p=top_p, repeat_penalty=repeat_penalty, temperature=temperature, stream=True):
817 | # if st.session_state.stop_streaming:
818 | # st.session_state.stop_streaming = True
819 | # break
820 |
821 | # chunk_text = chunk # chunk.content
822 | # st.session_state.full_response += chunk_text
823 |
824 | # if in_thinking:
825 | # thinking_placeholder.markdown(st.session_state.full_response + "▌")
826 | # st.session_state.full_reasoning += chunk_text
827 |
828 | # if "</think>" in st.session_state.full_response.lower():
829 | # in_thinking = False
830 | # parts = st.session_state.full_response.split("</think>", 1)
831 | # st.session_state.final_output = parts[1] if len(parts) > 1 else ""
832 | # # st.session_state.reasoning = parts[0].replace("<think>", "")
833 | # st.session_state.full_response = parts[0].replace("<think>", "")
834 | # st.session_state.is_expanded = False
835 | # # st.session_state.processing_done = True
836 | # time.sleep(0.05) # Helps sync state changes
837 |
838 | # if not in_thinking and chunk_text != "</think>":
839 | # st.session_state.final_output += chunk_text
840 | # final_placeholder.markdown(st.session_state.final_output)
841 |
842 | # # Add timing stats
843 | # execution_time = time.time() - start_time
844 | # ntokens = len(st.session_state.full_response.split())
845 | # timing_stats = f'Literature ⏳ {execution_time:.2f}s | {(execution_time/ntokens):.3f}s/token | {round((ntokens/execution_time),1)} tokens/s'
846 | # final_placeholder.markdown(f"{st.session_state.final_output}<br><small>{timing_stats}</small>", unsafe_allow_html=True)
847 |
848 | # # Update conversation history
849 | # st.session_state.messages.append({"role": "assistant", "reasoning": st.session_state.full_reasoning, "content": st.session_state.final_output})
850 |
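# A framework-free sketch of the <think>-tag splitting performed in the
# commented streaming loop above (function name illustrative):
#
# def split_reasoning(text: str) -> tuple[str, str]:
#     """Return (reasoning, answer), split on the first closing </think> tag."""
#     head, sep, tail = text.partition("</think>")
#     if not sep:  # no closing tag yet: everything is still reasoning
#         return head.replace("<think>", ""), ""
#     return head.replace("<think>", "").strip(), tail.strip()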
851 |
852 | st.markdown("""
853 | """, unsafe_allow_html=True)
876 | except Exception as e:
877 | print("Except literature block:")
878 | print(st.session_state.global_task)
879 | import traceback
880 | def get_clean_error_message(e: Exception) -> str:
881 | error_type = type(e).__name__
882 | error_msg = str(e)
883 | tb_last = "".join(traceback.format_exception_only(type(e), e))
884 | return f"{error_type}: {error_msg}\n{tb_last}"
885 | st.markdown(
886 | f"Error details: {get_clean_error_message(e)}\n"
887 | )
888 |
889 | # productivity agent
890 | else:
891 | if (not st.session_state.first_question or st.session_state.inner_auto) and ('user_input' in locals() or 'user_input' in globals()):#new
892 | print("<<<<<<<<<< inner_loop_productivity_db")
893 | print(user_input)
894 | question = user_input
895 | pass
896 | elif st.session_state.first_question and st.session_state.first_question[-1]["role"] == "initializer" and len(st.session_state.first_question) % 2 == 1:
897 | user_input = None #new
898 | question = st.session_state.first_question[-1]["content"]
899 | st.session_state.first_question[-1]["role"] = "processed"
900 | if st.session_state.global_task not in ["Auto Selection", "Inner Auto Selection"]:#new
901 | placeholder_ip = st.chat_input(f"Enter your {db_name} data related Query...")
902 | else:
903 | user_input = None #new
904 | if st.session_state.global_task not in ["Auto Selection", "Inner Auto Selection"]:#new
905 | question = st.chat_input(f"Enter your {db_name} data related Query...")
906 | stop_button = st.button("▣", key="stop_button", help="Stop the current task")
907 | # # Add user message to chat history
908 | if question:
909 | st.session_state.messages.append({"role": "user", "content": question})
910 | assistant_avatar = process_avatar("./assets/logo.png") # Original in assets folder
911 | user_avatar = "./assets/logo.png"
912 |
913 | with st.chat_message("user", avatar=user_avatar):
914 | st.markdown(question)
915 | # with st.chat_message("assistant", avatar=assistant_avatar):
916 |
917 | TEMP, TEMP2 = 0, 0
918 | if st.session_state.global_task in ["Auto Selection", "Inner Auto Selection"]:
919 | print("if block agent")
920 | st.session_state.second_task = True
921 | print(st.session_state.global_task)
922 | print(st.session_state.second_task)
923 | # st.rerun()
924 | else:
925 | print(st.session_state.global_task)
926 | print('else block agent')
927 | except Exception as e:
928 | st.markdown(f'{str(e)}')
929 |
930 | if __name__ == "__main__":
931 | main()
932 |
--------------------------------------------------------------------------------