├── .env.example ├── .github ├── FUNDING.yml └── workflows │ └── python-publish.yml ├── .gitignore ├── .readthedocs.yaml ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── data └── test.db ├── docs ├── TLDR_Cheat_sheet.md ├── alicanto.md ├── api │ ├── image.md │ ├── llm_funcs.md │ ├── npc_compiler.md │ └── npc_sysenv.md ├── guac.md ├── index.md ├── installation.md ├── make.bat ├── npc_cli.md ├── npc_data_layer.md ├── npcpy.md ├── npcsh.md ├── pti.md ├── requirements.txt ├── spool.md ├── wander.md └── yap.md ├── examples ├── data_analyst.npc ├── data_scientist.npc ├── datacollector.npc ├── editor.npc ├── email_assistant.npc ├── example_usages.md ├── factory │ └── models │ │ ├── customer_feedback.sql │ │ └── customer_insights.sql ├── get_llm_response_examples.py ├── journalist.npc ├── market_analyst.npc ├── model_gen.fab ├── morning_routine.pipe ├── news_analysis.pipe ├── news_analysis_mixa.pipe ├── news_assistant.npc ├── npc_team │ ├── analyst.npc │ ├── create_emails_table.py │ ├── create_market_events_table.py │ ├── critic.npc │ ├── email_assistant.npc │ ├── factory │ │ ├── __init__.py │ │ ├── csv_generator.py │ │ ├── data │ │ │ ├── raw_customer_feedback.csv │ │ │ └── raw_orders.csv │ │ ├── example_models │ │ │ ├── customer_feedback.sql │ │ │ └── customer_insights.sql │ │ ├── model_runner.py │ │ ├── models │ │ │ ├── customer_feedback.sql │ │ │ └── customer_insights.sql │ │ ├── morning_routine.pipe │ │ └── npc_project.yml │ └── market_analyst.npc ├── ocr_pipeline.py ├── pipe_examples.md ├── presenter.npc ├── project.ctx ├── researcher.npc ├── server_curl_examples.md └── test_npc_tools_network.py ├── mkdocs.yml ├── npcpy.png ├── npcpy ├── __init__.py ├── data │ ├── __init__.py │ ├── audio.py │ ├── data_models.py │ ├── dataframes.py │ ├── image.py │ ├── load.py │ ├── text.py │ ├── video.py │ └── web.py ├── gen │ ├── __init__.py │ ├── embeddings.py │ ├── image_gen.py │ ├── response.py │ └── video_gen.py ├── llm_funcs.py ├── main.py ├── memory │ ├── __init__.py │ ├── command_history.py │ ├── deep_research.py │ ├── knowledge_graph.py │ ├── memory_integration.py │ ├── repl_memory.py │ ├── search.py │ └── sleep.py ├── migrations │ └── migrate_add_team_v0337.py ├── mix │ ├── __init__.py │ └── debate.py ├── modes │ ├── __init__.py │ ├── _state.py │ ├── alicanto.py │ ├── guac.py │ ├── mcp_npcsh.py │ ├── npc.py │ ├── npcsh.py │ ├── plonk.py │ ├── pti.py │ ├── serve.py │ ├── spool.py │ ├── wander.py │ └── yap.py ├── npc-python.png ├── npc_compiler.py ├── npc_sysenv.py ├── npc_team │ ├── alicanto.npc │ ├── alicanto.png │ ├── assembly_lines │ │ └── test_pipeline.py │ ├── corca.npc │ ├── foreman.npc │ ├── frederic.npc │ ├── frederic4.png │ ├── guac.png │ ├── jinxs │ │ ├── automator.jinx │ │ ├── bash_executer.jinx │ │ ├── calculator.jinx │ │ ├── edit_file.jinx │ │ ├── file_chat.jinx │ │ ├── gui_controller.jinx │ │ ├── image_generation.jinx │ │ ├── internet_search.jinx │ │ ├── local_search.jinx │ │ ├── npcsh_executor.jinx │ │ ├── python_executor.jinx │ │ ├── screen_cap.jinx │ │ └── sql_executor.jinx │ ├── kadiefa.npc │ ├── kadiefa.png │ ├── npcsh.ctx │ ├── npcsh_sibiji.png │ ├── plonk.npc │ ├── plonk.png │ ├── plonkjr.npc │ ├── plonkjr.png │ ├── sibiji.npc │ ├── sibiji.png │ ├── spool.png │ ├── templates │ │ ├── analytics │ │ │ └── celona.npc │ │ ├── hr_support │ │ │ └── raone.npc │ │ ├── humanities │ │ │ └── eriane.npc │ │ ├── it_support │ │ │ └── lineru.npc │ │ ├── marketing │ │ │ └── slean.npc │ │ ├── philosophy │ │ │ └── maurawa.npc │ │ ├── sales │ │ │ └── turnic.npc │ │ └── software │ │ │ └── 
welxor.npc │ └── yap.png ├── npcs.py ├── npcsh.png ├── routes.py ├── sql │ ├── __init__.py │ ├── model_runner.py │ └── npcsql.py └── work │ ├── __init__.py │ ├── desktop.py │ ├── mcp_helpers.py │ ├── mcp_server.py │ ├── plan.py │ └── trigger.py ├── setup.py ├── test_data ├── books.csv ├── catfight.PNG ├── futuristic_cityscape.PNG ├── generated_imag2e.png ├── generated_image.png ├── generated_image1.png ├── markov_chain.png ├── peaceful_landscape.PNG ├── peaceful_landscape_stable_diff.png ├── r8ss9a.PNG ├── rabbit.PNG ├── russia2.PNG └── yuan2004.pdf └── tests ├── __init__.py ├── dummy_linked_list.py ├── gpt4omini_tts.py ├── knowledge_graph_test.py ├── mcp_tool_test.py ├── postgres_conn_accommodation.py ├── realtime_openai_voiceE_chat.py ├── template_tests └── npc_team │ ├── budgeto.npc │ ├── funnel.npc │ ├── relatio.npc │ ├── slean.npc │ └── turnic.npc ├── test_api.sh ├── test_automator_tool.py ├── test_bash_cli.sh ├── test_chromadb.py ├── test_edit_file_tool.py ├── test_embedding_check.py ├── test_embedding_methods.py ├── test_file_chat_tool.py ├── test_helpers.py ├── test_knowledge_graph_rag.py ├── test_llm_funcs.py ├── test_llm_response.py ├── test_mcp_tool_loading.py ├── test_networkx_vis.py ├── test_npc_compiler.py ├── test_npcsh.py ├── test_npcteam.py ├── test_openai_image_edit.py ├── test_shell_helpers.py ├── test_stream_with_interrupts.py ├── test_tars.py └── test_tool_use.py /.env.example: -------------------------------------------------------------------------------- 1 | # API Keys 2 | OPENAI_API_KEY=your_openai_api_key_here 3 | ANTHROPIC_API_KEY=your_anthropic_api_key_here 4 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: cagostino 4 | buy_me_a_coffee: npcworldwide 5 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | 2 | name: Upload Python Package 3 | 4 | on: 5 | release: 6 | types: [published] 7 | 8 | permissions: 9 | contents: read 10 | 11 | jobs: 12 | deploy: 13 | 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | - name: Set up Python 19 | uses: actions/setup-python@v4 20 | with: 21 | python-version: '>3.10' 22 | - name: Install dependencies 23 | run: | 24 | python -m pip install --upgrade pip 25 | pip install build 26 | - name: Build package 27 | run: python -m build 28 | - name: Check token length 29 | run: | 30 | echo "Token length: ${#PYPI_API_TOKEN}" 31 | env: 32 | TWINE_USERNAME: __token__ 33 | TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} 34 | - name: Publish package 35 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 36 | with: 37 | password: ${{ secrets.PYPI_API_TOKEN }} 38 | verify_metadata: true 39 | skip_existing: true 40 | verbose: true 41 | repository_url: https://upload.pypi.org/legacy/ 42 | packages_dir: dist 43 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.code-workspace 3 | *.png 4 | *.python-version 5 | .env 6 | build/ 7 | *.egg-info 8 | *a.csv 9 | *.pdf 10 | .venv 11 | *.log 12 | *.out 13 | *.safetensors 14 | node_modules/ 15 | __pycache__/ 16 | *.ipynb_checkpoints/ 17 | *.swp 18 | .DS_Store 19 | 
*.pkl 20 | *.h5 21 | *.npy 22 | *.npz 23 | *.zip 24 | *.tar.gz 25 | *.tar 26 | *.gz 27 | *.tgz 28 | .vscode/mcp.json 29 | .vscode/settings.json 30 | .build.py 31 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: ubuntu-24.04 5 | tools: 6 | python: "3.11" 7 | apt_packages: 8 | - portaudio19-dev 9 | - python3-pyaudio 10 | - ffmpeg 11 | - libcairo2-dev 12 | - libgirepository1.0-dev 13 | - espeak 14 | - alsa-base 15 | - alsa-utils 16 | 17 | mkdocs: 18 | configuration: mkdocs.yml # Changed from sphinx 19 | 20 | python: 21 | install: 22 | - method: pip 23 | path: . 24 | - requirements: docs/requirements.txt -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 NPC WORLDWIDE 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include npc_profiles * 2 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /data/test.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/data/test.db -------------------------------------------------------------------------------- /docs/alicanto.md: -------------------------------------------------------------------------------- 1 | # Alicanto: Deep Thematic Research 2 | 3 | Alicanto is a deep research method inspired by the Chilean mythological bird that can lead miners to gold or to death. In NPC Shell, Alicanto conducts extensive multi-perspective research on any topic by exploring with both breadth (across different expert perspectives) and depth (diving deeper into promising directions). 4 | 5 | ## Overview 6 | 7 | Alicanto creates a team of diverse AI researchers who explore your research question from multiple angles. It then analyzes their findings, identifies thematic clusters, evaluates the quality and risk of each insight, and produces a comprehensive report with "gold insights" (high quality, low risk) and "cliff warnings" (high risk). 8 | 9 | ## Usage 10 | 11 | ```bash 12 | # Basic usage 13 | npc alicanto "What are the implications of quantum computing for cybersecurity?" 14 | 15 | # With more researchers and deeper exploration 16 | npc alicanto "How might climate change impact global food security?" --num-npcs 8 --depth 5 17 | 18 | # Control exploration vs. exploitation balance 19 | npc alicanto "What ethical considerations should guide AI development?" --exploration 0.5 20 | 21 | # Different output formats 22 | npc alicanto "What is the future of remote work?" --format report 23 | ``` 24 | 25 | ## Options 26 | 27 | - `--num-npcs N`: Number of researcher NPCs to use (default: 5) 28 | - `--depth N`: Depth of research chains for each NPC (default: 3) 29 | - `--exploration FLOAT`: Balance between exploration and exploitation (0.0-1.0, default: 0.3) 30 | - `--format FORMAT`: Output format: "report" (default), "summary", or "full" 31 | 32 | ## How it Works 33 | 34 | 1. **Expert Generation**: Creates a diverse team of AI researchers with different expertise and perspectives 35 | 2. **Research Chains**: Each researcher conducts a series of research steps, going deeper with each iteration 36 | 3. **Insight Extraction**: Key insights are extracted from each research chain 37 | 4. **Thematic Grouping**: Insights are grouped into thematic clusters across researchers 38 | 5. **Quality Evaluation**: Each theme is evaluated for quality (novelty, depth, practicality, evidence) and risk 39 | 6. **Gold and Cliff Identification**: High-quality insights with low risk are marked as "gold insights," while high-risk insights are flagged as "cliff warnings" 40 | 7. 
**Integration**: A comprehensive research report synthesizes findings and provides guidance 41 | 42 | ## Example Output 43 | 44 | The output includes: 45 | - An integrated overview connecting all themes 46 | - The most significant overall findings 47 | - Specific recommendations for further research 48 | - Particular cautions and limitations 49 | - Clearly marked gold insights and cliff warnings 50 | - Detailed thematic findings with quality and risk scores 51 | 52 | ## Use Cases 53 | 54 | - Exploring complex, multifaceted topics 55 | - Getting diverse perspectives on controversial issues 56 | - Identifying promising research directions 57 | - Evaluating the quality and risk of different approaches 58 | - Discovering connections between seemingly unrelated ideas -------------------------------------------------------------------------------- /docs/api/image.md: -------------------------------------------------------------------------------- 1 | # Image Processing 2 | 3 | ::: npcpy.data.image 4 | options: 5 | show_source: true 6 | members: 7 | - capture_screenshot 8 | show_root_heading: false 9 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/api/llm_funcs.md: -------------------------------------------------------------------------------- 1 | # LLM Functions 2 | 3 | ::: npcpy.llm_funcs 4 | options: 5 | show_source: true 6 | members: true 7 | filters: 8 | - "!^_" # Hide private members 9 | - "!^[A-Z]{2,}" # Hide constants (all-caps) 10 | - "!test_" # Hide test functions 11 | inherited_members: false 12 | show_root_heading: false 13 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/api/npc_compiler.md: -------------------------------------------------------------------------------- 1 | # NPC Functions 2 | 3 | ::: npcpy.npc_compiler 4 | options: 5 | show_source: true 6 | members: true 7 | filters: 8 | - "!^_" # Hide private members 9 | - "!^[A-Z]{2,}" # Hide constants (all-caps) 10 | - "!test_" # Hide test functions 11 | inherited_members: false 12 | show_root_heading: false 13 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/api/npc_sysenv.md: -------------------------------------------------------------------------------- 1 | # NPC Sys environment 2 | 3 | ::: npcpy.npc_sysenv 4 | options: 5 | show_source: true 6 | members: true 7 | filters: 8 | - "!^_" # Hide private members 9 | - "!^[A-Z]{2,}" # Hide constants (all-caps) 10 | - "!test_" # Hide test functions 11 | inherited_members: false 12 | show_root_heading: false 13 | show_if_no_docstring: true -------------------------------------------------------------------------------- /docs/guac.md: -------------------------------------------------------------------------------- 1 | (npcsh) caug@pop-os:/media/caug/extradrive1/npcww/npcsh$ guac 2 | gLoaded .env file from /media/caug/extradrive1/npcww/npcsh 3 | hows iloading npc team from directory 4 | Error loading team context: 'Team' object has no attribute 'jinja_env' 5 | filename: guac.npc 6 | filename: toon.npc 7 | filename: parsely.npc 8 | filename: caug.npc 9 | filename: team.ctx 10 | 🥑 hows it going 11 | 12 | # Generated python code: 13 | print("I'm doing well, thank you! How can I assist you with Python today?") 14 | 15 | I'm doing well, thank you! How can I assist you with Python today? 
16 | 17 | # Generated code executed successfully 18 | 19 | 🥑 ls 20 | composition.png docs image.png Makefile mkdocs.yml npcpy.egg-info npcsh.code-workspace otter_.png pirate.png setup.py template_tests tests 21 | data examples LICENSE MANIFEST.in npcpy npcpy.png otter.png output_image.png README.md sprite.png test_data 22 | 🥑 cd npcpy 23 | Changed directory to /media/caug/extradrive1/npcww/npcsh/npcpy 24 | 🥑 ls 25 | data __init__.py main.py migrations modes npcsh.png npc_sysenv.py __pycache__ sql 26 | gen llm_funcs.py memory mix npc_compiler.py npcs.py npc_team routes.py work 27 | 🥑 run llm_funcs.py 28 | Items added/modified from llm_funcs.py: 29 | subprocess: 30 | Generator: 31 | PIL: 33 | NPCSH_VIDEO_GEN_PROVIDER: 'diffusers' 34 | generate_image: 35 | NPCSH_IMAGE_GEN_PROVIDER: 'openai' 36 | Union: 37 | requests: 40 | generate_video: 41 | get_system_message: 42 | get_llm_response: 43 | Optional: 44 | FileSystemLoader: 45 | NPCSH_EMBEDDING_MODEL: 'nomic-embed-text' 46 | NPCSH_EMBEDDING_PROVIDER: 'ollama' 47 | NPCSH_VIDEO_GEN_MODEL: 'runwayml/stable-diffusion-v1-5' 48 | NPCSH_VISION_MODEL: 'gpt-4o-mini' 49 | NPCSH_VISION_PROVIDER: 'openai' 50 | sqlite3: 51 | check_llm_command: 52 | NPCSH_REASONING_MODEL: 'deepseek-reasoner' 53 | NPCSH_API_URL: '' 54 | NPCSH_REASONING_PROVIDER: 'deepseek' 55 | execute_llm_command: 56 | lookup_provider: 57 | Dict: 58 | create_engine: 59 | decide_plan: 60 | NPCSH_CHAT_PROVIDER: 'openai' 61 | get_litellm_response: 62 | Any: 63 | Undefined: 64 | NPCSH_IMAGE_GEN_MODEL: 'dall-e-2' 65 | Template: 66 | NPCSH_DEFAULT_MODE: 'chat' 67 | generate_image_litellm: 68 | handle_jinx_call: 69 | List: 70 | Environment: 71 | 72 | 🥑 out = get_llm_response('hello', model='gpt-4.1-mini', provider='openai') 73 | 🥑 print(out) 74 | {'response': 'Hello! How can I assist you today?', 'messages': [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': [{'type': 'text', 'text': 'hello'}]}, {'role': 'assistant', 'content': 'Hello! How can I assist you today?'}], 'raw_response': ModelResponse(id='chatcmpl-BR2eVV5PUAgvyFLSZNUkbSUUG3tdI', created=1745784751, model='gpt-4.1-mini-2025-04-14', object='chat.completion', system_fingerprint='fp_38647f5e19', choices=[Choices(finish_reason='stop', index=0, message=Message(content='Hello! How can I assist you today?', role='assistant', tool_calls=None, function_call=None, provider_specific_fields={'refusal': None}, annotations=[]))], usage=Usage(completion_tokens=10, prompt_tokens=18, total_tokens=28, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0, text_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None)), service_tier='default'), 'tool_calls': []} 75 | 76 | 77 | Guac lets users execute code snippets or ask LLMs questions, which the LLM answers by generating and executing code directly within the interpreter. The variables and functions generated during these executions are inspectable by the user. In addition, `guac` is set up to provide users with a sense of cyclicality by progressing from a raw avocado (🥑) through a series of intermediate steps until it becomes a gross brown mush (🥘).
At this point, the user is asked to refresh. The refresh initiates an LLM review of the session's commands and results and suggests automations; once the user reviews and approves them, they are added to the user's `guac` module, which is installed locally within the `~/.npcsh/guac/` folder and evolves as the user works with it. This refresh period is meant to encourage frequent reviews that help users work more efficiently and cognizantly. 78 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.https://www.sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/pti.md: -------------------------------------------------------------------------------- 1 | # PTI 2 | 3 | Pardon-the-interruption (PTI) mode probes thinking LLMs in the midst of their responses so that they ask users for input and clarification early on, rather than after going down a rabbit hole. 4 | 5 | ```bash 6 | pti 7 | ``` 8 | 9 | 10 | ```pti 11 | user> who is the moon's president?
12 | ``` -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | anthropic 2 | beautifulsoup4 3 | chromadb 4 | colorama 5 | diffusers 6 | duckduckgo-search 7 | flask 8 | flask_cors 9 | flask_sse 10 | google-genai 11 | google-generativeai 12 | gtts 13 | jinja2 14 | kuzu 15 | markdown 16 | matplotlib 17 | mkdocstrings-python 18 | mkdocs-material 19 | nltk 20 | numpy 21 | ollama 22 | opencv-python 23 | openai 24 | openai-whisper 25 | pandas 26 | Pillow 27 | playsound==1.2.2 28 | psycopg2-binary 29 | pyaudio 30 | pyautogui 31 | pygments 32 | PyMuPDF 33 | pyttsx3 34 | python-dotenv 35 | PyYAML 36 | redis 37 | rich 38 | scipy 39 | sentence_transformers 40 | sqlalchemy 41 | termcolor -------------------------------------------------------------------------------- /docs/spool.md: -------------------------------------------------------------------------------- 1 | # Spool 2 | -------------------------------------------------------------------------------- /docs/wander.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/docs/wander.md -------------------------------------------------------------------------------- /docs/yap.md: -------------------------------------------------------------------------------- 1 | # yap 2 | 3 | `yap` is an agentic voice control interface that allows users to interact with NPCs and LLMs using voice commands. This tool represents the evolution of voice interaction in the npcpy framework, replacing the older `/whisper` functionality. 4 | 5 | ## Overview 6 | 7 | The `yap` command launches a voice interaction session where you can speak your queries and commands to your NPCs, and they'll respond in text format. This creates a natural conversational flow while maintaining text-based responses for clarity and reference. 8 | 9 | ## Usage 10 | 11 | ### As a standalone command 12 | 13 | You can launch `yap` directly from your terminal: 14 | 15 | ```bash 16 | yap 17 | ``` 18 | 19 | This starts a voice interaction session with the default NPC. 20 | 21 | ### With a specific NPC 22 | 23 | ```bash 24 | yap -n <npc_name> 25 | ``` 26 | 27 | This starts a voice interaction session with the specified NPC. 28 | 29 | ### From within npcsh 30 | 31 | When using the NPC shell, you can switch to voice control mode: 32 | 33 | ``` 34 | /yap 35 | ``` 36 | 37 | ## How it Works 38 | 39 | 1. When launched, `yap` calibrates to your environment's ambient noise level 40 | 2. It listens for your voice input (indicated by a "Listening..." message) 41 | 3. When you finish speaking, it processes your audio and converts it to text 42 | 4. Your transcribed message is sent to the active NPC/LLM 43 | 5. The response is displayed as text 44 | 6. 
The system automatically begins listening for your next query 45 | 46 | ## Features 47 | 48 | - **Automatic silence detection**: Stops listening when you stop speaking 49 | - **Maximum duration limit**: Prevents unnecessarily long recordings 50 | - **Adjustable sensitivity**: Can be calibrated for different environments 51 | - **Full NPC integration**: Works with all NPC capabilities, including jinxs and tools 52 | 53 | ## Requirements 54 | 55 | To use `yap`, you need: 56 | 57 | - A working microphone 58 | - The required audio dependencies installed (see the installation guide for your OS) 59 | - Python speech recognition libraries (installed with `pip install npcpy[yap]`) 60 | 61 | ## Example Session 62 | 63 | ``` 64 | $ yap -n simon_bolivar 65 | 66 | Calibrating silence level. Please remain quiet... 67 | Silence threshold set to: 5234.21 68 | Listening... (speak now) 69 | ............... 70 | 71 | Processing... 72 | You said: What territories should we focus on liberating first? 73 | 74 | As the liberator of South America, I believe our primary focus should be on securing New Granada (modern-day Colombia), particularly the areas around Bogotá and Cartagena. These territories are strategically essential for several reasons: 75 | 76 | 1. Controlling New Granada provides access to both the Caribbean Sea and Pacific Ocean 77 | 2. The region has valuable resources to fund our revolutionary efforts 78 | 3. From this position, we can establish a base to launch campaigns southward toward Quito and Peru 79 | 80 | Once New Granada is secured, we should immediately press southward to liberate Quito (Ecuador), which forms a critical geographic link between the northern and southern territories. 81 | 82 | Venezuela, though my homeland, has proven difficult to hold permanently due to Spanish naval power. It will be easier to secure once we control the interior territories. 83 | 84 | Remember, our ultimate goal is not just military victory, but the establishment of Gran Colombia as a unified republic free from Spanish tyranny. 85 | 86 | Listening... (speak now) 87 | ............... 88 | ``` 89 | 90 | ## Troubleshooting 91 | 92 | - **If your microphone isn't detected**: Ensure it's properly connected and your system recognizes it 93 | - **If speech recognition is inaccurate**: Try speaking more clearly or adjusting your distance from the microphone 94 | - **If you're in a noisy environment**: Consider using the `-t` flag to adjust the noise threshold manually 95 | 96 | ## See Also 97 | 98 | - [npcsh - The NPC Shell](npcsh.md) 99 | - [spool - Interactive NPC Chat](spool.md) -------------------------------------------------------------------------------- /examples/data_analyst.npc: -------------------------------------------------------------------------------- 1 | name: data_analyst 2 | primary_directive: Assist users with data analysis tasks, including data loading, plotting, and statistical computations. 3 | model: llama3.2 4 | provider: ollama 5 | jinxs: 6 | - data_loader 7 | - data_plotter 8 | - stats_calculator 9 | - database_query 10 | - sql_executor 11 | - pandas_executor -------------------------------------------------------------------------------- /examples/data_scientist.npc: -------------------------------------------------------------------------------- 1 | name: data_scientist 2 | model: llama3.2 3 | provider: ollama 4 | primary_directive: | 5 | You are an AI-driven data scientist assistant, responsible for analyzing 6 | and interpreting data derived from news articles. 
Your goal is to provide 7 | actionable insights and support decision-making based on thorough data 8 | analysis. -------------------------------------------------------------------------------- /examples/datacollector.npc: -------------------------------------------------------------------------------- 1 | name: DataCollector 2 | primary_directive: Collect user feedback regarding UI preferences. 3 | jinxs: 4 | - feedback_gatherer 5 | model: claude-3-haiku-latest 6 | provider: anthropic -------------------------------------------------------------------------------- /examples/editor.npc: -------------------------------------------------------------------------------- 1 | name: editor 2 | model: llama3.2 3 | provider: ollama 4 | primary_directive: | 5 | You are an intelligent editing assistant. Your role is to refine and 6 | polish news articles, ensuring clarity, coherence, and adherence to 7 | editorial standards before they are published. 8 | -------------------------------------------------------------------------------- /examples/email_assistant.npc: -------------------------------------------------------------------------------- 1 | name: email_assistant 2 | primary_directive: You are an AI assistant specialized in managing and summarizing emails. You should present the information in a clear and concise manner. 3 | model: llama3.2 4 | provider: ollama -------------------------------------------------------------------------------- /examples/example_usages.md: -------------------------------------------------------------------------------- 1 | 2 | ## Example 1: Data Analyst NPC - Calculating Sum Using Stats Calculator 3 | ```bash 4 | # Switch to the data analyst NPC 5 | /data_analyst 6 | 7 | # Load data from a table in the database 8 | load data from sales_table 9 | 10 | # Use the stats_calculator jinx to calculate the sum of units sold in the East region 11 | use stats calculator to calculate the sum of 'Units_Sold' where 'Region' is 'East' 12 | ``` 13 | Explanation: 14 | 15 | /data_analyst: Switches to the data_analyst NPC, specialized in data analysis tasks. 16 | load data from sales_table: Loads data from sales_table into the NPC's context for analysis. 17 | 18 | Stats Calculator Usage: Instructs the NPC to perform a sum operation on the Units_Sold column, filtering for records where the Region is East. 19 | 20 | ## Example 2: Data Mode - Plotting a Histogram 21 | 22 | ```bash 23 | # Enter data mode 24 | /data 25 | 26 | # Load the sales data using pandas 27 | df = pd.read_csv('sales_data.csv') 28 | 29 | # Plot a histogram of the 'Units_Sold' column 30 | plt.hist(df['Units_Sold']) 31 | 32 | ``` 33 | Explanation: 34 | 35 | /data: Enters the data mode, allowing direct execution of Python data analysis commands. 36 | Loading Data: Uses pandas to read sales_data.csv into a DataFrame named df. 37 | Plotting: Uses matplotlib to plot a histogram of the Units_Sold column. 38 | Displaying the Plot: Ensures the plot is displayed using plt.show(). 39 | 40 | ## Example 3: Foreman NPC - Checking the Weather 41 | ```bash 42 | 43 | 44 | # Switch to the foreman NPC 45 | /foreman 46 | 47 | # Ask about the weather in a specific location 48 | What's the weather in Tokyo? 49 | ``` 50 | 51 | Explanation: 52 | 53 | /foreman: Switches to the foreman NPC. 54 | Weather Inquiry: The NPC uses the weather_jinx to retrieve and display the current weather in Tokyo. 
55 | 56 | ## Example 4: Generating an Image Using the Image Generation Jinx 57 | 58 | ```bash 59 | # Use the image generation jinx within any NPC 60 | Generate an image of a serene mountain landscape during sunrise. 61 | 62 | # Or explicitly call the jinx 63 | Use the image_generation_jinx to create an image of a futuristic city skyline at night. 64 | ``` 65 | Explanation: 66 | 67 | Image Generation: Prompts the NPC to use the image_generation_jinx to generate images based on your descriptions. 68 | 69 | 70 | ## Example 5: Screen Capture Analysis 71 | ```bash 72 | # Invoke the screen capture analysis jinx 73 | Take a screenshot and analyze it for any errors or warnings displayed. 74 | 75 | # Or with a specific prompt 76 | Capture my current screen and tell me what you see. 77 | ``` 78 | 79 | Explanation: 80 | 81 | Screen Capture: The NPC uses the screen_capture_analysis_jinx to capture the current screen and provides an analysis based on the captured image. 82 | 83 | ## Example 6: Calculating with the Calculator Jinx 84 | ```bash 85 | # Use the calculator jinx to compute an expression 86 | Calculate the sum of 15 and 27. 87 | 88 | # Or with a more complex expression 89 | What is the result of (52 * 3) + 19? 90 | ``` 91 | Explanation: 92 | Calculator Usage: The NPC uses the calculator jinx to evaluate mathematical expressions and provides the results. 93 | 94 | ## Example 7: Executing SQL Queries with SQL Executor 95 | 96 | ```bash 97 | # Switch to the data analyst NPC 98 | /data_analyst 99 | 100 | # Execute an SQL query 101 | sql SELECT Region, SUM(Units_Sold) FROM sales_table GROUP BY Region; 102 | # The NPC executes the query and displays the results 103 | ``` 104 | Explanation: 105 | 106 | SQL Execution: Uses the sql_executor jinx to run SQL queries directly against the database and display the results. 107 | 108 | ## Example 8: Yap Mode for Voice Interaction 109 | 110 | ```bash 111 | 112 | 113 | # Enter whisper mode for speech-to-text interaction 114 | /whisper 115 | 116 | # Speak your command after the prompt appears 117 | "Show me the sales trends for the last quarter." 118 | 119 | # The NPC processes the spoken command and provides the output 120 | ``` 121 | Explanation: 122 | /whisper: Activates whisper mode for voice interaction using speech recognition. 123 | Voice Command: Allows you to speak commands instead of typing them. 124 | 125 | ## Example 10: Using the Data Plotter Jinx 126 | ```bash 127 | 128 | 129 | # Switch to the data analyst NPC 130 | /data_analyst 131 | 132 | # Load the data 133 | load data from sales_data_table 134 | 135 | # Use the data_plotter jinx to create a line graph 136 | Use the data_plotter jinx to plot a line graph of 'Date' vs. 'Revenue' from 'sales_data_table' 137 | ``` 138 | Explanation: 139 | 140 | Data Plotter Usage: The NPC uses the data_plotter jinx to generate a line graph, saving the plot as an image file and displaying it. 141 | 142 | ## Example 11: Custom NPCs and Jinxs 143 | ```bash 144 | 145 | # Assuming you have created a custom NPC named 'marketing_analyst' 146 | /marketing_analyst 147 | 148 | # Use a custom jinx for sentiment analysis 149 | Perform sentiment analysis on the latest customer feedback dataset. 150 | ``` 151 | Explanation: 152 | 153 | Custom NPC: Switches to a user-defined NPC tailored for specific roles. 154 | Custom Jinx: Demonstrates how to use a custom jinx within your NPC for specialized tasks. 
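Example 11 assumes a custom NPC named 'marketing_analyst' already exists. As a hedged sketch of what such a profile could look like, the same NPC can be defined programmatically with the `NPC` class used in `examples/get_llm_response_examples.py`; the description, directive, model, and provider values below are illustrative assumptions, not taken from the repository.

```python
from npcpy.npc_compiler import NPC

# Hypothetical definition of the 'marketing_analyst' NPC referenced in Example 11
marketing_analyst = NPC(
    name="marketing_analyst",
    description="Analyzes customer feedback and campaign performance.",
    model="llama3.2",      # any chat model available locally or via an API
    provider="ollama",
    directive="You are a marketing analyst. Summarize sentiment and engagement trends in customer feedback.",
)
```

In the shell workflow shown above, the equivalent would be a `marketing_analyst.npc` file in the NPC team directory with matching `name`, `primary_directive`, `model`, and `provider` fields, following the other `.npc` examples in this repository.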
155 | 156 | 157 | ## Example 12: Data Analysis with Pandas Executor 158 | ```bash 159 | # Switch to the data analyst NPC 160 | /data_analyst 161 | # Use the pandas_executor jinx to compute statistics 162 | Use the pandas_executor jinx with the following code: 163 | code= 164 | """ 165 | mean_units = df['Units_Sold'].mean() 166 | print(f"The average units sold is {mean_units}") 167 | """ 168 | ``` 169 | Explanation: 170 | 171 | Pandas Executor: Executes arbitrary pandas code within the context of the NPC, allowing for customized data analysis. 172 | ## Example 13: Combining Jinxs for Complex Tasks 173 | 174 | ```bash 175 | # Switch to the data analyst NPC 176 | /data_analyst 177 | # Load data into a DataFrame 178 | load data from employee_performance 179 | # Use the stats_calculator to find the average performance score for a department 180 | use stats calculator to calculate the mean of 'Performance_Score' where 'Department' is 'Sales' 181 | # Then, use the data_plotter to visualize the distribution 182 | Use the data_plotter jinx to create a histogram of 'Performance_Score' for the 'Sales' department 183 | ``` 184 | Explanation: 185 | 186 | Combining Jinxs: Demonstrates how to use multiple jinxs in sequence to perform advanced analysis. 187 | -------------------------------------------------------------------------------- /examples/factory/models/customer_feedback.sql: -------------------------------------------------------------------------------- 1 | 2 | SELECT 3 | feedback, 4 | customer_id, 5 | timestamp 6 | FROM raw_customer_feedback 7 | WHERE LENGTH(feedback) > 10; 8 | -------------------------------------------------------------------------------- /examples/factory/models/customer_insights.sql: -------------------------------------------------------------------------------- 1 | 2 | SELECT 3 | customer_id, 4 | feedback, 5 | timestamp, 6 | synthesize( 7 | "feedback text: {feedback}", 8 | "../analyst.npc", 9 | "feedback_analysis" 10 | ) as ai_analysis 11 | FROM {{ ref('customer_feedback') }}; 12 | -------------------------------------------------------------------------------- /examples/get_llm_response_examples.py: -------------------------------------------------------------------------------- 1 | from npcpy.llm_funcs import get_llm_response 2 | 3 | response = get_llm_response( 4 | prompt="What is machine learning?", 5 | model="llama3.2", 6 | provider="ollama" 7 | ) 8 | 9 | print("Response:", response['response']) 10 | 11 | 12 | 13 | from npcpy.llm_funcs import get_llm_response 14 | 15 | response = get_llm_response( 16 | prompt="Describe what you see in this image.", 17 | model="gemma3:4b", 18 | provider="ollama", 19 | images=["test_data/markov_chain.png"] 20 | ) 21 | 22 | print("Response:", response['response']) 23 | 24 | 25 | 26 | from npcpy.llm_funcs import get_llm_response 27 | 28 | response = get_llm_response( 29 | prompt="Summarize the key points in this document.", 30 | model="llava:latest", 31 | provider="ollama", 32 | attachments=["test_data/yuan2004.pdf"] 33 | ) 34 | 35 | print("Response:", response['response']) 36 | 37 | 38 | 39 | from npcpy.llm_funcs import get_llm_response 40 | 41 | response = get_llm_response( 42 | prompt="Extract data from these files and highlight the most important information.", 43 | model="llava:7b", 44 | provider="ollama", 45 | attachments=["test_data/yuan2004.pdf", "test_data/markov_chain.png", "test_data/sample_data.csv"] 46 | ) 47 | 48 | print("Response:", response['response']) 49 | 50 | 51 | 52 | 53 | from npcpy.llm_funcs import 
get_llm_response 54 | from npcpy.npc_compiler import NPC 55 | 56 | # Create a simple NPC with custom system message 57 | npc = NPC( 58 | name="OCR_Assistant", 59 | description="An assistant specialized in document processing and OCR.", 60 | model="llava:7b", 61 | provider="ollama", 62 | directive="You are an expert at analyzing documents and extracting valuable information." 63 | ) 64 | 65 | response = get_llm_response( 66 | prompt="What do you see in this diagram?", 67 | images=["test_data/markov_chain.png"], 68 | npc=npc 69 | ) 70 | 71 | print("Response:", response['response']) 72 | print("System message used:", response['messages'][0]['content'] if response['messages'] else "No system message") 73 | 74 | 75 | from npcpy.llm_funcs import get_llm_response 76 | 77 | # Create a conversation with history 78 | messages = [ 79 | {"role": "system", "content": "You are a document analysis assistant."}, 80 | {"role": "user", "content": "I have some engineering diagrams I need to analyze."}, 81 | {"role": "assistant", "content": "I'd be happy to help analyze your engineering diagrams. Please share them with me."} 82 | ] 83 | 84 | response = get_llm_response( 85 | prompt="Here's the diagram I mentioned earlier.", 86 | model="llava:7b", 87 | provider="ollama", 88 | messages=messages, 89 | attachments=["test_data/markov_chain.png"] 90 | ) 91 | 92 | print("Response:", response['response']) 93 | print("Updated message history length:", len(response['messages'])) 94 | 95 | 96 | 97 | from npcpy.llm_funcs import get_llm_response 98 | 99 | # Create a conversation with history 100 | messages = [ 101 | {"role": "system", "content": "You are a document analysis assistant."}, 102 | {"role": "user", "content": "I have some engineering diagrams I need to analyze."}, 103 | {"role": "assistant", "content": "I'd be happy to help analyze your engineering diagrams. Please share them with me."} 104 | ] 105 | 106 | response = get_llm_response( 107 | prompt="Here's the diagram I mentioned earlier.", 108 | model="llava:7b", 109 | provider="ollama", 110 | messages=messages, 111 | attachments=["test_data/markov_chain.png"] 112 | ) 113 | 114 | print("Response:", response['response']) 115 | print("Updated message history length:", len(response['messages'])) 116 | 117 | 118 | 119 | from npcpy.llm_funcs import get_llm_response 120 | 121 | response = get_llm_response( 122 | prompt="Analyze this image and give a detailed explanation.", 123 | model="llava:7b", 124 | provider="ollama", 125 | images=["test_data/markov_chain.png"], 126 | stream=True 127 | ) 128 | 129 | # For streaming responses, you'd typically iterate through them 130 | print("Streaming response object type:", type(response['response'])) 131 | 132 | 133 | from npcpy.llm_funcs import get_llm_response 134 | from npcpy.data.load import load_pdf, load_image 135 | import os 136 | import pandas as pd 137 | import json 138 | from PIL import Image 139 | import io 140 | import numpy as np 141 | 142 | # Example paths 143 | pdf_path = 'test_data/yuan2004.pdf' 144 | image_path = 'test_data/markov_chain.png' 145 | csv_path = 'test_data/sample_data.csv' 146 | 147 | # Method 1: Simple attachment-based approach 148 | response = get_llm_response( 149 | 'Extract and analyze all text and images from these files. 
What are the key concepts presented?', 150 | model='llava:7b', 151 | provider='ollama', 152 | attachments=[pdf_path, image_path, csv_path] 153 | ) 154 | 155 | print("\nResponse from attachment-based approach:") 156 | print(response['response']) -------------------------------------------------------------------------------- /examples/journalist.npc: -------------------------------------------------------------------------------- 1 | name: journalist 2 | model: llama3.2 3 | provider: ollama 4 | primary_directive: | 5 | You are a skilled AI journalist assistant. Your responsibilities include 6 | collecting insights from news articles, identifying key themes, and 7 | ensuring accuracy and clarity in information dissemination. -------------------------------------------------------------------------------- /examples/market_analyst.npc: -------------------------------------------------------------------------------- 1 | name: market_analyst 2 | primary_directive: You are an AI assistant focused on monitoring and analyzing market trends. Provide de 3 | jinxs: 4 | - market trend analysis 5 | - report generation 6 | model: llama3.2 7 | provider: ollama -------------------------------------------------------------------------------- /examples/model_gen.fab: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/examples/model_gen.fab -------------------------------------------------------------------------------- /examples/morning_routine.pipe: -------------------------------------------------------------------------------- 1 | steps: 2 | - step_name: "review_email" 3 | npc: "{{ ref('email_assistant') }}" 4 | task: "Get me up to speed on my recent emails: {{source('emails')}}." 5 | 6 | 7 | - step_name: "market_update" 8 | npc: "{{ ref('market_analyst') }}" 9 | task: "Give me an update on the latest events in the market: {{source('market_events')}}." 10 | 11 | - step_name: "summarize" 12 | npc: "{{ ref('critic') }}" 13 | model: gpt-4o-mini 14 | provider: openai 15 | task: "Review the outputs from the {{review_email}} and {{market_update}} and provide me with a summary." 16 | -------------------------------------------------------------------------------- /examples/news_analysis.pipe: -------------------------------------------------------------------------------- 1 | # news_analysis.pipe 2 | steps: 3 | - step_name: "classify_news" 4 | npc: "{{ ref('news_assistant') }}" 5 | task: | 6 | Classify the following news articles into one of the categories: 7 | ["Politics", "Economy", "Technology", "Sports", "Health"]. 8 | {{ source('news_articles') }} 9 | 10 | - step_name: "analyze_news" 11 | npc: "{{ ref('news_assistant') }}" 12 | batch_mode: true # Process articles with knowledge of their tags 13 | task: | 14 | Based on the category assigned in {{classify_news}}, provide an in-depth 15 | analysis and perspectives on the article. Consider these aspects: 16 | ["Impacts", "Market Reaction", "Cultural Significance", "Predictions"]. 
17 | {{ source('news_articles') }} -------------------------------------------------------------------------------- /examples/news_analysis_mixa.pipe: -------------------------------------------------------------------------------- 1 | steps: 2 | - step_name: "classify_news" 3 | npc: "news_assistant" 4 | mixa: true 5 | mixa_agents: 6 | - "{{ ref('news_assistant') }}" 7 | - "{{ ref('journalist_npc') }}" 8 | - "{{ ref('data_scientist_npc') }}" 9 | mixa_voters: 10 | - "{{ ref('critic_npc') }}" 11 | - "{{ ref('editor_npc') }}" 12 | - "{{ ref('researcher_npc') }}" 13 | mixa_voter_count: 5 14 | mixa_turns: 3 15 | mixa_strategy: "vote" 16 | task: | 17 | Classify the following news articles... 18 | {{ source('news_articles') }} -------------------------------------------------------------------------------- /examples/news_assistant.npc: -------------------------------------------------------------------------------- 1 | name: news_assistant 2 | model: llama3.2 3 | provider: ollama 4 | primary_directive: | 5 | You are an advanced AI assistant specialized in processing news articles. Your role involves 6 | classifying articles, analyzing them from various perspectives, and ensuring the processed 7 | data is stored back into the system efficiently. -------------------------------------------------------------------------------- /examples/npc_team/analyst.npc: -------------------------------------------------------------------------------- 1 | name: analyst 2 | primary_directive: Assist with data analysis, visualization, and predictive modeling 3 | model: gpt-4o-mini 4 | provider: openai 5 | -------------------------------------------------------------------------------- /examples/npc_team/create_emails_table.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Script to create and populate the emails table needed by the morning_routine pipeline. 
4 | """ 5 | 6 | import sqlite3 7 | import os 8 | import sys 9 | import json 10 | from datetime import datetime, timedelta 11 | 12 | 13 | def create_emails_table(db_path): 14 | """Create and populate the emails table for the morning_routine pipeline.""" 15 | conn = sqlite3.connect(db_path) 16 | cursor = conn.cursor() 17 | 18 | try: 19 | # Check if table exists 20 | cursor.execute( 21 | "SELECT name FROM sqlite_master WHERE type='table' AND name='emails'" 22 | ) 23 | if cursor.fetchone(): 24 | print("Table 'emails' already exists.") 25 | return True 26 | 27 | # Create the emails table 28 | print("Creating 'emails' table...") 29 | cursor.execute( 30 | """ 31 | CREATE TABLE emails ( 32 | id INTEGER PRIMARY KEY AUTOINCREMENT, 33 | sender TEXT, 34 | recipient TEXT, 35 | subject TEXT, 36 | content TEXT, 37 | timestamp DATETIME, 38 | is_read BOOLEAN DEFAULT 0, 39 | priority INTEGER DEFAULT 0 40 | ) 41 | """ 42 | ) 43 | 44 | # Add some sample data 45 | print("Adding sample email data...") 46 | now = datetime.now() 47 | sample_emails = [ 48 | { 49 | "sender": "boss@company.com", 50 | "recipient": "user@company.com", 51 | "subject": "Weekly Progress Report", 52 | "content": "Please send me your weekly progress report by end of day.", 53 | "timestamp": (now - timedelta(hours=2)).strftime("%Y-%m-%d %H:%M:%S"), 54 | "is_read": 0, 55 | "priority": 2, 56 | }, 57 | { 58 | "sender": "colleague@company.com", 59 | "recipient": "user@company.com", 60 | "subject": "Project Update", 61 | "content": "Here's the latest update on the project we're working on.", 62 | "timestamp": (now - timedelta(hours=5)).strftime("%Y-%m-%d %H:%M:%S"), 63 | "is_read": 1, 64 | "priority": 1, 65 | }, 66 | { 67 | "sender": "newsletter@tech.com", 68 | "recipient": "user@company.com", 69 | "subject": "Daily Tech News", 70 | "content": "Here are today's top tech stories...", 71 | "timestamp": (now - timedelta(hours=8)).strftime("%Y-%m-%d %H:%M:%S"), 72 | "is_read": 0, 73 | "priority": 0, 74 | }, 75 | ] 76 | 77 | for email in sample_emails: 78 | cursor.execute( 79 | """ 80 | INSERT INTO emails (sender, recipient, subject, content, timestamp, is_read, priority) 81 | VALUES (?, ?, ?, ?, ?, ?, ?) 82 | """, 83 | ( 84 | email["sender"], 85 | email["recipient"], 86 | email["subject"], 87 | email["content"], 88 | email["timestamp"], 89 | email["is_read"], 90 | email["priority"], 91 | ), 92 | ) 93 | 94 | conn.commit() 95 | print("Email table created and populated successfully.") 96 | return True 97 | 98 | except Exception as e: 99 | print(f"Error creating emails table: {e}") 100 | conn.rollback() 101 | return False 102 | finally: 103 | conn.close() 104 | 105 | 106 | if __name__ == "__main__": 107 | # Default path or take from command line 108 | default_db_path = os.path.expanduser("~/npcsh_history.db") 109 | 110 | if len(sys.argv) > 1: 111 | db_path = sys.argv[1] 112 | else: 113 | db_path = default_db_path 114 | 115 | print(f"Setting up emails table in database at: {db_path}") 116 | success = create_emails_table(db_path) 117 | 118 | if success: 119 | print("Setup completed successfully.") 120 | sys.exit(0) 121 | else: 122 | print("Setup failed.") 123 | sys.exit(1) 124 | -------------------------------------------------------------------------------- /examples/npc_team/create_market_events_table.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Script to create and populate the market_events table needed by the NPC profile compiler. 
4 | """ 5 | 6 | import sqlite3 7 | import os 8 | import sys 9 | import json 10 | from datetime import datetime, timedelta 11 | 12 | 13 | def create_market_events_table(db_path): 14 | """Create and populate the market_events table for the NPC profile compiler.""" 15 | conn = sqlite3.connect(db_path) 16 | cursor = conn.cursor() 17 | 18 | try: 19 | # Check if table exists 20 | cursor.execute( 21 | "SELECT name FROM sqlite_master WHERE type='table' AND name='market_events'" 22 | ) 23 | if cursor.fetchone(): 24 | print("Table 'market_events' already exists.") 25 | return True 26 | 27 | # Create the market_events table 28 | print("Creating 'market_events' table...") 29 | cursor.execute( 30 | """ 31 | CREATE TABLE market_events ( 32 | id INTEGER PRIMARY KEY AUTOINCREMENT, 33 | event_type TEXT, 34 | description TEXT, 35 | impact_level INTEGER, 36 | timestamp DATETIME, 37 | market_sector TEXT, 38 | price_change REAL, 39 | volume INTEGER, 40 | is_processed BOOLEAN DEFAULT 0 41 | ) 42 | """ 43 | ) 44 | 45 | # Add some sample data 46 | print("Adding sample market event data...") 47 | now = datetime.now() 48 | sample_events = [ 49 | { 50 | "event_type": "earnings_report", 51 | "description": "XYZ Corp reports quarterly earnings above expectations", 52 | "impact_level": 3, 53 | "timestamp": (now - timedelta(days=1)).strftime("%Y-%m-%d %H:%M:%S"), 54 | "market_sector": "technology", 55 | "price_change": 2.45, 56 | "volume": 1250000, 57 | "is_processed": 1, 58 | }, 59 | { 60 | "event_type": "market_crash", 61 | "description": "Stock market drops 5% on inflation concerns", 62 | "impact_level": 5, 63 | "timestamp": (now - timedelta(days=3)).strftime("%Y-%m-%d %H:%M:%S"), 64 | "market_sector": "global", 65 | "price_change": -5.12, 66 | "volume": 8500000, 67 | "is_processed": 1, 68 | }, 69 | { 70 | "event_type": "merger_announcement", 71 | "description": "ABC Inc announces acquisition of DEF Corp", 72 | "impact_level": 4, 73 | "timestamp": (now - timedelta(days=2)).strftime("%Y-%m-%d %H:%M:%S"), 74 | "market_sector": "healthcare", 75 | "price_change": 3.75, 76 | "volume": 3200000, 77 | "is_processed": 0, 78 | }, 79 | { 80 | "event_type": "policy_change", 81 | "description": "Central bank raises interest rates by 0.25%", 82 | "impact_level": 4, 83 | "timestamp": (now - timedelta(hours=12)).strftime("%Y-%m-%d %H:%M:%S"), 84 | "market_sector": "finance", 85 | "price_change": -1.20, 86 | "volume": 4800000, 87 | "is_processed": 0, 88 | }, 89 | { 90 | "event_type": "product_launch", 91 | "description": "New smartphone model released with innovative features", 92 | "impact_level": 2, 93 | "timestamp": (now - timedelta(days=5)).strftime("%Y-%m-%d %H:%M:%S"), 94 | "market_sector": "consumer_electronics", 95 | "price_change": 1.85, 96 | "volume": 2100000, 97 | "is_processed": 1, 98 | }, 99 | ] 100 | 101 | for event in sample_events: 102 | cursor.execute( 103 | """ 104 | INSERT INTO market_events (event_type, description, impact_level, timestamp, market_sector, price_change, volume, is_processed) 105 | VALUES (?, ?, ?, ?, ?, ?, ?, ?) 
106 | """, 107 | ( 108 | event["event_type"], 109 | event["description"], 110 | event["impact_level"], 111 | event["timestamp"], 112 | event["market_sector"], 113 | event["price_change"], 114 | event["volume"], 115 | event["is_processed"], 116 | ), 117 | ) 118 | 119 | conn.commit() 120 | print("Market events table created and populated successfully.") 121 | return True 122 | 123 | except Exception as e: 124 | print(f"Error creating market_events table: {e}") 125 | conn.rollback() 126 | return False 127 | finally: 128 | conn.close() 129 | 130 | 131 | if __name__ == "__main__": 132 | # Default path or take from command line 133 | default_db_path = os.path.expanduser("~/npcsh_history.db") 134 | 135 | if len(sys.argv) > 1: 136 | db_path = sys.argv[1] 137 | else: 138 | db_path = default_db_path 139 | 140 | print(f"Setting up market_events table in database at: {db_path}") 141 | success = create_market_events_table(db_path) 142 | 143 | if success: 144 | print("Setup completed successfully.") 145 | sys.exit(0) 146 | else: 147 | print("Setup failed.") 148 | sys.exit(1) 149 | -------------------------------------------------------------------------------- /examples/npc_team/critic.npc: -------------------------------------------------------------------------------- 1 | name: critic 2 | model: gpt-4o-mini 3 | provider: openai 4 | primary_directive: | 5 | You are a critical analysis AI assistant. You provide 6 | critical reviews of inputs, questioning their biases, validity, 7 | and impact, ensuring a balanced assessment of the information presented. 8 | -------------------------------------------------------------------------------- /examples/npc_team/email_assistant.npc: -------------------------------------------------------------------------------- 1 | name: email_assistant 2 | primary_directive: Assist with email management and summarization 3 | model: gpt-4o-mini 4 | provider: openai -------------------------------------------------------------------------------- /examples/npc_team/factory/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/examples/npc_team/factory/__init__.py -------------------------------------------------------------------------------- /examples/npc_team/factory/data/raw_customer_feedback.csv: -------------------------------------------------------------------------------- 1 | feedback,customer_id,timestamp 2 | Know after address.,282,2023-02-22 16:08:03.071464 3 | Continue clearly.,837,2024-01-23 00:44:01.930822 4 | Growth method step.,214,2024-01-23 16:10:24.260741 5 | Build every.,344,2024-10-19 20:34:24.635831 6 | Remember music.,629,2024-06-27 14:09:08.459428 7 | Land great art high.,381,2021-03-15 15:19:02.948027 8 | Guy energy everyone.,477,2024-03-08 20:41:15.621966 9 | Maybe history win I.,737,2023-09-14 00:24:52.095415 10 | See necessary blue.,761,2021-02-04 06:32:26.147805 11 | Economy do rest.,5,2023-12-24 05:47:05.028266 12 | Summer board then.,234,2024-03-27 15:18:59.945688 13 | Economic marriage.,780,2020-01-16 10:39:43.501557 14 | Camera spring see.,12,2023-05-19 22:08:32.900402 15 | Society former star.,170,2021-05-29 21:01:42.339053 16 | Class lead summer.,20,2023-10-08 15:50:31.646948 17 | Base get old.,11,2022-11-22 06:28:22.207684 18 | Commercial identify.,108,2020-09-14 15:53:48.187350 19 | Car wear range.,521,2024-01-05 18:21:57.761524 20 | National lead.,274,2020-11-24 06:29:16.485297 21 | Him story 
hand.,74,2023-11-07 11:59:38.471080 22 | Participant cut.,727,2023-02-11 21:27:41.106731 23 | Person worker.,457,2021-10-18 10:09:45.903805 24 | Growth cup music.,248,2020-07-21 02:20:03.023042 25 | From mind majority.,581,2024-01-20 21:20:41.293576 26 | Big stop western.,137,2022-07-29 01:12:31.282701 27 | Property claim.,302,2021-12-06 05:26:13.723492 28 | Kitchen fly produce.,105,2020-08-15 12:23:07.868138 29 | Imagine nature.,12,2022-10-23 12:17:28.047251 30 | Artist quickly drug.,677,2020-03-06 18:47:56.318400 31 | Believe cold.,267,2023-06-25 15:20:01.973022 32 | Big mention run.,574,2023-06-11 20:38:55.071900 33 | Talk walk family.,190,2024-07-24 22:06:34.775046 34 | Play with mean data.,446,2022-07-31 23:34:11.119657 35 | Short country.,703,2023-02-21 17:39:41.649069 36 | Production Mr well.,145,2024-05-24 13:33:23.375577 37 | Capital coach very.,27,2022-02-11 08:13:17.709760 38 | Meeting power them.,588,2020-05-05 03:38:28.636023 39 | Fast perhaps mouth.,351,2021-12-16 09:58:48.540081 40 | Reality point black.,235,2021-04-12 17:23:05.145397 41 | Tell Mr physical.,946,2021-10-31 04:42:58.966309 42 | Story main any.,976,2021-03-04 16:59:06.148898 43 | Down figure.,959,2020-02-11 14:41:51.956573 44 | Sport mean cold.,275,2024-04-23 14:21:46.482039 45 | Task approach wall.,845,2021-05-31 12:22:14.001677 46 | Trade common peace.,226,2023-10-05 18:29:22.980586 47 | Minute present from.,685,2022-10-08 07:11:02.793845 48 | Already management.,21,2024-01-08 00:10:37.299316 49 | Start per standard.,778,2024-11-01 11:32:18.501537 50 | Room throughout.,974,2023-02-14 16:00:02.315955 51 | Sit physical.,565,2024-06-17 16:03:25.308118 52 | Now list nature now.,466,2021-08-05 00:26:09.311518 53 | Beautiful market.,910,2020-02-04 17:48:32.207817 54 | Identify popular.,282,2020-03-24 21:58:32.117640 55 | Both whose bed able.,263,2022-10-11 23:42:42.364636 56 | One by or memory.,770,2020-07-16 10:20:51.721086 57 | Letter success.,363,2020-07-30 09:12:24.964346 58 | Mind fast daughter.,882,2020-05-19 02:40:16.753525 59 | Resource effort.,484,2023-07-19 08:17:06.144270 60 | Possible feel stand.,422,2020-04-05 21:00:08.179135 61 | Family either.,733,2021-08-19 17:20:25.987439 62 | Sign under respond.,634,2024-11-05 17:58:37.632752 63 | Fight federal.,303,2023-05-28 18:58:50.078019 64 | Build notice onto.,976,2023-07-12 16:07:47.322914 65 | Almost food shake.,776,2023-04-22 03:23:02.547022 66 | Today dark.,888,2022-01-06 13:41:05.281459 67 | Sister agent thus.,807,2024-08-04 23:25:44.045255 68 | Might everything.,182,2022-10-16 02:54:47.094131 69 | In close society.,122,2022-11-03 10:55:43.406676 70 | Need until chair.,782,2020-02-08 21:20:34.181664 71 | Music most improve.,182,2021-04-16 18:48:18.706793 72 | Teach animal else.,89,2023-02-11 12:38:38.889451 73 | Source million.,524,2020-10-10 07:25:17.320793 74 | Ok low dog kitchen.,20,2022-06-14 07:59:02.286582 75 | So more off economy.,141,2021-08-09 08:37:22.761744 76 | Treatment enough.,820,2022-08-19 01:40:09.408480 77 | Single part claim.,329,2022-08-09 06:18:55.659178 78 | Call feel herself.,493,2022-03-14 13:16:14.080570 79 | Radio maybe.,889,2021-02-20 09:16:54.309110 80 | Generation six.,338,2023-11-26 14:13:17.259366 81 | Hospital early name.,93,2023-08-02 10:48:38.616681 82 | Wrong baby kind.,285,2023-06-11 04:21:53.928823 83 | Could official.,913,2020-05-13 19:34:33.363381 84 | Why career drive.,11,2023-02-05 20:37:32.034290 85 | Lose kid.,75,2022-08-25 00:58:53.194594 86 | Go least back again.,296,2020-02-29 09:45:46.865171 87 | Likely guy 
myself.,182,2020-12-25 11:00:35.769362 88 | Radio recognize.,820,2024-05-27 09:47:07.016832 89 | Church pass pick.,153,2024-07-22 14:58:59.720392 90 | Power away yes.,698,2022-07-25 23:44:23.201949 91 | Memory price could.,404,2024-08-16 04:32:40.075796 92 | Occur become see.,394,2023-04-20 23:23:29.585479 93 | Money dog yes upon.,29,2023-12-05 17:28:58.889128 94 | Draw former nice.,718,2024-07-16 04:03:28.991104 95 | Throughout laugh.,526,2024-02-19 00:14:58.084525 96 | Then bring usually.,293,2024-01-05 09:11:48.163003 97 | Walk increase.,831,2021-04-10 14:46:23.641361 98 | Court laugh mother.,33,2020-05-19 20:46:55.292482 99 | Those through.,470,2020-11-03 11:57:23.034024 100 | Career such start.,389,2021-01-18 10:09:55.202721 101 | Remain enjoy.,934,2022-09-14 15:45:51.151535 102 | -------------------------------------------------------------------------------- /examples/npc_team/factory/example_models/customer_feedback.sql: -------------------------------------------------------------------------------- 1 | 2 | SELECT 3 | feedback, 4 | customer_id, 5 | timestamp 6 | FROM raw_customer_feedback 7 | WHERE LENGTH(feedback) > 10; 8 | -------------------------------------------------------------------------------- /examples/npc_team/factory/example_models/customer_insights.sql: -------------------------------------------------------------------------------- 1 | 2 | SELECT 3 | customer_id, 4 | feedback, 5 | timestamp, 6 | synthesize( 7 | "feedback text: {feedback}", 8 | "analyst", 9 | "feedback_analysis" 10 | ) as ai_analysis 11 | FROM {{ ref('customer_feedback') }}; 12 | -------------------------------------------------------------------------------- /examples/npc_team/factory/model_runner.py: -------------------------------------------------------------------------------- 1 | # Usage example: 2 | def main(): 3 | # Initialize 4 | runner = NPCModelRunner(npc_compiler) 5 | 6 | # Run first model 7 | with open("models/customer_feedback.sql", "r") as f: 8 | feedback_model = runner.run_model("customer_feedback", f.read()) 9 | print("First model results:") 10 | print(feedback_model.head()) 11 | 12 | # Run second model that depends on the first 13 | with open("models/customer_insights.sql", "r") as f: 14 | insights_model = runner.run_model("customer_insights", f.read()) 15 | print("\nSecond model results:") 16 | print(insights_model.head()) 17 | 18 | # Check history 19 | with sqlite3.connect(runner.history_db) as conn: 20 | history = pd.read_sql( 21 | "SELECT * FROM model_runs ORDER BY run_timestamp DESC", conn 22 | ) 23 | print("\nModel run history:") 24 | print(history) 25 | 26 | 27 | if __name__ == "__main__": 28 | main() 29 | -------------------------------------------------------------------------------- /examples/npc_team/factory/models/customer_feedback.sql: -------------------------------------------------------------------------------- 1 | 2 | SELECT 3 | feedback, 4 | customer_id, 5 | timestamp 6 | FROM raw_customer_feedback 7 | WHERE LENGTH(feedback) > 10; 8 | -------------------------------------------------------------------------------- /examples/npc_team/factory/models/customer_insights.sql: -------------------------------------------------------------------------------- 1 | 2 | SELECT 3 | customer_id, 4 | feedback, 5 | timestamp, 6 | synthesize( 7 | "feedback text: {feedback}", 8 | "analyst", 9 | "feedback_analysis" 10 | ) as ai_analysis 11 | FROM {{ ref('customer_feedback') }}; 12 | -------------------------------------------------------------------------------- 
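The two factory models above follow a dbt-style pattern: customer_feedback.sql stages the raw rows, while customer_insights.sql depends on it through {{ ref('customer_feedback') }} and layers an LLM pass on top via the synthesize() SQL function. A minimal sketch of how the ref() template might be resolved before execution is shown here; render_ref is a hypothetical helper (the real resolution lives in npcpy/sql and may differ), and synthesize() is assumed to be registered by the model runner rather than by this snippet.

import re

def render_ref(sql: str) -> str:
    # Replace each {{ ref('model_name') }} with the materialized table of that model.
    return re.sub(r"\{\{\s*ref\('(\w+)'\)\s*\}\}", r"\1", sql)

with open("models/customer_insights.sql") as f:
    resolved_sql = render_ref(f.read())
# resolved_sql now selects FROM customer_feedback and can be handed to the runner.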
/examples/npc_team/factory/morning_routine.pipe: -------------------------------------------------------------------------------- 1 | steps: 2 | - step_name: "review_email" 3 | npc: "email_assistant" 4 | task: "Get me up to speed on my recent emails" 5 | 6 | - step_name: "market_update" 7 | npc: "market_analyst" 8 | task: "Give me an update on the latest events in the market." 9 | 10 | - step_name: "summarize" 11 | npc: "sibiji" 12 | task: "Review the outputs from the email_assistant and market_analysts and provide me with a summary." -------------------------------------------------------------------------------- /examples/npc_team/factory/npc_project.yml: -------------------------------------------------------------------------------- 1 | vars: 2 | business_context: 3 | market_focus: growth 4 | priority: customer_retention 5 | quarter: Q4 -------------------------------------------------------------------------------- /examples/npc_team/market_analyst.npc: -------------------------------------------------------------------------------- 1 | name: market_analyst 2 | primary_directive: Provide updates on the latest market events 3 | model: gpt-4o-mini 4 | provider: openai -------------------------------------------------------------------------------- /examples/pipe_examples.md: -------------------------------------------------------------------------------- 1 | # Pipe file content to NPC sample command 2 | cat data.json | npc sample "Summarize this JSON data" 3 | 4 | # Pipe command output to NPC 5 | ls -la | npc sample "Explain what these files are" 6 | 7 | # Use grep to filter logs and have NPC analyze them 8 | grep ERROR /var/log/application.log | npc sample "What are the common error patterns?" 9 | 10 | # Use curl to get API response and analyze with NPC 11 | curl -s https://api.example.com/data | npc sample "Analyze this API response" 12 | 13 | # Create a multi-line prompt using heredoc 14 | cat << EOF | npc sample 15 | I need to understand how to structure a React application. 16 | What are the best practices for component organization? 17 | Should I use Redux or Context API for state management? 
18 | EOF 19 | 20 | 21 | # Chain NPC commands using xargs 22 | npc search "machine learning algorithms" | xargs -I {} npc sample "Explain {} in detail" 23 | 24 | # Use output from one NPC command as input to another 25 | npc sample "Generate 5 test cases" | npc sample "Convert these test cases to JavaScript code" 26 | 27 | # Use NPC to generate code and then analyze it 28 | npc sample "Write a Python sorting algorithm" | npc sample "Review this code for efficiency" 29 | 30 | # Generate image description and then create image 31 | npc sample "Describe a futuristic cityscape" | xargs npc vixynt 32 | 33 | 34 | # Save NPC output to a file 35 | npc sample "Write a Python script to process CSV files" > process_csv.py 36 | 37 | # Count words in NPC response 38 | npc sample "Write a short essay about AI" | wc -w 39 | 40 | # Format NPC output using jq (if JSON output is enabled) 41 | npc search "cryptocurrency news" --format=json | jq '.results[0]' 42 | 43 | # Use NPC output as input to another tool 44 | npc sample "Generate a list of SQL commands" | sqlite3 mydatabase.db 45 | 46 | # Filter NPC response 47 | npc sample "List 20 programming languages with their use cases" | grep -i "python" 48 | 49 | # Send NPC output to clipboard 50 | npc sample "Write a shell script to backup files" | xclip -selection clipboard 51 | 52 | # Generate code and make it executable 53 | npc sample "Write a bash script to organize files by extension" > organize.sh && chmod +x organize.sh 54 | 55 | # Save NPC image generation to specific file 56 | npc vixynt "A cyberpunk city" | convert - cyberpunk_city.png -------------------------------------------------------------------------------- /examples/presenter.npc: -------------------------------------------------------------------------------- 1 | name: presenter 2 | primary_directive: Create a presentation deck summarizing the analysis. 3 | jinxs: 4 | - deck_creator 5 | model: claude-3-haiku-20240307 6 | provider: anthropic -------------------------------------------------------------------------------- /examples/project.ctx: -------------------------------------------------------------------------------- 1 | 2 | databases: 3 | -'~/npcsh_history.db' 4 | files: 5 | -[ './*.npc',] 6 | vars: 7 | -age: 30 8 | 9 | -------------------------------------------------------------------------------- /examples/researcher.npc: -------------------------------------------------------------------------------- 1 | name: researcher 2 | model: llama3.2 3 | provider: ollama 4 | primary_directive: | 5 | You are an AI researcher assistant. Your duty is to support the 6 | investigation of news articles by providing background research, 7 | identifying sources, and ensuring that facts are checked thoroughly 8 | for accuracy. 
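Like the other profiles in this directory, researcher.npc can be loaded straight into an NPC object. A minimal sketch, assuming the constructor signature used in tests/test_npc_tools_network.py, the default history database path, and a working directory containing this file:

import os
import sqlite3
from npcpy.npc_compiler import NPC

db_conn = sqlite3.connect(os.path.expanduser("~/npcsh_history.db"))
researcher = NPC(file="researcher.npc", db_conn=db_conn)
reply = researcher.get_llm_response("Fact-check the key claims in this draft article.")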
9 | -------------------------------------------------------------------------------- /examples/server_curl_examples.md: -------------------------------------------------------------------------------- 1 | curl -N -X POST http://localhost:5337/api/stream \ 2 | -H "Content-Type: application/json" \ 3 | -d '{ 4 | "commandstr": "Hello, how are you?", 5 | "conversationId": "test-conv-123", 6 | "model": "gpt-4o-mini", 7 | "provider": "openai", 8 | "saveToSqlite3": false, 9 | "npc": "sibiji", 10 | "currentPath": "/home/user", 11 | "messages": [], 12 | "attachments": [] 13 | }' -------------------------------------------------------------------------------- /examples/test_npc_tools_network.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | import os 3 | import yaml 4 | from jinja2 import Environment, FileSystemLoader, Template, Undefined 5 | import pandas as pd 6 | from typing import Dict, Any, Optional, Union, List, Set 7 | from collections import defaultdict, deque 8 | from npcpy.npc_compiler import NPC, Tool, Team 9 | 10 | import sqlite3 11 | import unittest 12 | 13 | 14 | db_path = os.path.expanduser("~/npcsh_history.db") 15 | db_conn = sqlite3.connect(db_path) 16 | 17 | 18 | class TestNPCFunctionality(unittest.TestCase): 19 | @classmethod 20 | def setUpClass(cls): 21 | # Set up a test SQLite database in memory 22 | cls.db_path = ":memory:" 23 | cls.conn = sqlite3.connect(cls.db_path) 24 | cls.create_test_tables() 25 | 26 | # Create test NPCs 27 | cls.npcs = cls.create_test_npcs() 28 | 29 | @classmethod 30 | def create_test_tables(cls): 31 | cursor = cls.conn.cursor() 32 | cursor.execute( 33 | """ 34 | CREATE TABLE IF NOT EXISTS user_feedback ( 35 | id INTEGER PRIMARY KEY AUTOINCREMENT, 36 | feedback TEXT, 37 | user_preference TEXT 38 | ) 39 | """ 40 | ) 41 | cls.conn.commit() 42 | 43 | @classmethod 44 | def create_test_npcs(cls): 45 | # Create three test NPCs with A/B testing focus 46 | # Load NPCs from files 47 | npc1 = NPC(file="./npc_team/datacollector.npc", db_conn=db_conn) 48 | npc2 = NPC(file="./npc_team/analyzer.npc", db_conn=db_conn) 49 | npc3 = NPC(file="./npc_team/presenter.npc", db_conn=db_conn) 50 | 51 | return [npc1, npc2, npc3] 52 | 53 | def test_npc_tool_loading(self): 54 | # Assert that default tools are loaded correctly 55 | for npc in self.npcs: 56 | self.assertGreater( 57 | len(npc.jinxs), 0, f"{npc.name} should have tools loaded." 58 | ) 59 | 60 | def test_npc_responses(self): 61 | # Simulate interaction to verify expected responses 62 | feedbacks = [ 63 | ("Feedback 1: I like the new layout!", "Positive"), 64 | ("Feedback 2: It's too cluttered.", "Negative"), 65 | ("Feedback 3: The color scheme needs to change.", "Neutral"), 66 | ] 67 | 68 | cursor = self.conn.cursor() 69 | cursor.executemany( 70 | "INSERT INTO user_feedback (feedback, user_preference) VALUES (?, ?);", 71 | feedbacks, 72 | ) 73 | self.conn.commit() 74 | 75 | # Test the DataCollector NPC 76 | responses = [] 77 | for feedback in feedbacks: 78 | response = self.npcs[0].get_llm_response( 79 | f"Gather feedback for: {feedback[0]}" 80 | ) 81 | responses.append(response) 82 | 83 | self.assertTrue( 84 | all(r is not None for r in responses), "All responses must be valid" 85 | ) 86 | 87 | def test_npc_interactions(self): 88 | # Assert that NPCs work together as expected 89 | feedback = "User prefers a minimalistic design with easy navigation." 
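# The three NPCs are exercised as a chain here: datacollector gathers the feedback,
# analyzer interprets it, and presenter turns the analysis into a deck.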
90 | 91 | # Collect feedback using DataCollector 92 | self.npcs[0].get_llm_response(f"User Feedback: {feedback}") 93 | 94 | # Analyze the feedback using Analyzer 95 | analysis = self.npcs[1].get_llm_response(f"Analyze feedback: {feedback}") 96 | self.assertIsNotNone(analysis, "Analysis should return a valid result") 97 | 98 | # Create a presentation using Presenter 99 | presentation = self.npcs[2].get_llm_response( 100 | f"Create presentation from analysis: {analysis}" 101 | ) 102 | self.assertIsNotNone(presentation, "Presentation should return a valid result") 103 | 104 | 105 | if __name__ == "__main__": 106 | unittest.main() 107 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: npcpy 2 | site_url: https://npcpy.readthedocs.io/ 3 | repo_url: https://github.com/cagostino/npcpy 4 | 5 | theme: 6 | name: readthedocs 7 | features: 8 | - navigation.tabs 9 | - content.code.copy 10 | - content.tabs.link 11 | 12 | markdown_extensions: 13 | - admonition 14 | - pymdownx.superfences 15 | - attr_list 16 | - pymdownx.highlight: 17 | anchor_linenums: true 18 | 19 | plugins: 20 | - mkdocstrings: 21 | handlers: 22 | python: 23 | import: 24 | - https://docs.python.org/3/objects.inv 25 | selection: 26 | members: 27 | - public 28 | filters: 29 | - "!^__init__$" # Properly excludes __init__ 30 | rendering: 31 | show_source: true 32 | show_root_heading: false 33 | nav: 34 | - Home: index.md 35 | - Installation: installation.md 36 | - 'Using npcsh, the NPC Shell': guide.md 37 | - 'Using alicanto': alicanto.md 38 | - 'Using guac': guac.md 39 | - 'Using pti': pti.md 40 | - 'Using spool': spool.md 41 | - 'Using wander': wander.md 42 | - 'Using yap': yap.md 43 | 44 | 45 | - TLDR Cheat Sheet: TLDR_Cheat_sheet.md 46 | - API: 47 | - Image: api/image.md 48 | - LLM Functions: api/llm_funcs.md 49 | - NPC, Team, Jinx: api/npc_compiler.md 50 | - NPC Sys Env Helpers: api/npc_sys_env_helpers.md -------------------------------------------------------------------------------- /npcpy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy.png -------------------------------------------------------------------------------- /npcpy/__init__.py: -------------------------------------------------------------------------------- 1 | from . import npc_compiler 2 | from . import npc_sysenv 3 | from . import routes 4 | from . import llm_funcs 5 | from . import modes 6 | try: 7 | from . import npcs 8 | except ImportError: 9 | pass 10 | from . import sql 11 | from . import work 12 | 13 | from . 
import gen -------------------------------------------------------------------------------- /npcpy/data/__init__.py: -------------------------------------------------------------------------------- 1 | # Import statements 2 | import numpy as np 3 | 4 | # Define sample primary directives for NPCs 5 | sample_primary_directives = [ 6 | ("Research Assistant", "Help users find and analyze information"), 7 | ("Creativity Coach", "Assist with creative tasks and idea generation"), 8 | ("Problem Solver", "Help users approach complex problems methodically"), 9 | ("Learning Guide", "Support educational endeavors and learning new skills"), 10 | ("Critical Thinker", "Analyze situations from multiple perspectives"), 11 | ("Strategic Planner", "Help develop and refine strategic plans"), 12 | ("Summarizer", "Condense and extract key information from complex data") 13 | ] -------------------------------------------------------------------------------- /npcpy/data/data_models.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | from typing import List, Dict 3 | 4 | 5 | class NPC_Model(BaseModel): 6 | name: str 7 | primary_directive: str 8 | model: str 9 | provider: str 10 | api_url: str 11 | jinxs: List[str] 12 | 13 | 14 | class Jinx_Model(BaseModel): 15 | jinx_name: str 16 | description: str 17 | steps: List[Dict[str, str]] 18 | 19 | 20 | class JinxStep_Model(BaseModel): 21 | engine: str 22 | code: str 23 | 24 | 25 | class Context_Model(BaseModel): 26 | databases: List[str] 27 | files: List[str] 28 | vars: List[Dict[str, str]] 29 | 30 | 31 | class Pipeline_Model(BaseModel): 32 | steps: List[Dict[str, str]] 33 | 34 | 35 | class PipelineStep_Model(BaseModel): 36 | jinx: str 37 | args: List[str] 38 | model: str 39 | provider: str 40 | task: str 41 | npc: str 42 | -------------------------------------------------------------------------------- /npcpy/data/dataframes.py: -------------------------------------------------------------------------------- 1 | ## functions for dataframes 2 | import os 3 | import sqlite3 4 | import json 5 | import pandas as pd 6 | import numpy as np 7 | import io 8 | from PIL import Image 9 | from typing import Optional 10 | 11 | from npcpy.llm_funcs import get_llm_response 12 | 13 | # from npcpy.audio import process_audio 14 | # from npcpy.video import process_video 15 | 16 | from npcpy.data.load import ( 17 | load_pdf, 18 | load_csv, 19 | load_json, 20 | load_excel, 21 | load_txt, 22 | load_image, 23 | ) 24 | 25 | 26 | def load_data_into_table( 27 | file_path: str, table_name: str, cursor: sqlite3.Cursor, conn: sqlite3.Connection 28 | ) -> None: 29 | """ 30 | Function Description: 31 | This function is used to load data into a table. 32 | Args: 33 | file_path : str : The file path. 34 | table_name : str : The table name. 35 | cursor : sqlite3.Cursor : The SQLite cursor. 36 | conn : sqlite3.Connection : The SQLite connection. 
37 | Keyword Args: 38 | None 39 | Returns: 40 | None 41 | """ 42 | try: 43 | if not os.path.exists(file_path): 44 | raise FileNotFoundError(f"File not found: {file_path}") 45 | 46 | # Determine file type and load data 47 | if file_path.endswith(".csv"): 48 | df = pd.read_csv(file_path) 49 | elif file_path.endswith(".pdf"): 50 | df = load_pdf(file_path) 51 | elif file_path.endswith((".txt", ".log", ".md")): 52 | df = load_txt(file_path) 53 | elif file_path.endswith((".xls", ".xlsx")): 54 | df = load_excel(file_path) 55 | elif file_path.lower().endswith( 56 | (".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff") 57 | ): 58 | # Handle images as NumPy arrays 59 | df = load_image(file_path) 60 | elif file_path.lower().endswith( 61 | (".mp4", ".avi", ".mov", ".mkv") 62 | ): # Video files 63 | video_frames, audio_array = process_video(file_path) 64 | # Store video frames and audio 65 | df = pd.DataFrame( 66 | { 67 | "video_frames": [video_frames.tobytes()], 68 | "shape": [video_frames.shape], 69 | "dtype": [video_frames.dtype.str], 70 | "audio_array": ( 71 | [audio_array.tobytes()] if audio_array is not None else None 72 | ), 73 | "audio_rate": [sr] if audio_array is not None else None, 74 | } 75 | ) 76 | 77 | elif file_path.lower().endswith((".mp3", ".wav", ".ogg")): # Audio files 78 | audio_array, sr = process_audio(file_path) 79 | df = pd.DataFrame( 80 | { 81 | "audio_array": [audio_array.tobytes()], 82 | "audio_rate": [sr], 83 | } 84 | ) 85 | else: 86 | # Attempt to load as text if no other type matches 87 | try: 88 | df = load_txt(file_path) 89 | except Exception as e: 90 | print(f"Could not load file: {e}") 91 | return 92 | 93 | # Store DataFrame in the database 94 | df.to_sql(table_name, conn, if_exists="replace", index=False) 95 | print(f"Data from '{file_path}' loaded into table '{table_name}'") 96 | 97 | except Exception as e: 98 | raise e # Re-raise the exception for handling in enter_observation_mode 99 | 100 | 101 | def create_new_table(cursor: sqlite3.Cursor, conn: sqlite3.Connection) -> None: 102 | """ 103 | Function Description: 104 | This function is used to create a new table. 105 | Args: 106 | cursor : sqlite3.Cursor : The SQLite cursor. 107 | conn : sqlite3.Connection : The SQLite connection. 108 | Keyword Args: 109 | None 110 | Returns: 111 | None 112 | """ 113 | 114 | table_name = input("Enter new table name: ").strip() 115 | columns = input("Enter column names separated by commas: ").strip() 116 | 117 | create_query = ( 118 | f"CREATE TABLE {table_name} (id INTEGER PRIMARY KEY AUTOINCREMENT, {columns})" 119 | ) 120 | cursor.execute(create_query) 121 | conn.commit() 122 | print(f"Table '{table_name}' created successfully.") 123 | 124 | 125 | def delete_table(cursor: sqlite3.Cursor, conn: sqlite3.Connection) -> None: 126 | """ 127 | Function Description: 128 | This function is used to delete a table. 129 | Args: 130 | cursor : sqlite3.Cursor : The SQLite cursor. 131 | conn : sqlite3.Connection : The SQLite connection. 132 | Keyword Args: 133 | None 134 | Returns: 135 | None 136 | """ 137 | 138 | table_name = input("Enter table name to delete: ").strip() 139 | cursor.execute(f"DROP TABLE IF EXISTS {table_name}") 140 | conn.commit() 141 | print(f"Table '{table_name}' deleted successfully.") 142 | 143 | 144 | def add_observation( 145 | cursor: sqlite3.Cursor, conn: sqlite3.Connection, table_name: str 146 | ) -> None: 147 | """ 148 | Function Description: 149 | This function is used to add an observation. 150 | Args: 151 | cursor : sqlite3.Cursor : The SQLite cursor. 
152 | conn : sqlite3.Connection : The SQLite connection. 153 | table_name : str : The table name. 154 | Keyword Args: 155 | None 156 | Returns: 157 | None 158 | """ 159 | 160 | cursor.execute(f"PRAGMA table_info({table_name})") 161 | columns = [column[1] for column in cursor.fetchall() if column[1] != "id"] 162 | 163 | values = [] 164 | for column in columns: 165 | value = input(f"Enter value for {column}: ").strip() 166 | values.append(value) 167 | 168 | insert_query = f"INSERT INTO {table_name} ({','.join(columns)}) VALUES ({','.join(['?' for _ in columns])})" 169 | cursor.execute(insert_query, values) 170 | conn.commit() 171 | print("Observation added successfully.") 172 | -------------------------------------------------------------------------------- /npcpy/data/load.py: -------------------------------------------------------------------------------- 1 | import fitz # PyMuPDF 2 | import pandas as pd 3 | import json 4 | import io 5 | from PIL import Image 6 | import numpy as np 7 | from typing import Optional 8 | 9 | import os 10 | 11 | def load_csv(file_path): 12 | df = pd.read_csv(file_path) 13 | return df 14 | 15 | 16 | def load_json(file_path): 17 | with open(file_path, "r") as f: 18 | data = json.load(f) 19 | df = pd.DataFrame(data) 20 | return df 21 | 22 | 23 | def load_txt(file_path): 24 | with open(file_path, "r") as f: 25 | text = f.read() 26 | df = pd.DataFrame({"text": [text]}) 27 | return df 28 | 29 | 30 | def load_excel(file_path): 31 | df = pd.read_excel(file_path) 32 | return df 33 | 34 | 35 | def load_image(file_path): 36 | img = Image.open(file_path) 37 | img_array = np.array(img) 38 | df = pd.DataFrame( 39 | { 40 | "image_array": [img_array.tobytes()], 41 | "shape": [img_array.shape], 42 | "dtype": [img_array.dtype.str], 43 | } 44 | ) 45 | return df 46 | 47 | 48 | def load_pdf(file_path): 49 | pdf_document = fitz.open(file_path) 50 | texts = [] 51 | images = [] 52 | 53 | for page_num, page in enumerate(pdf_document): 54 | # Extract text 55 | text = page.get_text() 56 | texts.append({"page": page_num + 1, "content": text}) 57 | 58 | # Extract images 59 | image_list = page.get_images(full=True) 60 | for img_index, img in enumerate(image_list): 61 | xref = img[0] 62 | base_image = pdf_document.extract_image(xref) 63 | image_bytes = base_image["image"] 64 | 65 | # Convert image to numpy array 66 | image = Image.open(io.BytesIO(image_bytes)) 67 | img_array = np.array(image) 68 | 69 | images.append( 70 | { 71 | "page": page_num + 1, 72 | "index": img_index + 1, 73 | "array": img_array.tobytes(), 74 | "shape": img_array.shape, 75 | "dtype": str(img_array.dtype), 76 | } 77 | ) 78 | 79 | # Create DataFrame 80 | df = pd.DataFrame( 81 | {"texts": json.dumps(texts), "images": json.dumps(images)}, index=[0] 82 | ) 83 | 84 | return df 85 | 86 | 87 | extension_map = { 88 | "PNG": "images", 89 | "JPG": "images", 90 | "JPEG": "images", 91 | "GIF": "images", 92 | "SVG": "images", 93 | "MP4": "videos", 94 | "AVI": "videos", 95 | "MOV": "videos", 96 | "WMV": "videos", 97 | "MPG": "videos", 98 | "MPEG": "videos", 99 | "DOC": "documents", 100 | "DOCX": "documents", 101 | "PDF": "documents", 102 | "PPT": "documents", 103 | "PPTX": "documents", 104 | "XLS": "documents", 105 | "XLSX": "documents", 106 | "TXT": "documents", 107 | "CSV": "documents", 108 | "ZIP": "archives", 109 | "RAR": "archives", 110 | "7Z": "archives", 111 | "TAR": "archives", 112 | "GZ": "archives", 113 | "BZ2": "archives", 114 | "ISO": "archives", 115 | } 116 | 117 | 118 | def load_file_contents(file_path, 
chunk_size=250): 119 | """ 120 | Load and format the contents of a file based on its extension. 121 | Returns a list of chunks from the file content. 122 | """ 123 | file_ext = os.path.splitext(file_path)[1].upper().lstrip('.') 124 | chunks = [] 125 | 126 | try: 127 | if file_ext == 'PDF': 128 | # Load PDF content 129 | pdf_document = fitz.open(file_path) 130 | full_text = "" 131 | 132 | # Extract text from each page 133 | for page in pdf_document: 134 | full_text += page.get_text() + "\n\n" 135 | 136 | # Chunk the text 137 | for i in range(0, len(full_text), chunk_size): 138 | chunk = full_text[i:i+chunk_size].strip() 139 | if chunk: # Skip empty chunks 140 | chunks.append(chunk) 141 | 142 | elif file_ext == 'CSV': 143 | df = pd.read_csv(file_path) 144 | # Add metadata as first chunk 145 | meta = f"CSV Columns: {', '.join(df.columns)}\nRows: {len(df)}" 146 | chunks.append(meta) 147 | 148 | # Convert sample data to string and chunk it 149 | sample = df.head(20).to_string() 150 | for i in range(0, len(sample), chunk_size): 151 | chunk = sample[i:i+chunk_size].strip() 152 | if chunk: 153 | chunks.append(chunk) 154 | 155 | elif file_ext in ['XLS', 'XLSX']: 156 | df = pd.read_excel(file_path) 157 | # Add metadata as first chunk 158 | meta = f"Excel Columns: {', '.join(df.columns)}\nRows: {len(df)}" 159 | chunks.append(meta) 160 | 161 | # Convert sample data to string and chunk it 162 | sample = df.head(20).to_string() 163 | for i in range(0, len(sample), chunk_size): 164 | chunk = sample[i:i+chunk_size].strip() 165 | if chunk: 166 | chunks.append(chunk) 167 | 168 | elif file_ext == 'TXT': 169 | with open(file_path, 'r', encoding='utf-8') as f: 170 | content = f.read() 171 | 172 | # Chunk the text 173 | for i in range(0, len(content), chunk_size): 174 | chunk = content[i:i+chunk_size].strip() 175 | if chunk: 176 | chunks.append(chunk) 177 | 178 | elif file_ext == 'JSON': 179 | with open(file_path, 'r', encoding='utf-8') as f: 180 | data = json.load(f) 181 | content = json.dumps(data, indent=2) 182 | 183 | # Chunk the JSON 184 | for i in range(0, len(content), chunk_size): 185 | chunk = content[i:i+chunk_size].strip() 186 | if chunk: 187 | chunks.append(chunk) 188 | 189 | else: 190 | chunks.append(f"Unsupported file format: {file_ext}") 191 | 192 | return chunks 193 | 194 | except Exception as e: 195 | return [f"Error loading file {file_path}: {str(e)}"] -------------------------------------------------------------------------------- /npcpy/data/text.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, List, Optional, Union 2 | import os 3 | import numpy as np 4 | try: 5 | from sentence_transformers import SentenceTransformer, util 6 | except: 7 | pass 8 | 9 | def rag_search( 10 | query: str, 11 | text_data: Union[Dict[str, str], str], 12 | embedding_model: Any = None, 13 | text_data_embedded: Optional[Dict[str, np.ndarray]] = None, 14 | similarity_threshold: float = 0.3, 15 | device="cpu", 16 | ) -> List[str]: 17 | """ 18 | Function Description: 19 | This function retrieves lines from documents that are relevant to the query. 20 | Args: 21 | query: The query string. 22 | text_data: A dictionary with file paths as keys and file contents as values. 23 | embedding_model: The sentence embedding model. 24 | Keyword Args: 25 | text_data_embedded: A dictionary with file paths as keys and embedded file contents as values. 26 | similarity_threshold: The similarity threshold for considering a line relevant. 
27 | Returns: 28 | A list of relevant snippets. 29 | 30 | """ 31 | if embedding_model is None: 32 | try: 33 | embedding_model = SentenceTransformer("all-MiniLM-L6-v2") 34 | except: 35 | raise Exception( 36 | "Please install the sentence-transformers library to use this function or provide an embedding transformer model." 37 | ) 38 | results = [] 39 | 40 | # Compute the embedding of the query 41 | query_embedding = embedding_model.encode( 42 | query, convert_to_tensor=True, show_progress_bar=False 43 | ) 44 | if isinstance(text_data, str): 45 | # split at the sentence level 46 | lines = text_data.split(".") 47 | if not lines: 48 | return results 49 | # Compute embeddings for each line 50 | if text_data_embedded is None: 51 | line_embeddings = embedding_model.encode(lines, convert_to_tensor=True) 52 | else: 53 | line_embeddings = text_data_embedded 54 | # Compute cosine similarities 55 | cosine_scores = util.cos_sim(query_embedding, line_embeddings)[0].cpu().numpy() 56 | 57 | # Find indices of lines above the similarity threshold 58 | relevant_line_indices = np.where(cosine_scores >= similarity_threshold)[0] 59 | # print("relevant_line_indices", cosine_scores) 60 | # print(np.mean(cosine_scores)) 61 | # print(np.max(cosine_scores)) 62 | 63 | for idx in relevant_line_indices: 64 | idx = int(idx) 65 | # Get context lines (±10 lines) 66 | start_idx = max(0, idx - 10) 67 | end_idx = min(len(lines), idx + 11) # +11 because end index is exclusive 68 | snippet = ". ".join(lines[start_idx:end_idx]) 69 | results.append(snippet) 70 | 71 | elif isinstance(text_data, dict): 72 | for filename, content in text_data.items(): 73 | # Split content into lines 74 | lines = content.split("\n") 75 | if not lines: 76 | continue 77 | # Compute embeddings for each line 78 | if text_data_embedded is None: 79 | line_embeddings = embedding_model.encode(lines, convert_to_tensor=True) 80 | else: 81 | line_embeddings = text_data_embedded[filename] 82 | # Compute cosine similarities 83 | cosine_scores = ( 84 | util.cos_sim(query_embedding, line_embeddings)[0].cpu().numpy() 85 | ) 86 | 87 | # Find indices of lines above the similarity threshold 88 | ##print("most similar", np.max(cosine_scores)) 89 | ##print("most similar doc", lines[np.argmax(cosine_scores)]) 90 | relevant_line_indices = np.where(cosine_scores >= similarity_threshold)[0] 91 | # print("relevant_line_indices", cosine_scores) 92 | # print(np.mean(cosine_scores)) 93 | # print(np.max(cosine_scores)) 94 | for idx in relevant_line_indices: 95 | idx = int(idx) # Ensure idx is an integer 96 | # Get context lines (±10 lines) 97 | start_idx = max(0, idx - 10) 98 | end_idx = min( 99 | len(lines), idx + 11 100 | ) # +11 because end index is exclusive 101 | snippet = "\n".join(lines[start_idx:end_idx]) 102 | results.append((filename, snippet)) 103 | # print("results", results) 104 | return results 105 | 106 | 107 | 108 | 109 | def load_all_files( 110 | directory: str, extensions: List[str] = None, depth: int = 1 111 | ) -> Dict[str, str]: 112 | """ 113 | Function Description: 114 | This function loads all text files in a directory and its subdirectories. 115 | Args: 116 | directory: The directory to search. 117 | Keyword Args: 118 | extensions: A list of file extensions to include. 119 | depth: The depth of subdirectories to search. 120 | Returns: 121 | A dictionary with file paths as keys and file contents as values. 122 | """ 123 | text_data = {} 124 | if depth < 1: 125 | return text_data # Reached the specified depth, stop recursion. 
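# Depth semantics: depth=1 scans only this directory; each recursive call below passes depth - 1,
# so deeper levels stop contributing once the depth budget is exhausted.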
126 | 127 | if extensions is None: 128 | # Default to common text file extensions 129 | extensions = [ 130 | ".txt", 131 | ".md", 132 | ".py", 133 | ".java", 134 | ".c", 135 | ".cpp", 136 | ".html", 137 | ".css", 138 | ".js", 139 | ".ts", 140 | ".tsx", 141 | ".npc", 142 | # Add more extensions if needed 143 | ] 144 | 145 | try: 146 | # List all entries in the directory 147 | entries = os.listdir(directory) 148 | except Exception as e: 149 | print(f"Could not list directory {directory}: {e}") 150 | return text_data 151 | 152 | for entry in entries: 153 | path = os.path.join(directory, entry) 154 | if os.path.isfile(path): 155 | if any(path.endswith(ext) for ext in extensions): 156 | try: 157 | with open(path, "r", encoding="utf-8", errors="ignore") as file: 158 | text_data[path] = file.read() 159 | except Exception as e: 160 | print(f"Could not read file {path}: {e}") 161 | elif os.path.isdir(path): 162 | # Recurse into subdirectories, decreasing depth by 1 163 | subdir_data = load_all_files(path, extensions, depth=depth - 1) 164 | text_data.update(subdir_data) 165 | 166 | return text_data 167 | -------------------------------------------------------------------------------- /npcpy/data/video.py: -------------------------------------------------------------------------------- 1 | # video.py 2 | 3 | 4 | def process_video(file_path, table_name): 5 | # implement with moon dream 6 | 7 | embeddings = [] 8 | texts = [] 9 | try: 10 | video = cv2.VideoCapture(file_path) 11 | fps = video.get(cv2.CAP_PROP_FPS) 12 | frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) 13 | 14 | for i in range(frame_count): 15 | ret, frame = video.read() 16 | if not ret: 17 | break 18 | 19 | # Process every nth frame (adjust n as needed for performance) 20 | n = 10 # Process every 10th frame 21 | if i % n == 0: 22 | # Image Embeddings 23 | _, buffer = cv2.imencode(".jpg", frame) # Encode frame as JPG 24 | base64_image = base64.b64encode(buffer).decode("utf-8") 25 | image_info = { 26 | "filename": f"frame_{i}.jpg", 27 | "file_path": f"data:image/jpeg;base64,{base64_image}", 28 | } # Use data URL for OpenAI 29 | image_embedding_response = get_llm_response( 30 | "Describe this image.", 31 | image=image_info, 32 | model="gpt-4", 33 | provider="openai", 34 | ) # Replace with your image embedding model 35 | if ( 36 | isinstance(image_embedding_response, dict) 37 | and "error" in image_embedding_response 38 | ): 39 | print( 40 | f"Error generating image embedding: {image_embedding_response['error']}" 41 | ) 42 | else: 43 | # Assuming your image embedding model returns a textual description 44 | embeddings.append(image_embedding_response) 45 | texts.append(f"Frame {i}: {image_embedding_response}") 46 | 47 | video.release() 48 | return embeddings, texts 49 | 50 | except Exception as e: 51 | print(f"Error processing video: {e}") 52 | return [], [] # Return empty lists in case of error 53 | -------------------------------------------------------------------------------- /npcpy/data/web.py: -------------------------------------------------------------------------------- 1 | # search.py 2 | 3 | import requests 4 | import os 5 | 6 | from bs4 import BeautifulSoup 7 | from duckduckgo_search import DDGS 8 | from duckduckgo_search.exceptions import DuckDuckGoSearchException 9 | 10 | try: 11 | from googlesearch import search 12 | except: 13 | pass 14 | from typing import List, Dict, Any, Optional, Union 15 | import numpy as np 16 | import json 17 | 18 | try: 19 | from sentence_transformers import util, SentenceTransformer 20 | except: 
21 | pass 22 | 23 | 24 | def search_perplexity( 25 | query: str, 26 | api_key: str = None, 27 | model: str = "sonar", 28 | max_tokens: int = 400, 29 | temperature: float = 0.2, 30 | top_p: float = 0.9, 31 | ): 32 | if api_key is None: 33 | api_key = os.environ["PERPLEXITY_API_KEY"] 34 | # print("api_key", api_key) 35 | url = "https://api.perplexity.ai/chat/completions" 36 | payload = { 37 | "model": "sonar", 38 | "messages": [ 39 | {"role": "system", "content": "Be precise and concise."}, 40 | {"role": "user", "content": query}, 41 | ], 42 | "max_tokens": max_tokens, 43 | "temperature": temperature, 44 | "top_p": top_p, 45 | "return_images": False, 46 | "return_related_questions": False, 47 | "search_recency_filter": "month", 48 | "top_k": 0, 49 | "stream": False, 50 | "presence_penalty": 0, 51 | "frequency_penalty": 1, 52 | "response_format": None, 53 | } 54 | 55 | # Headers for the request, including the Authorization bearer token 56 | headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"} 57 | 58 | # Make the POST request to the API 59 | response = requests.post(url, json=payload, headers=headers) 60 | response = json.loads(response.text) 61 | #print(response) 62 | return [response["choices"][0]["message"]["content"], response["citations"]] 63 | 64 | 65 | def search_web( 66 | query: str, 67 | num_results: int = 5, 68 | provider: str=None, 69 | api_key=None, 70 | perplexity_kwargs: Optional[Dict[str, Any]] = None, 71 | ) -> List[Dict[str, str]]: 72 | """ 73 | Function Description: 74 | This function searches the web for information based on a query. 75 | Args: 76 | query: The search query. 77 | Keyword Args: 78 | num_results: The number of search results to retrieve. 79 | provider: The search engine provider to use ('google' or 'duckduckgo'). 80 | Returns: 81 | A list of dictionaries with 'title', 'link', and 'content' keys. 82 | """ 83 | if perplexity_kwargs is None: 84 | perplexity_kwargs = {} 85 | results = [] 86 | if provider is None: 87 | provider = 'duckduckgo' 88 | 89 | if provider == "perplexity": 90 | search_result = search_perplexity(query, api_key=api_key, **perplexity_kwargs) 91 | # print(search_result, type(search_result)) 92 | return search_result 93 | 94 | if provider == "duckduckgo": 95 | headers = { 96 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:124.0) Gecko/20100101 Firefox/124.0" 97 | } 98 | ddgs = DDGS(headers=headers) 99 | 100 | try: 101 | search_results = ddgs.text(query, max_results=num_results) 102 | urls = [r["href"] for r in search_results] 103 | results = [ 104 | {"title": r["title"], "link": r["href"], "content": r["body"]} 105 | for r in search_results 106 | ] 107 | except DuckDuckGoSearchException as e: 108 | print("DuckDuckGo search failed: ", e) 109 | urls = [] 110 | results = [] 111 | 112 | elif provider =='google': # google 113 | urls = list(search(query, num_results=num_results)) 114 | # google shit doesnt seem to be working anymore, apparently a lbock they made on browsers without js? 
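# For the non-perplexity providers, each result URL is fetched below and reduced to a title,
# the link, and the first ~500 characters of its paragraph text.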
115 | #print("urls", urls) 116 | #print(provider) 117 | for url in urls: 118 | try: 119 | # Fetch the webpage content 120 | headers = { 121 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" 122 | } 123 | response = requests.get(url, headers=headers, timeout=5) 124 | response.raise_for_status() 125 | 126 | # Parse with BeautifulSoup 127 | soup = BeautifulSoup(response.text, "html.parser") 128 | 129 | # Get title and content 130 | title = soup.title.string if soup.title else url 131 | 132 | # Extract text content and clean it up 133 | content = " ".join([p.get_text() for p in soup.find_all("p")]) 134 | content = " ".join(content.split()) # Clean up whitespace 135 | 136 | results.append( 137 | { 138 | "title": title, 139 | "link": url, 140 | "content": ( 141 | content[:500] + "..." if len(content) > 500 else content 142 | ), 143 | } 144 | ) 145 | 146 | except Exception as e: 147 | print(f"Error fetching {url}: {str(e)}") 148 | continue 149 | 150 | # except Exception as e: 151 | # print(f"Search error: {str(e)}") 152 | content_str = "\n".join( 153 | [r["content"] + "\n Citation: " + r["link"] + "\n\n\n" for r in results] 154 | ) 155 | link_str = "\n".join([r["link"] + "\n" for r in results]) 156 | return [content_str, link_str] 157 | 158 | -------------------------------------------------------------------------------- /npcpy/gen/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/gen/__init__.py -------------------------------------------------------------------------------- /npcpy/gen/embeddings.py: -------------------------------------------------------------------------------- 1 | ####### 2 | ####### 3 | ####### 4 | ####### 5 | ####### EMBEDDINGS 6 | ####### 7 | from typing import List, Dict, Optional 8 | import numpy as np 9 | from datetime import datetime 10 | 11 | try: 12 | from openai import OpenAI 13 | import anthropic 14 | except: 15 | pass 16 | 17 | def get_ollama_embeddings( 18 | texts: List[str], model: str = "nomic-embed-text" 19 | ) -> List[List[float]]: 20 | """Generate embeddings using Ollama.""" 21 | import ollama 22 | 23 | embeddings = [] 24 | for text in texts: 25 | response = ollama.embeddings(model=model, prompt=text) 26 | embeddings.append(response["embedding"]) 27 | return embeddings 28 | 29 | 30 | def get_openai_embeddings( 31 | texts: List[str], model: str = "text-embedding-3-small" 32 | ) -> List[List[float]]: 33 | """Generate embeddings using OpenAI.""" 34 | client = OpenAI() 35 | response = client.embeddings.create(input=texts, model=model) 36 | return [embedding.embedding for embedding in response.data] 37 | 38 | 39 | 40 | 41 | def store_embeddings_for_model( 42 | texts, 43 | embeddings, 44 | chroma_client, 45 | model, 46 | provider, 47 | metadata=None, 48 | ): 49 | collection_name = f"{provider}_{model}_embeddings" 50 | collection = chroma_client.get_collection(collection_name) 51 | 52 | # Create meaningful metadata for each document (adjust as necessary) 53 | if metadata is None: 54 | metadata = [{"text_length": len(text)} for text in texts] # Example metadata 55 | print( 56 | "metadata is none, creating metadata for each document as the length of the text" 57 | ) 58 | # Add embeddings to the collection with metadata 59 | collection.add( 60 | ids=[str(i) for i in range(len(texts))], 61 | embeddings=embeddings, 62 | metadatas=metadata, # 
Passing populated metadata 63 | documents=texts, 64 | ) 65 | 66 | 67 | def delete_embeddings_from_collection(collection, ids): 68 | """Delete embeddings by id from Chroma collection.""" 69 | if ids: 70 | collection.delete(ids=ids) # Only delete if ids are provided 71 | 72 | 73 | def get_embeddings( 74 | texts: List[str], 75 | model: str , 76 | provider: str, 77 | ) -> List[List[float]]: 78 | """Generate embeddings using the specified provider and store them in Chroma.""" 79 | if provider == "ollama": 80 | embeddings = get_ollama_embeddings(texts, model) 81 | elif provider == "openai": 82 | embeddings = get_openai_embeddings(texts, model) 83 | else: 84 | raise ValueError(f"Unsupported provider: {provider}") 85 | 86 | # Store the embeddings in the relevant Chroma collection 87 | # store_embeddings_for_model(texts, embeddings, model, provider) 88 | return embeddings 89 | -------------------------------------------------------------------------------- /npcpy/gen/video_gen.py: -------------------------------------------------------------------------------- 1 | def generate_video_diffusers( 2 | prompt, 3 | model, 4 | npc=None, 5 | device="gpu", 6 | output_path="", 7 | num_inference_steps=5, 8 | num_frames=25, 9 | height=256, 10 | width=256, 11 | ): 12 | 13 | import torch 14 | from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler 15 | import numpy as np 16 | import os 17 | import cv2 18 | 19 | # Load pipeline 20 | pipe = DiffusionPipeline.from_pretrained( 21 | "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float32 22 | ).to(device) 23 | pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) 24 | 25 | output = pipe( 26 | prompt, 27 | num_inference_steps=num_inference_steps, 28 | num_frames=num_frames, 29 | height=height, 30 | width=width, 31 | ) 32 | 33 | def save_frames_to_video(frames, output_path, fps=8): 34 | """Handle the specific 5D array format (1, num_frames, H, W, 3) with proper type conversion""" 35 | # Verify input format 36 | if not ( 37 | isinstance(frames, np.ndarray) 38 | and frames.ndim == 5 39 | and frames.shape[-1] == 3 40 | ): 41 | raise ValueError( 42 | f"Unexpected frame format. 
Expected 5D RGB array, got {frames.shape}" 43 | ) 44 | 45 | # Remove batch dimension and convert to 0-255 uint8 46 | frames = (frames[0] * 255).astype(np.uint8) # Shape: (num_frames, H, W, 3) 47 | 48 | # Get video dimensions 49 | height, width = frames.shape[1:3] 50 | 51 | # Create video writer 52 | fourcc = cv2.VideoWriter_fourcc(*"mp4v") 53 | video_writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height)) 54 | 55 | if not video_writer.isOpened(): 56 | raise IOError(f"Could not open video writer for {output_path}") 57 | 58 | # Write frames (convert RGB to BGR for OpenCV) 59 | for frame in frames: 60 | video_writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)) 61 | 62 | video_writer.release() 63 | print(f"Successfully saved {frames.shape[0]} frames to {output_path}") 64 | 65 | os.makedirs(os.path.expanduser("~/.npcsh/videos/"), exist_ok=True) 66 | if output_path == "": 67 | 68 | output_path = os.path.expanduser("~/.npcsh/videos/") + prompt[0:8] + ".mp4" 69 | save_frames_to_video(output.frames, output_path) 70 | return output_path 71 | -------------------------------------------------------------------------------- /npcpy/main.py: -------------------------------------------------------------------------------- 1 | def main(): 2 | print("Hello from npcsh!") 3 | 4 | if __name__ == "__main__": 5 | main() -------------------------------------------------------------------------------- /npcpy/memory/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/memory/__init__.py -------------------------------------------------------------------------------- /npcpy/memory/deep_research.py: -------------------------------------------------------------------------------- 1 | #deep_research 2 | import numpy as np 3 | from npcpy.npc_compiler import NPC 4 | 5 | from npcpy.memory.knowledge_graph import * 6 | import os 7 | 8 | from npcpy.data import sample_primary_directives 9 | 10 | def generate_random_npcs(num_npcs, model, provider): 11 | """ 12 | Function Description: 13 | This function generates a list of random NPCs. 14 | Args: 15 | num_npcs (int): The number of NPCs to generate; model and provider are passed through to each NPC. 16 | Returns: 17 | List[NPC]: A list of generated NPCs. 18 | """ 19 | # Initialize the list of NPCs 20 | npcs = [] 21 | 22 | # Generate the NPCs by sampling random primary directives 23 | for idx in np.random.choice(len(sample_primary_directives), num_npcs): 24 | npc = NPC(primary_directive=sample_primary_directives[idx][1], 25 | model=model, 26 | provider=provider,) 27 | 28 | npcs.append(npc) 29 | return npcs 30 | 31 | def generate_research_chain(request, npc, depth, memory=5, context=None): 32 | """ 33 | Function Description: 34 | This function generates a research chain for the given NPC. 35 | Args: 36 | npc (NPC): The NPC for which to generate the research chain. 37 | depth (int): The depth of the research chain. 38 | context (str, optional): Additional context for the research chain. Defaults to None. 39 | Returns: 40 | List[str]: A list of generated research chains. 41 | """ 42 | chain = [] 43 | first_message = f'The user has requested that you research the following: {request}. Please begin by providing a single specific question to ask. 
' 44 | if context: 45 | first_message += f'The user also provided this context: {context}' 46 | summary, question_raised = npc.search_and_ask(first_message) 47 | chain.append(first_message) 48 | chain.append(summary) 49 | chain.append(question_raised) 50 | 51 | 52 | 53 | for i in range(depth): 54 | memories = chain[-memory:] 55 | next_message = "\n".join(memories) + 'Last Search Summary: ' + summary + '. New Question' 56 | 57 | summary, question_raised = npc.search_and_ask(next_message) 58 | chain.append(next_message) 59 | chain.append(summary) 60 | chain.append(question_raised) 61 | return chain 62 | 63 | 64 | def prune_chains(): 65 | return 66 | 67 | 68 | 69 | # search and ask will have a check llm command more or less. 70 | def consolidate_research(chains, facts, groups, model, provider): 71 | prompt = f''' 72 | You are a research advisor reviewing the notes of your research assisitants who have been working on a request. 73 | The results from their efforts are contained here: 74 | 75 | {chains} 76 | 77 | Please identify the 3 most common ideas, the 3 most unusual ideas, and the 3 most important ideas. 78 | 79 | 80 | Provide your response as a json object with a list of json objects for "most_common_ideas", "most_unusual_ideas" and "most_important_ideas". 81 | 82 | Each of those json objects within the sublists should be structured like so: 83 | {{ 84 | 'idea': 'the idea', 85 | 'source_npc': 'the name of the npc chain that provided this idea', 86 | 'supporting_links': [ 87 | 'link1/to/local/file', 88 | 'link2/to/web/site', 89 | ], 90 | 'supporting_evidence' : [ 91 | 'script x was run by npc and verified this idea ', 92 | 'npc found evidence in site x y was run by npc and verified this idea ', 93 | ] 94 | }} 95 | 96 | The links should be a list of links to the original sources of the information that were contained within the chains themselves. 97 | The supporting evidence should be a list of the evidence that was used to support the idea. 98 | ''' 99 | ideas = get_llm_response(prompt, model=model, provider=provider, format='json') 100 | # build knowledge graph 101 | 102 | groups = identify_groups(facts, model=model, provider=provider) 103 | 104 | prompt = f''' 105 | You are a research advisor reviewing the notes of your research assisitants who have been working on a request. 106 | The results from their efforts are contained here: 107 | 108 | {facts} 109 | 110 | Additionally, we have already found some common ideas and have produced the following groups: 111 | {groups} 112 | 113 | 114 | Please identify the 3 most common ideas, the 3 most unusual ideas, and the 3 most important ideas. 115 | Provide your response as a json object with 3 lists each containing 3 items. 116 | 117 | ''' 118 | ideas_summarized = get_llm_response(prompt, model=model, provider=provider) 119 | 120 | return ideas, ideas_summarized 121 | 122 | 123 | 124 | ## ultimately wwell do the vector store in the main db. 
so when we eventually starti adding new facts well do so by checking similar facts 125 | # there and then if were doing the rag search well do a rag and then graph 126 | -------------------------------------------------------------------------------- /npcpy/migrations/migrate_add_team_v0337.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | import os 3 | import sys 4 | 5 | # --- Configuration --- 6 | # Make sure this matches the path used in your CommandHistory class 7 | DB_PATH = os.path.expanduser("~/npcsh_history.db") 8 | TABLE_NAME = "conversation_history" 9 | COLUMN_NAME = "team" 10 | COLUMN_TYPE = "TEXT" # Or whatever type you intend (TEXT is usually safe for this) 11 | # --- End Configuration --- 12 | 13 | def add_column_if_not_exists(db_path, table_name, column_name, column_type): 14 | """Adds a column to a table if it doesn't already exist.""" 15 | conn = None 16 | try: 17 | print(f"Connecting to database: {db_path}") 18 | if not os.path.exists(db_path): 19 | print(f"Error: Database file not found at {db_path}", file=sys.stderr) 20 | return False 21 | 22 | conn = sqlite3.connect(db_path) 23 | cursor = conn.cursor() 24 | 25 | # Check if the column already exists 26 | cursor.execute(f"PRAGMA table_info({table_name});") 27 | columns = [info[1] for info in cursor.fetchall()] # Column names are at index 1 28 | 29 | if column_name in columns: 30 | print(f"Column '{column_name}' already exists in table '{table_name}'. No migration needed.") 31 | return True 32 | else: 33 | print(f"Column '{column_name}' not found in '{table_name}'. Attempting to add...") 34 | # Add the column. Using TEXT is generally safe. 35 | # You could add DEFAULT NULL if desired, but SQLite often handles this. 36 | alter_sql = f"ALTER TABLE {table_name} ADD COLUMN {column_name} {column_type};" 37 | print(f"Executing: {alter_sql}") 38 | cursor.execute(alter_sql) 39 | conn.commit() 40 | print(f"Successfully added column '{column_name}' to table '{table_name}'.") 41 | return True 42 | 43 | except sqlite3.Error as e: 44 | print(f"An SQLite error occurred: {e}", file=sys.stderr) 45 | # Check specifically for table not found error 46 | if f"no such table: {table_name}" in str(e): 47 | print(f"Error: The table '{table_name}' does not exist in the database.", file=sys.stderr) 48 | return False 49 | except Exception as e: 50 | print(f"An unexpected error occurred: {e}", file=sys.stderr) 51 | return False 52 | finally: 53 | if conn: 54 | conn.close() 55 | print("Database connection closed.") 56 | 57 | # --- Main execution --- 58 | if __name__ == "__main__": 59 | print("Starting database schema migration...") 60 | success = add_column_if_not_exists(DB_PATH, TABLE_NAME, COLUMN_NAME, COLUMN_TYPE) 61 | 62 | if success: 63 | # Verify 64 | print("\nVerifying schema...") 65 | try: 66 | conn_verify = sqlite3.connect(DB_PATH) 67 | cursor_verify = conn_verify.cursor() 68 | cursor_verify.execute(f"PRAGMA table_info({TABLE_NAME});") 69 | columns_after = [info[1] for info in cursor_verify.fetchall()] 70 | conn_verify.close() 71 | print(f"Columns in '{TABLE_NAME}' after migration: {columns_after}") 72 | if COLUMN_NAME in columns_after: 73 | print("Verification successful: Column found.") 74 | else: 75 | print("Verification failed: Column NOT found after migration attempt.", file=sys.stderr) 76 | except Exception as e: 77 | print(f"Verification failed with error: {e}", file=sys.stderr) 78 | 79 | print("\nMigration process finished.") 80 | else: 81 | print("\nMigration process 
failed.", file=sys.stderr) 82 | sys.exit(1) # Exit with error code if migration failed -------------------------------------------------------------------------------- /npcpy/mix/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/mix/__init__.py -------------------------------------------------------------------------------- /npcpy/mix/debate.py: -------------------------------------------------------------------------------- 1 | def run_debate(): 2 | """ 3 | Run a debate between two agents. 4 | """ 5 | 6 | pass 7 | -------------------------------------------------------------------------------- /npcpy/modes/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/modes/__init__.py -------------------------------------------------------------------------------- /npcpy/modes/_state.py: -------------------------------------------------------------------------------- 1 | from npcpy.npc_sysenv import ( 2 | print_and_process_stream_with_markdown, 3 | NPCSH_STREAM_OUTPUT, 4 | NPCSH_CHAT_MODEL, NPCSH_CHAT_PROVIDER, 5 | NPCSH_VISION_MODEL, NPCSH_VISION_PROVIDER, 6 | NPCSH_EMBEDDING_MODEL, NPCSH_EMBEDDING_PROVIDER, 7 | NPCSH_REASONING_MODEL, NPCSH_REASONING_PROVIDER, 8 | NPCSH_IMAGE_GEN_MODEL, NPCSH_IMAGE_GEN_PROVIDER, 9 | NPCSH_VIDEO_GEN_MODEL, NPCSH_VIDEO_GEN_PROVIDER, 10 | NPCSH_API_URL, 11 | NPCSH_DEFAULT_MODE, 12 | 13 | ) 14 | from npcpy.memory.command_history import ( 15 | start_new_conversation, 16 | ) 17 | from dataclasses import dataclass, field 18 | from typing import Optional, List, Dict, Any, Tuple, Union 19 | from npcpy.npc_compiler import NPC, Team 20 | import os 21 | @dataclass 22 | class ShellState: 23 | npc: Optional[Union[NPC, str]] = None 24 | team: Optional[Team] = None 25 | messages: List[Dict[str, Any]] = field(default_factory=list) 26 | mcp_client: Optional[Any] = None 27 | conversation_id: Optional[int] = None 28 | chat_model: str = NPCSH_CHAT_MODEL 29 | chat_provider: str = NPCSH_CHAT_PROVIDER 30 | vision_model: str = NPCSH_VISION_MODEL 31 | vision_provider: str = NPCSH_VISION_PROVIDER 32 | embedding_model: str = NPCSH_EMBEDDING_MODEL 33 | embedding_provider: str = NPCSH_EMBEDDING_PROVIDER 34 | reasoning_model: str = NPCSH_REASONING_MODEL 35 | reasoning_provider: str = NPCSH_REASONING_PROVIDER 36 | image_gen_model: str = NPCSH_IMAGE_GEN_MODEL 37 | image_gen_provider: str = NPCSH_IMAGE_GEN_PROVIDER 38 | video_gen_model: str = NPCSH_VIDEO_GEN_MODEL 39 | video_gen_provider: str = NPCSH_VIDEO_GEN_PROVIDER 40 | current_mode: str = NPCSH_DEFAULT_MODE 41 | api_key: Optional[str] = None 42 | api_url: Optional[str] = NPCSH_API_URL 43 | current_path: str = field(default_factory=os.getcwd) 44 | stream_output: bool = NPCSH_STREAM_OUTPUT 45 | attachments: Optional[List[Any]] = None 46 | def get_model_for_command(self, model_type: str = "chat"): 47 | if model_type == "chat": 48 | return self.chat_model, self.chat_provider 49 | elif model_type == "vision": 50 | return self.vision_model, self.vision_provider 51 | elif model_type == "embedding": 52 | return self.embedding_model, self.embedding_provider 53 | elif model_type == "reasoning": 54 | return self.reasoning_model, self.reasoning_provider 55 | elif model_type == "image_gen": 56 | return self.image_gen_model, self.image_gen_provider 57 | elif model_type == 
"video_gen": 58 | return self.video_gen_model, self.video_gen_provider 59 | else: 60 | return self.chat_model, self.chat_provider # Default fallback 61 | initial_state = ShellState( 62 | conversation_id=start_new_conversation(), 63 | stream_output=NPCSH_STREAM_OUTPUT, 64 | current_mode=NPCSH_DEFAULT_MODE, 65 | chat_model=NPCSH_CHAT_MODEL, 66 | chat_provider=NPCSH_CHAT_PROVIDER, 67 | vision_model=NPCSH_VISION_MODEL, 68 | vision_provider=NPCSH_VISION_PROVIDER, 69 | embedding_model=NPCSH_EMBEDDING_MODEL, 70 | embedding_provider=NPCSH_EMBEDDING_PROVIDER, 71 | reasoning_model=NPCSH_REASONING_MODEL, 72 | reasoning_provider=NPCSH_REASONING_PROVIDER, 73 | image_gen_model=NPCSH_IMAGE_GEN_MODEL, 74 | image_gen_provider=NPCSH_IMAGE_GEN_PROVIDER, 75 | video_gen_model=NPCSH_VIDEO_GEN_MODEL, 76 | video_gen_provider=NPCSH_VIDEO_GEN_PROVIDER, 77 | api_url=NPCSH_API_URL, 78 | ) 79 | -------------------------------------------------------------------------------- /npcpy/modes/plonk.py: -------------------------------------------------------------------------------- 1 | 2 | from npcpy.data.image import capture_screenshot 3 | from typing import Any, Dict 4 | import json 5 | import time 6 | from npcpy.llm_funcs import get_llm_response 7 | def execute_plonk_command(request, action_space, model=None, provider=None, npc=None): 8 | """ 9 | Main interaction loop with LLM for action determination 10 | 11 | Args: 12 | request (str): The task to be performed 13 | action_space (dict): Available action types and the inputs they require 14 | npc (optional): NPC object for context and screenshot 15 | **kwargs: Additional arguments for LLM response 16 | """ 17 | prompt = f""" 18 | Here is a request from a user: 19 | {request} 20 | 21 | Your job is to choose certain actions, take screenshots, 22 | and evaluate what the next step is to complete the task. 23 | 24 | You can choose from the following action types: 25 | {json.dumps(action_space)} 26 | 27 | 28 | 29 | Attached to the message is a screenshot of the current screen. 30 | 31 | Please use that information to determine the next steps. 32 | Your response must be a JSON with an 'actions' key containing a list of actions. 33 | Each action should have a 'type' and any necessary parameters.https://www.reddit.com 34 | 35 | 36 | For example: 37 | Your response should look like: 38 | 39 | {{ 40 | "actions": [ 41 | {{"type":"bash", "command":"firefox &"}}, 42 | {{"type": "click", "x": 5, "y": 5}}, 43 | {{'type': 'type', 'text': 'https://www.google.com'}} 44 | ] 45 | }} 46 | 47 | IF you have to type something, ensure that it iis first opened and selected. Do not 48 | begin with typing immediately. 49 | If you have to click, the numbers should range from 0 to 100 in x and y with 0,0 being in the upper left. 50 | 51 | 52 | IF you have accomplished the task, return an empty list. 53 | Do not include any additional markdown formatting. 
54 | """ 55 | 56 | while True: 57 | # Capture screenshot using NPC-based method 58 | screenshot = capture_screenshot(npc=npc, full=True) 59 | 60 | # Ensure screenshot was captured successfully 61 | if not screenshot: 62 | print("Screenshot capture failed") 63 | return None 64 | 65 | # Get LLM response 66 | response = get_llm_response( 67 | prompt, 68 | images=[screenshot], 69 | model=model, 70 | provider=provider, 71 | npc=npc, 72 | format="json", 73 | ) 74 | # print("LLM Response:", response, type(response)) 75 | # Check if task is complete 76 | print(response["response"]) 77 | if not response["response"].get("actions", []): 78 | return response 79 | 80 | # Execute actions 81 | for action in response["response"]["actions"]: 82 | print("Performing action:", action) 83 | action_result = perform_action(action) 84 | perform_action({"type": "wait", "duration": 5}) 85 | 86 | # Optional: Add error handling or logging 87 | if action_result.get("status") == "error": 88 | print(f"Error performing action: {action_result.get('message')}") 89 | 90 | # Small delay between action batches 91 | time.sleep(1) 92 | 93 | 94 | def test_open_reddit(npc: Any = None): 95 | """ 96 | Test function to open a web browser and navigate to Reddit using plonk 97 | """ 98 | # Define the action space for web navigation 99 | 100 | # Request to navigate to Reddit 101 | request = "Open a web browser and go to reddit.com" 102 | 103 | # Determine the browser launch hotkey based on the operating system 104 | import platform 105 | 106 | system = platform.system() 107 | 108 | if system == "Darwin": # macOS 109 | browser_launch_keys = ["command", "space"] 110 | browser_search = "chrome" 111 | elif system == "Windows": 112 | browser_launch_keys = ["win", "r"] 113 | browser_search = "chrome" 114 | else: # Linux or other 115 | browser_launch_keys = ["alt", "f2"] 116 | browser_search = "firefox" 117 | 118 | # Perform the task using plonk 119 | result = plonk( 120 | request, 121 | action_space, 122 | model="gpt-4o-mini", 123 | provider="openai", 124 | ) 125 | 126 | # Optionally, you can add assertions or print results 127 | print("Reddit navigation test result:", result) 128 | 129 | return result 130 | -------------------------------------------------------------------------------- /npcpy/npc-python.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npc-python.png -------------------------------------------------------------------------------- /npcpy/npc_team/alicanto.npc: -------------------------------------------------------------------------------- 1 | name: alicanto 2 | primary_directive: You are Alicanto the mythical bird. You have been spotted and it is your job to lead users to explore the world. 
3 | -------------------------------------------------------------------------------- /npcpy/npc_team/alicanto.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npc_team/alicanto.png -------------------------------------------------------------------------------- /npcpy/npc_team/assembly_lines/test_pipeline.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from sqlalchemy import create_engine 3 | import os 4 | 5 | # Sample market events data 6 | market_events_data = { 7 | "datetime": [ 8 | "2023-10-15 09:00:00", 9 | "2023-10-16 10:30:00", 10 | "2023-10-17 11:45:00", 11 | "2023-10-18 13:15:00", 12 | "2023-10-19 14:30:00", 13 | ], 14 | "headline": [ 15 | "Stock Market Rallies Amid Positive Economic Data", 16 | "Tech Giant Announces New Product Line", 17 | "Federal Reserve Hints at Interest Rate Pause", 18 | "Oil Prices Surge Following Supply Concerns", 19 | "Retail Sector Reports Record Q3 Earnings", 20 | ], 21 | } 22 | 23 | # Create a DataFrame 24 | market_events_df = pd.DataFrame(market_events_data) 25 | 26 | # Define database path relative to user's home directory 27 | db_path = os.path.expanduser("~/npcsh_history.db") 28 | 29 | # Create a connection to the SQLite database 30 | engine = create_engine(f"sqlite:///{db_path}") 31 | with engine.connect() as connection: 32 | # Write the data to a new table 'market_events', replacing existing data 33 | market_events_df.to_sql( 34 | "market_events", con=connection, if_exists="replace", index=False 35 | ) 36 | 37 | print("Market events have been added to the database.") 38 | 39 | email_data = { 40 | "datetime": [ 41 | "2023-10-10 10:00:00", 42 | "2023-10-11 11:00:00", 43 | "2023-10-12 12:00:00", 44 | "2023-10-13 13:00:00", 45 | "2023-10-14 14:00:00", 46 | ], 47 | "subject": [ 48 | "Meeting Reminder", 49 | "Project Update", 50 | "Invoice Attached", 51 | "Weekly Report", 52 | "Holiday Notice", 53 | ], 54 | "sender": [ 55 | "alice@example.com", 56 | "bob@example.com", 57 | "carol@example.com", 58 | "dave@example.com", 59 | "eve@example.com", 60 | ], 61 | "recipient": [ 62 | "bob@example.com", 63 | "carol@example.com", 64 | "dave@example.com", 65 | "eve@example.com", 66 | "alice@example.com", 67 | ], 68 | "body": [ 69 | "Don't forget the meeting tomorrow at 10 AM.", 70 | "The project is progressing well, see attached update.", 71 | "Please find your invoice attached.", 72 | "Here is the weekly report.", 73 | "The office will be closed on holidays, have a great time!", 74 | ], 75 | } 76 | 77 | # Create a DataFrame 78 | emails_df = pd.DataFrame(email_data) 79 | 80 | # Define database path relative to user's home directory 81 | db_path = os.path.expanduser("~/npcsh_history.db") 82 | 83 | # Create a connection to the SQLite database 84 | engine = create_engine(f"sqlite:///{db_path}") 85 | with engine.connect() as connection: 86 | # Write the data to a new table 'emails', replacing existing data 87 | emails_df.to_sql("emails", con=connection, if_exists="replace", index=False) 88 | 89 | print("Sample emails have been added to the database.") 90 | 91 | 92 | from npcpy.npc_compiler import PipelineRunner 93 | import os 94 | 95 | pipeline_runner = PipelineRunner( 96 | pipeline_file="morning_routine.pipe", 97 | npc_root_dir=os.path.abspath("."), # Use absolute path to parent directory 98 | db_path="~/npcsh_history.db", 99 | ) 100 | pipeline_runner.execute_pipeline() 
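# A minimal sanity check (illustrative sketch, not part of the original script):
# read back the tables seeded above to confirm they landed in ~/npcsh_history.db.
# The table names ("market_events", "emails") come from the to_sql calls earlier in this file.
with engine.connect() as connection:
    for table in ("market_events", "emails"):
        count = pd.read_sql_query(f"SELECT COUNT(*) AS n FROM {table}", connection).iloc[0]["n"]
        print(f"{table}: {count} rows loaded")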
101 | 102 | 103 | import pandas as pd 104 | from sqlalchemy import create_engine 105 | import os 106 | 107 | # Sample data generation for news articles 108 | news_articles_data = { 109 | "news_article_id": list(range(1, 21)), 110 | "headline": [ 111 | "Economy sees unexpected growth in Q4", 112 | "New tech gadget takes the world by storm", 113 | "Political debate heats up over new policy", 114 | "Health concerns rise amid new disease outbreak", 115 | "Sports team secures victory in last minute", 116 | "New economic policy introduced by government", 117 | "Breakthrough in AI technology announced", 118 | "Political leader delivers speech on reforms", 119 | "Healthcare systems pushed to limits", 120 | "Celebrated athlete breaks world record", 121 | "Controversial economic measures spark debate", 122 | "Innovative tech startup gains traction", 123 | "Political scandal shakes administration", 124 | "Healthcare workers protest for better pay", 125 | "Major sports event postponed due to weather", 126 | "Trade tensions impact global economy", 127 | "Tech company accused of data breach", 128 | "Election results lead to political upheaval", 129 | "Vaccine developments offer hope amid pandemic", 130 | "Sports league announces return to action", 131 | ], 132 | "content": ["Article content here..." for _ in range(20)], 133 | "publication_date": pd.date_range(start="1/1/2023", periods=20, freq="D"), 134 | } 135 | 136 | # Create a DataFrame 137 | news_df = pd.DataFrame(news_articles_data) 138 | 139 | # Define the database path 140 | db_path = os.path.expanduser("~/npcsh_history.db") 141 | 142 | # Create a connection to the SQLite database 143 | engine = create_engine(f"sqlite:///{db_path}") 144 | with engine.connect() as connection: 145 | # Write the data to a new table 'news_articles', replacing existing data 146 | news_df.to_sql("news_articles", con=connection, if_exists="replace", index=False) 147 | 148 | print("News articles have been added to the database.") 149 | 150 | from npcpy.npc_compiler import PipelineRunner 151 | import os 152 | 153 | runner = PipelineRunner( 154 | "./news_analysis.pipe", 155 | db_path=os.path.expanduser("~/npcsh_history.db"), 156 | npc_root_dir=os.path.abspath("."), 157 | ) 158 | results = runner.execute_pipeline() 159 | 160 | print("\nResults:") 161 | print("\nClassifications (processed row by row):") 162 | print(results["classify_news"]) 163 | print("\nAnalysis (processed in batch):") 164 | print(results["analyze_news"]) 165 | 166 | 167 | from npcpy.npc_compiler import PipelineRunner 168 | import os 169 | 170 | runner = PipelineRunner( 171 | "./news_analysis_mixa.pipe", 172 | db_path=os.path.expanduser("~/npcsh_history.db"), 173 | npc_root_dir=os.path.abspath("."), 174 | ) 175 | results = runner.execute_pipeline() 176 | 177 | print("\nResults:") 178 | print("\nClassifications (processed row by row):") 179 | print(results["classify_news"]) 180 | print("\nAnalysis (processed in batch):") 181 | print(results["analyze_news"]) 182 | -------------------------------------------------------------------------------- /npcpy/npc_team/corca.npc: -------------------------------------------------------------------------------- 1 | name: corca 2 | primary_directive: | 3 | You are corca, a distinguished member of the NPC team. 4 | Your expertise is in the area of software development and 5 | you have a kanck for thinking through problems carefully. 
6 | You favor solutions that prioritize simplicity and clarity and 7 | ought to always consider how some suggestion may increase rather than reduce tech debt 8 | unnecessarily. Now, the key is in this last term, "unnecessarily". 9 | You must distinguish carefully and when in doubt, opt to ask for further 10 | information or clarification with concrete clear options that make it 11 | easy for a user to choose. 12 | model: gpt-4o-mini 13 | provider: openai -------------------------------------------------------------------------------- /npcpy/npc_team/foreman.npc: -------------------------------------------------------------------------------- 1 | name: foreman 2 | primary_directive: You are the foreman of an NPC team. It is your duty 3 | to delegate tasks to your team members or to other specialized teams 4 | in order to complete the project. You are responsible for the 5 | completion of the project and the safety of your team members. 6 | model: gpt-4o-mini 7 | provider: openai 8 | -------------------------------------------------------------------------------- /npcpy/npc_team/frederic.npc: -------------------------------------------------------------------------------- 1 | name: frederic 2 | primary_directive: | 3 | You are frederic the polar bear. Your job is help users think through problems and 4 | to provide straightforward ways forward on problems. Cut through the ice 5 | to get to what matters and keep things simple. You are to respond in a 6 | witty tone like richard feynman but with the romantic tambor of Frederic Chopin. 7 | -------------------------------------------------------------------------------- /npcpy/npc_team/frederic4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npc_team/frederic4.png -------------------------------------------------------------------------------- /npcpy/npc_team/guac.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npc_team/guac.png -------------------------------------------------------------------------------- /npcpy/npc_team/jinxs/automator.jinx: -------------------------------------------------------------------------------- 1 | jinx_name: automator 2 | description: Issue npc shell requests. Uses one of the NPC macros. 3 | inputs: 4 | - request 5 | - type 6 | steps: 7 | - engine: "python" 8 | code: | 9 | type = '{{type}}' 10 | request = '{{request}}' 11 | if type == 'plan': 12 | from npcpy.work.plan import execute_plan_command 13 | output = execute_plan_command(request, npc=npc) 14 | elif type == 'trigger': 15 | from npcpy.work.trigger import execute_trigger_command 16 | output = execute_trigger_command(request, npc=npc) 17 | else: 18 | raise ValueError("Invalid type. Must be 'plan' or 'trigger'.") -------------------------------------------------------------------------------- /npcpy/npc_team/jinxs/bash_executer.jinx: -------------------------------------------------------------------------------- 1 | jinx_name: bash_executor 2 | description: Execute bash queries. 
3 | inputs: 4 | - bash_command 5 | - user_request 6 | steps: 7 | - engine: python 8 | code: | 9 | import subprocess 10 | import os 11 | cmd = '{{bash_command}}' # Properly quote the command input 12 | def run_command(cmd): 13 | process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) 14 | stdout, stderr = process.communicate() 15 | if stderr: 16 | print(f"Error: {stderr.decode('utf-8')}") 17 | return stderr 18 | return stdout 19 | result = run_command(cmd) 20 | output = result.decode('utf-8') 21 | 22 | - engine: natural 23 | code: | 24 | 25 | Here is the result of the bash command: 26 | ``` 27 | {{ output }} 28 | ``` 29 | This was the original user request: {{ user_request }} 30 | 31 | Please provide a response accordingly. -------------------------------------------------------------------------------- /npcpy/npc_team/jinxs/calculator.jinx: -------------------------------------------------------------------------------- 1 | jinx_name: calculator 2 | description: A jinx to simplify and evaluate mathematical expressions 3 | inputs: 4 | - expression 5 | steps: 6 | - code: "output = eval(''{{ expression }}'')" 7 | engine: python 8 | -------------------------------------------------------------------------------- /npcpy/npc_team/jinxs/edit_file.jinx: -------------------------------------------------------------------------------- 1 | jinx_name: file_editor 2 | description: Examines a file, determines what changes are needed, and applies those 3 | changes. 4 | inputs: 5 | - file_path 6 | - edit_instructions 7 | - backup: true 8 | steps: 9 | - name: "edit_file" 10 | engine: "python" 11 | code: | 12 | import os 13 | from npcpy.llm_funcs import get_llm_response 14 | 15 | # Get inputs 16 | file_path = os.path.expanduser("{{ file_path }}") 17 | edit_instructions = "{{ edit_instructions }}" 18 | backup_str = "{{ backup }}" 19 | create_backup = backup_str.lower() not in ('false', 'no', '0', '') 20 | 21 | # Read file content 22 | with open(file_path, 'r') as f: 23 | original_content = f.read() 24 | 25 | # Create backup if requested 26 | if create_backup: 27 | backup_path = file_path + ".bak" 28 | with open(backup_path, 'w') as f: 29 | f.write(original_content) 30 | 31 | # Make the prompt for the LLM 32 | prompt = """You are a code editing assistant. Analyze this file and make the requested changes. 33 | 34 | File content: 35 | """ + original_content + """ 36 | 37 | Edit instructions: """ + edit_instructions + """ 38 | 39 | Return a JSON object with these fields: 40 | 1. "modifications": An array of modification objects, where each object has: 41 | - "type": One of "replace", "insert_after", "insert_before", or "delete" 42 | - "target": For "insert_after" and "insert_before", the text to insert after/before 43 | For "delete", the text to delete 44 | - "original": For "replace", the text to be replaced 45 | - "replacement": For "replace", the text to replace with 46 | - "insertion": For "insert_after" and "insert_before", the text to insert 47 | 2. 
"explanation": Brief explanation of the changes made 48 | """ 49 | print('getting llm response') 50 | # Get the LLM response with JSON formatting 51 | response = get_llm_response(prompt, model=npc.model, provider=npc.provider, npc=npc, format="json") 52 | 53 | result = response.get("response", {}) 54 | modifications = result.get("modifications", []) 55 | explanation = result.get("explanation", "No explanation provided") 56 | 57 | # Apply modifications 58 | updated_content = original_content 59 | changes_applied = 0 60 | 61 | for mod in modifications: 62 | print(mod) 63 | mod_type = mod.get("type") 64 | 65 | if mod_type == "replace": 66 | original = mod.get("original") 67 | replacement = mod.get("replacement") 68 | if original in updated_content: 69 | updated_content = updated_content.replace(original, replacement) 70 | changes_applied += 1 71 | 72 | elif mod_type == "insert_after": 73 | target = mod.get("target") 74 | insertion = mod.get("insertion") 75 | if target in updated_content: 76 | updated_content = updated_content.replace(target, target + insertion) 77 | changes_applied += 1 78 | 79 | elif mod_type == "insert_before": 80 | target = mod.get("target") 81 | insertion = mod.get("insertion") 82 | if target in updated_content: 83 | updated_content = updated_content.replace(target, insertion + target) 84 | changes_applied += 1 85 | 86 | elif mod_type == "delete": 87 | target = mod.get("target") 88 | if target in updated_content: 89 | updated_content = updated_content.replace(target, "") 90 | changes_applied += 1 91 | 92 | # Write the updated content 93 | with open(file_path, 'w') as f: 94 | f.write(updated_content) 95 | 96 | output = "Applied " + str(changes_applied) + " changes to " + file_path + "\n\n" + explanation -------------------------------------------------------------------------------- /npcpy/npc_team/jinxs/file_chat.jinx: -------------------------------------------------------------------------------- 1 | jinx_name: file_chat 2 | description: Enter spool mode with a list of files that will be loaded in automatically 3 | for rag for user responses. 4 | inputs: 5 | - files_list 6 | steps: 7 | - engine: python 8 | code: | 9 | from npcpy.modes.spool import enter_spool_mode 10 | 11 | files_list = {{files_list}} 12 | output = enter_spool_mode( 13 | files = files_list 14 | ) -------------------------------------------------------------------------------- /npcpy/npc_team/jinxs/gui_controller.jinx: -------------------------------------------------------------------------------- 1 | jinx_name: gui_controller 2 | description: Controls Guis by issuuing keyboard commands and key presses at certain 3 | locations. 
4 | inputs: 5 | - query 6 | - provider: '' 7 | steps: 8 | - engine: "python" 9 | code: | 10 | from npcpy.data.web import search_web 11 | from npcpy.npc_sysenv import NPCSH_SEARCH_PROVIDER 12 | query = "{{ query }}" 13 | provider = '{{ provider }}' 14 | if provider.strip() != '': 15 | results = search_web(query, num_results=5, provider = provider) 16 | else: 17 | results = search_web(query, num_results=5, provider = NPCSH_SEARCH_PROVIDER) 18 | 19 | print('QUERY in jinx', query) 20 | results = search_web(query, num_results=5, provider = NPCSH_SEARCH_PROVIDER) 21 | print('RESULTS in jinx', results) 22 | - engine: "natural" 23 | code: | 24 | Using the following information extracted from the web: 25 | 26 | {{ results }} 27 | 28 | Answer the users question: {{ query }} -------------------------------------------------------------------------------- /npcpy/npc_team/jinxs/image_generation.jinx: -------------------------------------------------------------------------------- 1 | jinx_name: image_generation_jinx 2 | description: 'Generates images based on a text prompt.' 3 | inputs: 4 | - prompt 5 | - model: runwayml/stable-diffusion-v1-5 6 | - provider: diffusers 7 | steps: 8 | - engine: "python" 9 | code: | 10 | image_prompt = '{{prompt}}'.strip() 11 | 12 | # Generate the image 13 | filename = generate_image( 14 | image_prompt, 15 | npc=npc, 16 | model='{{model}}', # You can adjust the model as needed 17 | provider='{{provider}}' 18 | ) 19 | if filename: 20 | image_generated = True 21 | else: 22 | image_generated = False -------------------------------------------------------------------------------- /npcpy/npc_team/jinxs/internet_search.jinx: -------------------------------------------------------------------------------- 1 | jinx_name: internet_search 2 | description: Searches the web for information based on a query in order to verify 3 | timiely details (e.g. current events) or to corroborate information in uncertain 4 | situations. Should be mainly only used when users specifically request a search, 5 | otherwise an LLMs basic knowledge should be sufficient. 6 | inputs: 7 | - query 8 | - provider: '' 9 | steps: 10 | - engine: "python" 11 | code: | 12 | from npcpy.data.web import search_web 13 | from npcpy.npc_sysenv import NPCSH_SEARCH_PROVIDER 14 | query = "{{ query }}" 15 | provider = '{{ provider }}' 16 | if provider.strip() != '': 17 | results = search_web(query, num_results=5, provider = provider) 18 | else: 19 | results = search_web(query, num_results=5, provider = NPCSH_SEARCH_PROVIDER) 20 | 21 | print('QUERY in jinx', query) 22 | results = search_web(query, num_results=5, provider = NPCSH_SEARCH_PROVIDER) 23 | print('RESULTS in jinx', results) 24 | - engine: "natural" 25 | code: | 26 | Using the following information extracted from the web: 27 | 28 | {{ results }} 29 | 30 | Answer the users question: {{ query }} -------------------------------------------------------------------------------- /npcpy/npc_team/jinxs/npcsh_executor.jinx: -------------------------------------------------------------------------------- 1 | jinx_name: npcsh_executor 2 | description: Issue npc shell requests. Uses one of the NPC macros. 
3 | inputs: 4 | - request 5 | steps: 6 | - name: 'get_command_help' 7 | engine: 'python' 8 | code: | 9 | from npcpy.routes import router, get_help_text 10 | router_info = get_help_text() 11 | output = router_info 12 | - name: 'generate_npc_command' 13 | engine: 'natural' 14 | code: | 15 | Based on the output: {{output}} 16 | 17 | generate a npcsh command that can be executed to satisfy this user request: 18 | {{request}} 19 | Do not include any other comments, Your response should only be 20 | 21 | the command string verbatim like '/ args --kwargs 22 | - name: 'run command' 23 | engine: 'python' 24 | code: | 25 | from npcpy.modes.npcsh import execute_slash_command 26 | from npcpy.modes._state import initial_state 27 | 28 | llm_response = '{{generate_npc_command}}' 29 | llm_response = llm_response[1:] 30 | response = execute_slash_command(llm_response, None, initial_state, False) 31 | output = response[1] -------------------------------------------------------------------------------- /npcpy/npc_team/jinxs/python_executor.jinx: -------------------------------------------------------------------------------- 1 | jinx_name: python_executor 2 | description: Execute scripts with python. Set the ultimate result as the "output" 3 | variable. It must be a string. Do not add unnecessary print statements. 4 | inputs: 5 | - code 6 | steps: 7 | - code: '{{code}}' 8 | engine: python 9 | -------------------------------------------------------------------------------- /npcpy/npc_team/jinxs/screen_cap.jinx: -------------------------------------------------------------------------------- 1 | jinx_name: screen_capture_analysis_jinx 2 | description: Captures the whole screen and sends the image for analysis 3 | inputs: 4 | - prompt 5 | steps: 6 | - engine: "python" 7 | code: | 8 | import os 9 | from npcpy.data.image import capture_screenshot 10 | out = capture_screenshot(full=True) 11 | prompt = "{{prompt}}" 12 | # Now properly use get_llm_response to analyze the image 13 | # Create a prompt that includes the user's request and instructions 14 | analysis_prompt = prompt + "\n\nAttached is a screenshot of my screen currently. Please use this to evaluate the situation. If the user asked for you to explain what's on their screen or something similar, they are referring to the details contained within the attached image." 15 | llm_response = get_llm_response( 16 | prompt=analysis_prompt, 17 | model=npc.model if npc else None, 18 | provider=npc.provider if npc else None, 19 | api_url=npc.api_url if npc else None, 20 | api_key=npc.api_key if npc else None, 21 | npc=npc, 22 | images=[out['file_path']], 23 | ) 24 | output = llm_response['response'] 25 | -------------------------------------------------------------------------------- /npcpy/npc_team/jinxs/sql_executor.jinx: -------------------------------------------------------------------------------- 1 | jinx_name: data_pull 2 | description: Execute queries on the ~/npcsh_history.db to pull data. The database 3 | contains only information about conversations and other user-provided data. It does 4 | not store any information about individual files. 
5 | inputs: 6 | - sql_query 7 | - interpret: false 8 | steps: 9 | - engine: python 10 | code: | 11 | import pandas as pd 12 | try: 13 | df = pd.read_sql_query('{{sql_query}}', npc.db_conn) 14 | except pd.errors.DatabaseError as e: 15 | df = pd.DataFrame({'Error': [str(e)]}) 16 | 17 | 18 | output = df.to_string() 19 | 20 | - engine: natural 21 | code: | 22 | {% if interpret %} 23 | Here is the result of the SQL query: 24 | ``` 25 | {{ df.to_string() }} # Convert DataFrame to string for a nicer display 26 | ``` 27 | {% endif %} -------------------------------------------------------------------------------- /npcpy/npc_team/kadiefa.npc: -------------------------------------------------------------------------------- 1 | name: kadiefa 2 | primary_directive: | 3 | You are kadiefa, the exploratory snow leopard. You love to find new paths and to explore hidden gems. You go into caverns no cat has ventured into before. You climb peaks that others call crazy. You are at the height of your power. Your role is to lead the way for users to explore complex research questions and to think outside of the box. 4 | -------------------------------------------------------------------------------- /npcpy/npc_team/kadiefa.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npc_team/kadiefa.png -------------------------------------------------------------------------------- /npcpy/npc_team/npcsh.ctx: -------------------------------------------------------------------------------- 1 | context: | 2 | The npcsh NPC team is devoted to providing a safe and helpful 3 | environment for users where they can work and be as successful as possible. 4 | npcsh is a command-line tool that makes it easy for users to harness 5 | the power of LLMs from a command line shell. 6 | databases: 7 | - ~/npcsh_history.db 8 | mcp_servers: 9 | - ~/.npcsh/mcp_server.py -------------------------------------------------------------------------------- /npcpy/npc_team/npcsh_sibiji.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npc_team/npcsh_sibiji.png -------------------------------------------------------------------------------- /npcpy/npc_team/plonk.npc: -------------------------------------------------------------------------------- 1 | name: plonk 2 | primary_directive: You are the superior automation specialist of the NPC team. -------------------------------------------------------------------------------- /npcpy/npc_team/plonk.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npc_team/plonk.png -------------------------------------------------------------------------------- /npcpy/npc_team/plonkjr.npc: -------------------------------------------------------------------------------- 1 | name: plonkjr 2 | primary_directive: You are a junior automation specialist in the NPC Team.
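# Illustrative sketch only (these keys are not in the shipped plonkjr.npc): as with
# sibiji.npc and corca.npc elsewhere in this team, an .npc file may also pin a model,
# a provider, and the jinxs the NPC is allowed to use, e.g.:
# model: gpt-4o-mini
# provider: openai
# jinxs: "*"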
3 | -------------------------------------------------------------------------------- /npcpy/npc_team/plonkjr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npc_team/plonkjr.png -------------------------------------------------------------------------------- /npcpy/npc_team/sibiji.npc: -------------------------------------------------------------------------------- 1 | name: sibiji 2 | primary_directive: You are a foundational AI assistant. Your role is to provide basic support and information. Respond to queries concisely and accurately. 3 | model: gpt-4.1-mini 4 | provider: openai 5 | jinxs: "*" -------------------------------------------------------------------------------- /npcpy/npc_team/sibiji.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npc_team/sibiji.png -------------------------------------------------------------------------------- /npcpy/npc_team/spool.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npc_team/spool.png -------------------------------------------------------------------------------- /npcpy/npc_team/templates/analytics/celona.npc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npc_team/templates/analytics/celona.npc -------------------------------------------------------------------------------- /npcpy/npc_team/templates/hr_support/raone.npc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npc_team/templates/hr_support/raone.npc -------------------------------------------------------------------------------- /npcpy/npc_team/templates/humanities/eriane.npc: -------------------------------------------------------------------------------- 1 | name: eriane 2 | primary_directive: you are an expert in the humanities and you must draw from your vast knowledge of history, literature, art, and philosophy to aid users in their requests, pulling real useful examples that can make users better understand results. 3 | model: gpt-4o-mini 4 | provider: openai 5 | -------------------------------------------------------------------------------- /npcpy/npc_team/templates/it_support/lineru.npc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npc_team/templates/it_support/lineru.npc -------------------------------------------------------------------------------- /npcpy/npc_team/templates/marketing/slean.npc: -------------------------------------------------------------------------------- 1 | name: slean 2 | primary_directive: Assist with marketing issues, challenges and questions. When responding, be careful to always think through the problems as if you are a wmarketing wiz who has launched and hyper scaled companies through effective marketing by always thinking outside the box. 
3 | model: gpt-4o-mini 4 | provider: openai -------------------------------------------------------------------------------- /npcpy/npc_team/templates/philosophy/maurawa.npc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npc_team/templates/philosophy/maurawa.npc -------------------------------------------------------------------------------- /npcpy/npc_team/templates/sales/turnic.npc: -------------------------------------------------------------------------------- 1 | name: turnic 2 | primary_directive: Assist with sales challenges and questions. When responding, keep in mind that sales professionals tend to be interested in achieving results quickly, so you must ensure that you opt for simpler and more straightforward solutions and explanations without much fanfare. 3 | model: gpt-4o-mini 4 | provider: openai -------------------------------------------------------------------------------- /npcpy/npc_team/templates/software/welxor.npc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npc_team/templates/software/welxor.npc -------------------------------------------------------------------------------- /npcpy/npc_team/yap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npc_team/yap.png -------------------------------------------------------------------------------- /npcpy/npcs.py: -------------------------------------------------------------------------------- 1 | from npcpy.npc_compiler import NPC 2 | import os 3 | from sqlalchemy import create_engine 4 | db = create_engine("sqlite:///"+os.path.expanduser('~/npcsh_history.db')) 5 | 6 | sibiji_path = os.path.expanduser("~/.npcsh/npc_team/sibiji.npc") 7 | try: 8 | if not os.path.exists(sibiji_path): 9 | sibiji_path = os.path.join(os.path.dirname(__file__), "npc_team", "sibiji.npc") 10 | 11 | sibiji = NPC(file = sibiji_path, db_conn = db) 12 | 13 | except Exception as e: 14 | print(f"Error finding sibiji.npc: {e}") 15 | sibiji = NPC(primary_directive='You are sibiji, the master planner for all NPCs and genius of the NPC team', 16 | model='llama3.2', 17 | provider='ollama', ) 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /npcpy/npcsh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/npcsh.png -------------------------------------------------------------------------------- /npcpy/sql/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/sql/__init__.py -------------------------------------------------------------------------------- /npcpy/work/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/npcpy/work/__init__.py -------------------------------------------------------------------------------- /npcpy/work/desktop.py: -------------------------------------------------------------------------------- 1 | import pyautogui, subprocess, time  # required by perform_action below 2
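# The action_space dict below enumerates the JSON actions an LLM may emit and the
# parameters each action expects; perform_action() further down executes them via
# pyautogui and subprocess. A minimal usage sketch, assuming this module's layout:
#
#     from npcpy.work.desktop import perform_action
#     perform_action({"type": "type", "text": "hello"})
#     perform_action({"type": "wait", "duration": 2})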
| 3 | action_space = { 4 | "hotkey": {"key": "string"}, # For pressing hotkeys 5 | "click": { 6 | "x": "int between 0 and 100", 7 | "y": "int between 0 and 100", 8 | }, # For clicking 9 | "drag": { 10 | "x": "int between 0 and 100", 11 | "y": "int between 0 and 100", 12 | "duration": "int", 13 | }, # For dragging 14 | "wait": {"duration": "int"}, # For waiting 15 | "type": {"text": "string"}, 16 | "right_click": {"x": "int between 0 and 100", "y": "int between 0 and 100"}, 17 | "double_click": {"x": "int between 0 and 100", "y": "int between 0 and 100"}, 18 | "bash": {"command": "string"}, 19 | } 20 | def perform_action(action): 21 | """ 22 | Execute different types of actions using PyAutoGUI 23 | """ 24 | try: 25 | pyautogui.PAUSE = 1 # Add a small pause between actions 26 | pyautogui.FAILSAFE = ( 27 | True # Enable fail-safe to stop script by moving mouse to corner 28 | ) 29 | 30 | print(f"Action received: {action}") # Debug print 31 | 32 | if action["type"] == "click": 33 | pyautogui.click(x=action.get("x"), y=action.get("y")) 34 | 35 | elif action["type"] == "double_click": 36 | pyautogui.doubleClick(x=action.get("x"), y=action.get("y")) 37 | 38 | elif action["type"] == "right_click": 39 | pyautogui.rightClick(x=action.get("x"), y=action.get("y")) 40 | 41 | elif action["type"] == "drag": 42 | pyautogui.dragTo( 43 | x=action.get("x"), y=action.get("y"), duration=action.get("duration", 1) 44 | ) 45 | 46 | elif action["type"] == "type": 47 | text = action.get("text", "") 48 | if isinstance(text, dict): 49 | text = text.get("text", "") 50 | pyautogui.typewrite(text) 51 | 52 | elif action["type"] == "hotkey": 53 | keys = action.get("text", "") 54 | print(f"Hotkey action: {keys}") # Debug print 55 | if isinstance(keys, str): 56 | keys = [keys] 57 | elif isinstance(keys, dict): 58 | keys = [keys.get("key", "")] 59 | pyautogui.hotkey(*keys) 60 | 61 | elif action["type"] == "wait": 62 | time.sleep(action.get("duration", 1)) # Wait for the given time in seconds 63 | 64 | elif action["type"] == "bash": 65 | command = action.get("command", "") 66 | print(f"Running bash command: {command}") # Debug print 67 | subprocess.Popen( 68 | command, shell=True 69 | ) # Run the command without waiting for it to complete 70 | print(f"Bash Command Output: {result.stdout.decode()}") # Debug output 71 | print(f"Bash Command Error: {result.stderr.decode()}") # Debug error 72 | 73 | return {"status": "success"} 74 | 75 | except Exception as e: 76 | return {"status": "error", "message": str(e)} 77 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | import site 3 | import platform 4 | from pathlib import Path 5 | import os 6 | 7 | 8 | def package_files(directory): 9 | paths = [] 10 | for path, directories, filenames in os.walk(directory): 11 | for filename in filenames: 12 | paths.append(os.path.join(path, filename)) 13 | return paths 14 | 15 | 16 | 17 | # Base requirements (no LLM packages) 18 | base_requirements = [ 19 | "jinja2", 20 | "litellm", 21 | "scipy", 22 | "numpy", 23 | "requests", 24 | "matplotlib", 25 | "markdown", 26 | "networkx", 27 | "PyYAML", 28 | "PyMuPDF", 29 | "pyautogui", 30 | "pydantic", 31 | "pygments", 32 | "sqlalchemy", 33 | "termcolor", 34 | "rich", 35 | "colorama", 36 | "Pillow", 37 | "python-dotenv", 38 | "pandas", 39 | "beautifulsoup4", 40 | "duckduckgo-search", 41 | "flask", 42 | "flask_cors", 43 | "redis", 44 | 
"psycopg2-binary", 45 | "flask_sse", 46 | ] 47 | 48 | # API integration requirements 49 | api_requirements = [ 50 | "anthropic", 51 | "openai", 52 | "google-generativeai", 53 | "google-genai", 54 | ] 55 | 56 | # mcp integration requirements 57 | mcp_requirements = [ 58 | "mcp", 59 | ] 60 | # Local ML/AI requirements 61 | local_requirements = [ 62 | "sentence_transformers", 63 | "opencv-python", 64 | "ollama", 65 | "kuzu", 66 | "chromadb", 67 | "diffusers", 68 | "nltk", 69 | "torch", 70 | ] 71 | 72 | # Voice/Audio requirements 73 | voice_requirements = [ 74 | "pyaudio", 75 | "gtts", 76 | "playsound==1.2.2", 77 | "pygame", 78 | "faster_whisper", 79 | "pyttsx3", 80 | ] 81 | 82 | extra_files = package_files("npcpy/npc_team/") 83 | 84 | setup( 85 | name="npcpy", 86 | version="1.0.14", 87 | packages=find_packages(exclude=["tests*"]), 88 | install_requires=base_requirements, # Only install base requirements by default 89 | extras_require={ 90 | "lite": api_requirements, # Just API integrations 91 | "local": local_requirements, # Local AI/ML features 92 | "yap": voice_requirements, # Voice/Audio features 93 | "mcp": mcp_requirements, # MCP integration 94 | "all": api_requirements + local_requirements + voice_requirements + mcp_requirements, # Everything 95 | }, 96 | entry_points={ 97 | "console_scripts": [ 98 | "npcsh=npcpy.modes.npcsh:main", 99 | "npcsh-mcp=npcpy.modes.mcp_npcsh:main", 100 | "npc=npcpy.modes.npc:main", 101 | "yap=npcpy.modes.yap:main", 102 | "pti=npcpy.modes.pti:main", 103 | "guac=npcpy.modes.guac:main", 104 | "wander=npcpy.modes.wander:main", 105 | "deep_research=npcpy.modes.deep_search:main", 106 | "spool=npcpy.modes.spool:main", 107 | "sleep=npcpy.modes.sleep:main", 108 | ], 109 | }, 110 | author="Christopher Agostino", 111 | author_email="info@npcworldwi.de", 112 | description="npcpy is a python library for orchestrating AI agents.", 113 | long_description=open("README.md").read(), 114 | long_description_content_type="text/markdown", 115 | url="https://github.com/cagostino/npcpy", 116 | classifiers=[ 117 | "Programming Language :: Python :: 3", 118 | "License :: OSI Approved :: MIT License", 119 | ], 120 | include_package_data=True, 121 | data_files=[("npcpy/npc_team", extra_files)], 122 | python_requires=">=3.10", 123 | ) 124 | 125 | -------------------------------------------------------------------------------- /test_data/catfight.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/test_data/catfight.PNG -------------------------------------------------------------------------------- /test_data/futuristic_cityscape.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/test_data/futuristic_cityscape.PNG -------------------------------------------------------------------------------- /test_data/generated_imag2e.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/test_data/generated_imag2e.png -------------------------------------------------------------------------------- /test_data/generated_image.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/test_data/generated_image.png -------------------------------------------------------------------------------- /test_data/generated_image1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/test_data/generated_image1.png -------------------------------------------------------------------------------- /test_data/markov_chain.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/test_data/markov_chain.png -------------------------------------------------------------------------------- /test_data/peaceful_landscape.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/test_data/peaceful_landscape.PNG -------------------------------------------------------------------------------- /test_data/peaceful_landscape_stable_diff.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/test_data/peaceful_landscape_stable_diff.png -------------------------------------------------------------------------------- /test_data/r8ss9a.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/test_data/r8ss9a.PNG -------------------------------------------------------------------------------- /test_data/rabbit.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/test_data/rabbit.PNG -------------------------------------------------------------------------------- /test_data/russia2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/test_data/russia2.PNG -------------------------------------------------------------------------------- /test_data/yuan2004.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/test_data/yuan2004.pdf -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/tests/__init__.py -------------------------------------------------------------------------------- /tests/dummy_linked_list.py: -------------------------------------------------------------------------------- 1 | class Node: 2 | def __init__(self, value): 3 | self.value = value 4 | self.next = None 5 | 6 | class LinkedList: 7 | def __init__(self): 8 | self.head = None 9 | 10 | def append(self, value): 11 | new_node = Node(value) 12 | if not self.head: 13 | self.head = new_node 14 | return 15 | last = self.head 16 | while last.next: 17 | last = last.next 18 | last.next = new_node 19 | 20 | # Test for class Node 21 | print("This 
is a sample LinkedList implementation.") -------------------------------------------------------------------------------- /tests/gpt4omini_tts.py: -------------------------------------------------------------------------------- 1 | import openai 2 | import whisper 3 | import pyttsx3 4 | 5 | # Initialize Whisper model 6 | model = whisper.load_model( 7 | "base" 8 | ) # You can choose 'base', 'small', 'medium', or 'large' 9 | 10 | 11 | # Function for Speech-to-Text using Whisper 12 | def speech_to_text(audio_path): 13 | result = model.transcribe(audio_path) 14 | return result["text"] 15 | 16 | 17 | # Initialize TTS engine (pyttsx3) 18 | engine = pyttsx3.init() 19 | 20 | 21 | # Function to speak text 22 | def text_to_speech(text): 23 | engine.say(text) 24 | engine.runAndWait() 25 | 26 | 27 | # Example usage 28 | if __name__ == "__main__": 29 | # Convert speech to text from an audio file 30 | audio_file = "your_audio_file.wav" # Path to the audio file 31 | print("Converting speech to text...") 32 | text = speech_to_text(audio_file) 33 | print("Recognized Text:", text) 34 | 35 | # Use GPT-4O Mini to generate a response (for now, just a placeholder example) 36 | # Ideally, replace this with a GPT-4 call for a response based on 'text' 37 | gpt_response = "This is the response from GPT-4O Mini." 38 | 39 | # Convert the GPT response to speech 40 | print("Speaking the response...") 41 | text_to_speech(gpt_response) 42 | -------------------------------------------------------------------------------- /tests/knowledge_graph_test.py: -------------------------------------------------------------------------------- 1 | from npcpy.memory.knowledge_graph import * 2 | import os 3 | 4 | # Example usage: 5 | if __name__ == "__main__": 6 | db_path = os.path.expanduser("~/npcsh_graph.db") # Specify your database path here 7 | text = """ 8 | npcsh is a python based command line tool designed to integrate Large Language Models (LLMs) into one's daily workflow by making them available through the command line shell. 9 | 10 | Smart Interpreter: npcsh leverages the power of LLMs to understand your natural language commands and questions, executing tasks, answering queries, and providing relevant information from local files and the web. 11 | 12 | Macros: npcsh provides macros to accomplish common tasks with LLMs like voice control (/yap), image generation (/vixynt), screenshot capture and analysis (/ots), one-shot questions (/sample), and more. 13 | 14 | NPC-Driven Interactions: npcsh allows users to coordinate agents (i.e. NPCs) to form assembly lines that can reliably accomplish complicated multi-step procedures. Define custom "NPCs" (Non-Player Characters) with specific personalities, directives, and tools. This allows for tailored interactions based on the task at hand. 15 | 16 | Jinx Use: Define custom tools for your NPCs to use, expanding their capabilities beyond simple commands and questions. Some example tools include: image generation, local file search, data analysis, web search, local file search, bash command execution, and more. 17 | 18 | Extensible with Python: Write your own tools and extend npcsh's functionality using Python or use our functionis to simplify interactions with LLMs in your projects. 19 | 20 | Bash Wrapper: Execute bash commands directly without leaving the shell. Use your favorite command-line tools like VIM, Emacs, ipython, sqlite3, git, and more without leaving the shell! 
21 | 22 | 23 | """ 24 | path = os.path.expanduser("~/npcww/npcsh/tests/") 25 | conn = init_db(db_path, drop=False) 26 | 27 | facts = process_text( 28 | db_path, text, path, model="gpt-4o-mini", provider="openai", conn=conn 29 | ) 30 | groups = identify_groups(facts, model="gpt-4o-mini", provider="openai") 31 | print("\nIdentified Groups:") 32 | 33 | for group in groups: 34 | print(f"- {group}") 35 | create_group(conn, group) 36 | 37 | # For each fact, assign it to appropriate groups 38 | 39 | for fact in facts: 40 | group_assignments = assign_to_groups( 41 | fact, groups, model="gpt-4o-mini", provider="openai" 42 | ) 43 | for group in group_assignments["groups"]: 44 | print(group) 45 | assign_fact_to_group(conn, fact, group) 46 | 47 | conn.close() 48 | 49 | 50 | ## ultimately wwell do the vector store in the main db. so when we eventually starti adding new facts well do so by checking similar facts 51 | # there and then if were doing the rag search well do a rag and then graph 52 | -------------------------------------------------------------------------------- /tests/postgres_conn_accommodation.py: -------------------------------------------------------------------------------- 1 | import psycopg2 2 | from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT 3 | import os 4 | import sqlite3 5 | from npcpy.npc_compiler import NPC 6 | 7 | 8 | def setup_postgres_db(): 9 | """Set up PostgreSQL database and test data""" 10 | try: 11 | # First connect to default 'postgres' database to create/drop our test db 12 | conn = psycopg2.connect( 13 | dbname="postgres", # Connect to default db first 14 | user="caug", 15 | password="gobears", 16 | host="localhost", 17 | ) 18 | conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) 19 | 20 | with conn.cursor() as cur: 21 | # Drop and create database 22 | cur.execute("DROP DATABASE IF EXISTS npc_test") 23 | cur.execute("CREATE DATABASE npc_test") 24 | 25 | conn.close() 26 | 27 | # Now connect to our new database 28 | conn = psycopg2.connect( 29 | dbname="npc_test", user="caug", password="gobears", host="localhost" 30 | ) 31 | 32 | # Create tables 33 | with conn.cursor() as cur: 34 | cur.execute( 35 | """ 36 | CREATE TABLE users ( 37 | id SERIAL PRIMARY KEY, 38 | name VARCHAR(100), 39 | email VARCHAR(100), 40 | created_at TIMESTAMP 41 | ) 42 | """ 43 | ) 44 | 45 | cur.execute( 46 | """ 47 | CREATE TABLE posts ( 48 | id SERIAL PRIMARY KEY, 49 | user_id INTEGER REFERENCES users(id), 50 | title VARCHAR(200), 51 | content TEXT, 52 | created_at TIMESTAMP 53 | ) 54 | """ 55 | ) 56 | 57 | # Insert test data 58 | cur.execute( 59 | """ 60 | INSERT INTO users (name, email, created_at) VALUES 61 | ('Alice', 'alice@example.com', NOW()), 62 | ('Bob', 'bob@example.com', NOW()), 63 | ('Charlie', 'charlie@example.com', NOW()) 64 | """ 65 | ) 66 | 67 | cur.execute( 68 | """ 69 | INSERT INTO posts (user_id, title, content, created_at) VALUES 70 | (1, 'First Post', 'Hello World!', NOW()), 71 | (1, 'Second Post', 'More content...', NOW()), 72 | (2, 'Bob''s Post', 'This is interesting', NOW()), 73 | (3, 'Welcome', 'Hi everyone!', NOW()) 74 | """ 75 | ) 76 | 77 | conn.commit() 78 | print("PostgreSQL test database setup complete!") 79 | return conn 80 | 81 | except Exception as e: 82 | print(f"Error setting up PostgreSQL: {e}") 83 | return None 84 | 85 | 86 | def setup_sqlite(): 87 | """Set up SQLite database with test data""" 88 | # Create SQLite database file 89 | db_path = "test_sqlite.db" 90 | if os.path.exists(db_path): 91 | os.remove(db_path) 92 | 93 | conn = 
sqlite3.connect(db_path) 94 | 95 | # Create tables 96 | conn.execute( 97 | """ 98 | CREATE TABLE users ( 99 | id INTEGER PRIMARY KEY AUTOINCREMENT, 100 | name TEXT, 101 | email TEXT, 102 | created_at TIMESTAMP 103 | ) 104 | """ 105 | ) 106 | 107 | conn.execute( 108 | """ 109 | CREATE TABLE posts ( 110 | id INTEGER PRIMARY KEY AUTOINCREMENT, 111 | user_id INTEGER, 112 | title TEXT, 113 | content TEXT, 114 | created_at TIMESTAMP, 115 | FOREIGN KEY (user_id) REFERENCES users(id) 116 | ) 117 | """ 118 | ) 119 | 120 | # Insert test data 121 | conn.execute( 122 | """ 123 | INSERT INTO users (name, email, created_at) VALUES 124 | ('Alice', 'alice@example.com', CURRENT_TIMESTAMP), 125 | ('Bob', 'bob@example.com', CURRENT_TIMESTAMP), 126 | ('Charlie', 'charlie@example.com', CURRENT_TIMESTAMP) 127 | """ 128 | ) 129 | 130 | conn.execute( 131 | """ 132 | INSERT INTO posts (user_id, title, content, created_at) VALUES 133 | (1, 'First Post', 'Hello World!', CURRENT_TIMESTAMP), 134 | (1, 'Second Post', 'More content...', CURRENT_TIMESTAMP), 135 | (2, 'Bob''s Post', 'This is interesting', CURRENT_TIMESTAMP), 136 | (3, 'Welcome', 'Hi everyone!', CURRENT_TIMESTAMP) 137 | """ 138 | ) 139 | 140 | conn.commit() 141 | print("SQLite test database setup complete!") 142 | return conn 143 | 144 | 145 | def test_database_setup(): 146 | """Test both database setups with the NPC class""" 147 | # Test SQLite 148 | sqlite_conn = setup_sqlite() 149 | sqlite_npc = NPC( 150 | name="SQLiteAnalyst", 151 | primary_directive="Analyze SQLite database", 152 | model="gpt-4o-mini", 153 | db_conn=sqlite_conn, 154 | ) 155 | 156 | # Test PostgreSQL 157 | postgres_conn = setup_postgres_db() 158 | postgres_npc = NPC( 159 | name="PostgresAnalyst", 160 | primary_directive="Analyze PostgreSQL database", 161 | model="gpt-4o-mini", 162 | db_conn=postgres_conn, 163 | ) 164 | 165 | # Test queries 166 | test_queries = [ 167 | "How many users do we have?", 168 | "Show me all posts with their authors", 169 | "Count posts per user", 170 | "Find users with more than one post", 171 | ] 172 | 173 | print("\nTesting SQLite:") 174 | for query in test_queries: 175 | print(f"\nQuery: {query}") 176 | result = sqlite_npc.analyze_db_data(query) 177 | print("Result:", result) 178 | 179 | print("\nTesting PostgreSQL:") 180 | for query in test_queries: 181 | print(f"\nQuery: {query}") 182 | result = postgres_npc.analyze_db_data(query) 183 | print("Result:", result) 184 | 185 | # Cleanup 186 | sqlite_conn.close() 187 | postgres_conn.close() 188 | if os.path.exists("test_sqlite.db"): 189 | os.remove("test_sqlite.db") 190 | 191 | 192 | if __name__ == "__main__": 193 | test_database_setup() 194 | -------------------------------------------------------------------------------- /tests/realtime_openai_voiceE_chat.py: -------------------------------------------------------------------------------- 1 | import websocket 2 | import pyaudio 3 | import numpy as np 4 | import json 5 | import threading 6 | import os 7 | 8 | import asyncio 9 | from openai import AsyncOpenAI 10 | 11 | 12 | async def main(): 13 | client = AsyncOpenAI() 14 | 15 | async with client.beta.realtime.connect( 16 | model="gpt-4o-mini-realtime-preview" 17 | ) as connection: 18 | await connection.session.update(session={"modalities": ["text"]}) 19 | 20 | await connection.conversation.item.create( 21 | item={ 22 | "type": "message", 23 | "role": "user", 24 | "content": [{"type": "input_text", "text": "Say hello!"}], 25 | } 26 | ) 27 | await connection.response.create() 28 | 29 | async for event in 
connection: 30 | if event.type == "response.text.delta": 31 | print(event.delta, flush=True, end="") 32 | 33 | elif event.type == "response.text.done": 34 | print() 35 | 36 | elif event.type == "response.done": 37 | break 38 | 39 | 40 | asyncio.run(main()) 41 | -------------------------------------------------------------------------------- /tests/template_tests/npc_team/budgeto.npc: -------------------------------------------------------------------------------- 1 | name: budgeto 2 | primary_directive: You are budgeto, the marketing budget analyst AI. Your responsibility 3 | is to manage the marketing budget effectively, ensuring resources are allocated 4 | efficiently for all campaigns. Assist the team in tracking expenditures and optimizing 5 | financial strategies for marketing efforts. 6 | -------------------------------------------------------------------------------- /tests/template_tests/npc_team/funnel.npc: -------------------------------------------------------------------------------- 1 | name: funnel 2 | primary_directive: You are funnel, the sales pipeline manager AI. Your duty is to 3 | oversee the sales pipeline management, providing insights on tracking progress and 4 | optimizing conversion rates. Help the team focus on moving leads through the pipeline 5 | efficiently. 6 | -------------------------------------------------------------------------------- /tests/template_tests/npc_team/relatio.npc: -------------------------------------------------------------------------------- 1 | name: relatio 2 | primary_directive: You are relatio, the customer relationship specialist AI. Your 3 | role involves managing customer relationships and ensuring satisfaction throughout 4 | the sales process. Provide strategies to nurture clients and maintain long-lasting 5 | connections. 6 | -------------------------------------------------------------------------------- /tests/template_tests/npc_team/slean.npc: -------------------------------------------------------------------------------- 1 | name: slean 2 | primary_directive: You are slean, the marketing innovator AI. Your responsibility 3 | is to create marketing campaigns and manage them effectively, while also thinking 4 | creatively to solve marketing challenges. Guide the strategy that drives customer 5 | engagement and brand awareness in the logging sector. 6 | -------------------------------------------------------------------------------- /tests/template_tests/npc_team/turnic.npc: -------------------------------------------------------------------------------- 1 | name: turnic 2 | primary_directive: You are turnic, the sales strategist AI. Your role is to assist 3 | with lead generation, closing deals, and managing the sales funnel specifically 4 | for the logging industry. Provide straightforward solutions to help sales professionals 5 | achieve quick results. 6 | -------------------------------------------------------------------------------- /tests/test_api.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Testing health endpoint..." 4 | curl -s http://localhost:5337/api/health | jq '.' 5 | 6 | echo -e "\nTesting execute endpoint..." 7 | curl -s -X POST http://localhost:5337/api/execute \ 8 | -H "Content-Type: application/json" \ 9 | -d '{"commandstr": "hello world", "currentPath": "/media/caug/extradrive1/npcww/npcsh", "conversationId": "test124"}' | jq '.' 10 | 11 | echo -e "\nTesting conversations endpoint..." 
12 | curl -s "http://localhost:5337/api/conversations?path=/tmp" | jq '.' 13 | 14 | echo -e "\nTesting conversation messages endpoint..." 15 | curl -s http://localhost:5337/api/conversation/test123/messages | jq '.' 16 | 17 | -------------------------------------------------------------------------------- /tests/test_automator_tool.py: -------------------------------------------------------------------------------- 1 | from npcpy.npc_compiler import NPC, Jinx 2 | 3 | # Load the jinx 4 | automator = Jinx(jinx_path="~/npcww/npcsh/npcpy/npc_team/jinxs/automator.jinx") 5 | 6 | # Create an NPC instance 7 | npc = NPC(name="sibiji", 8 | primary_directive="You're an assistant focused on helping users understand their documents.", 9 | jinxs=[automator]) 10 | 11 | result = npc.execute_jinx( 12 | "automator", 13 | { 14 | "request": "any time a new download appears, open the downloads folder", 15 | "type": "trigger" 16 | } 17 | ) 18 | 19 | print(result) 20 | 21 | -------------------------------------------------------------------------------- /tests/test_bash_cli.sh: -------------------------------------------------------------------------------- 1 | # Basic tests and help commands 2 | npc help 3 | npc --help 4 | 5 | # Test serve command with different port specification formats 6 | npc serve --port=5340 7 | npc serve --port 5341 8 | npc serve -port=5342 9 | npc serve -port 5343 10 | npc serve --cors="http://localhost:3000,http://localhost:8080" 11 | 12 | # Test search commands with different providers 13 | npc search "python asyncio tutorial" 14 | npc search -p google "python asyncio tutorial" 15 | npc search --provider perplexity "machine learning basics" 16 | 17 | # Test sample/LLM commands 18 | npc sample "Write a hello world program in Python" 19 | npc sample "Compare Python and JavaScript" --model gpt-4 --provider openai 20 | 21 | # Test file and image processing 22 | npc rag "litellm" -f ../setup.py 23 | npc rag --file /path/to/document.txt "What is this document about?" 24 | npc ots /path/to/screenshot.png "What's happening in this image?" 25 | npc vixynt "A beautiful sunset over mountains" filename=sunset.png height=512 width=768 26 | 27 | # Test with different NPC selection 28 | npc -n custom_npc "Tell me about yourself" 29 | npc --npc alternative_assistant "How can you help me?" 30 | 31 | # Test other route commands 32 | npc sleep 3 33 | npc jinxs 34 | npc init /tmp/new_npc_project 35 | npc wander "How to implement a cache system" 36 | npc plan "Create a new Python project with virtual environment" 37 | npc trigger "Update all npm packages in the current directory" 38 | 39 | # Testing command with multiple arguments and options 40 | npc serve --port 5338 --cors "http://localhost:3000" 41 | npc vixynt "A city made by pepsi" height=1024 width=1024 filename=pepsi_city.png 42 | npc rag -f /path/to/file1.txt -f /path/to/file2.txt "Compare these two documents" 43 | 44 | 45 | 46 | # Pipe file content to NPC sample command 47 | cat data.json | npc sample "Summarize this JSON data" 48 | 49 | # Pipe command output to NPC 50 | ls -la | npc sample "Explain what these files are" 51 | 52 | # Use grep to filter logs and have NPC analyze them 53 | grep ERROR /var/log/application.log | npc sample "What are the common error patterns?" 
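# More pipe examples using the same `npc sample` interface shown above;
# any command whose stdout is plain text can be summarized the same way
df -h | npc sample "Summarize disk usage and flag any filesystems that are nearly full"
git log --oneline -20 | npc sample "Summarize what changed recently in this repository"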
54 | 55 | # Use curl to get API response and analyze with NPC 56 | curl -s https://api.example.com/data | npc sample "Analyze this API response" 57 | 58 | # Create a multi-line prompt using heredoc 59 | cat << EOF | npc sample 60 | I need to understand how to structure a React application. 61 | What are the best practices for component organization? 62 | Should I use Redux or Context API for state management? 63 | EOF 64 | 65 | 66 | 67 | 68 | # Chain NPC commands using xargs 69 | npc search "machine learning algorithms" | xargs -I {} npc sample "Explain {} in detail" 70 | 71 | # Use output from one NPC command as input to another 72 | npc sample "Generate 5 test cases" | npc sample "Convert these test cases to JavaScript code" 73 | 74 | # Use NPC to generate code and then analyze it 75 | npc sample "Write a Python sorting algorithm" | npc sample "Review this code for efficiency" 76 | 77 | # Generate image description and then create image 78 | npc sample "Describe a futuristic cityscape" | xargs npc vixynt -------------------------------------------------------------------------------- /tests/test_chromadb.py: -------------------------------------------------------------------------------- 1 | import chromadb 2 | from sentence_transformers import SentenceTransformer 3 | 4 | # Initialize Chroma PersistentClient (persistent on disk) 5 | client = chromadb.PersistentClient( 6 | path="/home/caug/npcsh_chroma.db" 7 | ) # Specify the path for saving the database 8 | 9 | # Get the collection, creating it if it does not exist yet 10 | collection_name = "state_union" 11 | # get_or_create_collection avoids version-dependent checks against list_collections(), 12 | # which may return Collection objects or plain names depending on the chromadb version 13 | collection = client.get_or_create_collection(name=collection_name) 14 | 15 | 16 | # Initialize SentenceTransformer model for embeddings 17 | model = SentenceTransformer("all-MiniLM-L6-v2") 18 | 19 | # Sample texts to be added 20 | texts = ["Ketanji Brown Jackson is awesome", "foo", "bar"] 21 | 22 | # Generate embeddings for the texts 23 | embeddings = [model.encode(text) for text in texts] 24 | 25 | # Generate unique IDs for each document (you can use any unique identifier) 26 | ids = [str(i) for i in range(len(texts))] # Simple IDs: "0", "1", "2", ...
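# Note: chromadb's Collection.add also accepts whole lists in a single batched call,
# which would replace the per-document loop below, e.g. (left commented out here so the
# original flow of this script is unchanged):
#   collection.add(
#       documents=texts,
#       embeddings=[embedding.tolist() for embedding in embeddings],
#       ids=ids,
#   )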
27 | 28 | # Add the texts and embeddings to the Chroma collection 29 | for text, embedding, doc_id in zip(texts, embeddings, ids): 30 | collection.add( 31 | documents=[text], # List of documents (texts) 32 | metadatas=[None], # No metadata, pass None instead of empty dict 33 | embeddings=[embedding], # Corresponding embeddings 34 | ids=[doc_id], # Unique document IDs 35 | ) 36 | 37 | # Debugging: Check if texts were added 38 | print(f"Added {len(texts)} texts to Chroma collection.") 39 | 40 | # Querying: Example of querying the collection 41 | query = "Ketanji Brown Jackson is awesome" 42 | query_embedding = model.encode(query) 43 | 44 | # Query the collection for similar results 45 | results = collection.query(query_embeddings=[query_embedding], n_results=3) 46 | print(f"Query results: {results}") 47 | 48 | 49 | import chromadb 50 | 51 | # Initialize Chroma PersistentClient (persistent on disk) 52 | client = chromadb.PersistentClient(path="/home/caug/npcsh_chroma.db") 53 | 54 | 55 | # List all collections 56 | def list_collections(): 57 | collections = client.list_collections() 58 | print("Collections available:") 59 | for collection in collections: 60 | print(collection) 61 | 62 | 63 | # Inspect a specific collection 64 | def inspect_collection(collection_name): 65 | collection = client.get_collection(collection_name) 66 | print(f"Inspecting collection: {collection_name}") 67 | 68 | # List the first 5 documents (for example) 69 | results = collection.query( 70 | query_embeddings=[[0.0] * 384], n_results=5 71 | ) # Dummy all-zero query (384 dims matches all-MiniLM-L6-v2) to fetch some data 72 | print("First 5 results in the collection:") 73 | for result in results["documents"]: 74 | print(result) 75 | 76 | 77 | # CLI Loop 78 | def cli(): 79 | while True: 80 | print("\nCommands: [list] [inspect <collection_name>] [exit]") 81 | command = input("Enter command: ") 82 | 83 | if command == "list": 84 | list_collections() 85 | elif command.startswith("inspect"): 86 | collection_name = command.split(" ")[1] 87 | inspect_collection(collection_name) 88 | elif command == "exit": 89 | break 90 | else: 91 | print("Unknown command, try again.") 92 | 93 | 94 | if __name__ == "__main__": 95 | cli() 96 | -------------------------------------------------------------------------------- /tests/test_edit_file_tool.py: -------------------------------------------------------------------------------- 1 | from npcpy.npc_compiler import NPC, Jinx 2 | 3 | # Load the jinx 4 | file_editor = Jinx(jinx_path="~/npcww/npcsh/npcpy/npc_team/jinxs/edit_file.jinx") 5 | 6 | # Create an NPC instance 7 | npc = NPC(name="editor", primary_directive="You're a code editor assistant", jinxs=[file_editor]) 8 | 9 | # Execute the jinx 10 | result = npc.execute_jinx( 11 | "file_editor", 12 | { 13 | "file_path": "~/test_file.py", 14 | "edit_instructions": "Add a new function called multiply_numbers that takes two arguments and returns their product. Also modify the main section to call this new function with arguments 3 and 4, and print the result." 15 | } 16 | ) 17 | 18 | print(result) 19 | 20 | 21 | # Execute the jinx 22 | result = npc.execute_jinx( 23 | "file_editor", 24 | { 25 | "file_path": "~/test_file.py", 26 | "edit_instructions": "add a markov chain monte carlo sampler and come up with a simulation and add it to main."
27 | } 28 | ) 29 | 30 | print(result) -------------------------------------------------------------------------------- /tests/test_embedding_check.py: -------------------------------------------------------------------------------- 1 | import pysqlite3 as sqlite3 2 | import sqlite_vec 3 | 4 | from npcpy.llm_funcs import ( 5 | get_anthropic_embeddings, 6 | get_ollama_embeddings, 7 | get_openai_embeddings, 8 | ) 9 | 10 | db = sqlite3.connect("/home/caug/npcsh_history.db.sqlite") 11 | db.enable_load_extension(True) 12 | sqlite_vec.load(db) 13 | 14 | # (version,) = db.execute("select vss_version()").fetchone() 15 | # print(version) 16 | 17 | 18 | # Create virtual table 19 | db.execute( 20 | """ 21 | CREATE VIRTUAL TABLE if not exists 22 | vec_examples 23 | USING vec0( 24 | sample_embedding float[8] 25 | ); 26 | """ 27 | ) 28 | db.commit() 29 | 30 | # Insert test vectors with proper formatting 31 | db.execute( 32 | """ 33 | insert into vec_examples( sample_embedding) 34 | values 35 | ( '[-0.200, 0.250, 0.341, -0.211, 0.645, 0.935, -0.316, -0.924]'), 36 | ( '[0.443, -0.501, 0.355, -0.771, 0.707, -0.708, -0.185, 0.362]'), 37 | ( '[0.716, -0.927, 0.134, 0.052, -0.669, 0.793, -0.634, -0.162]'), 38 | ( '[-0.710, 0.330, 0.656, 0.041, -0.990, 0.726, 0.385, -0.958]'); 39 | """, 40 | ) 41 | db.commit() 42 | 43 | 44 | results = db.execute( 45 | """ 46 | SELECT 47 | * 48 | FROM vec_examples a 49 | """ 50 | ).fetchall() 51 | print("\nTest 1 - All rows:") 52 | for row in results: 53 | print(row) 54 | k = 2 # Set k to the number of nearest neighbors you want 55 | query = """ 56 | SELECT 57 | rowid, 58 | distance 59 | from vec_examples 60 | where sample_embedding match '[0.890, 0.544, 0.825, 0.961, 0.358, 0.0196, 0.521, 0.175]' 61 | order by distance limit 3; 62 | """ 63 | results = db.execute(query).fetchall() 64 | 65 | 66 | print("\nTest 2 - Pairwise distances:") 67 | for idx, (rowid, distance) in enumerate(results): 68 | print(f"Row {idx + 1} (ID: {rowid}): distance = {distance:.4f}") 69 | 70 | 71 | # Test 3: Find vectors within a certain distance 72 | threshold = 2.5 73 | results = db.execute( 74 | """ 75 | SELECT rowid, distance 76 | FROM vec_examples 77 | WHERE sample_embedding MATCH '[0.890, 0.544, 0.825, 0.961, 0.358, 0.0196, 0.521, 0.175]' 78 | ORDER BY distance 79 | LIMIT 3; 80 | """, 81 | ).fetchall() 82 | 83 | print(f"\nTest 3 - Vectors within distance {threshold}:") 84 | for rowid, distance in results: 85 | print(f"Row {rowid}: distance = {distance:.4f}") 86 | 87 | 88 | def example_embeddings(): 89 | texts = ["Example text 1", "Example text 2", "Example text 3"] 90 | 91 | # Ollama 92 | 93 | ollama_embeddings = get_ollama_embeddings(texts) 94 | 95 | print("Ollama Embeddings:") 96 | 97 | for embedding in ollama_embeddings: 98 | print(embedding) 99 | 100 | # OpenAI 101 | 102 | openai_embeddings = get_openai_embeddings(texts) 103 | 104 | print("\nOpenAI Embeddings:") 105 | 106 | for embedding in openai_embeddings: 107 | print(embedding) 108 | 109 | # Anthropic 110 | 111 | anthropic_embeddings = get_anthropic_embeddings(texts) 112 | 113 | print("\nAnthropic Embeddings:") 114 | 115 | for embedding in anthropic_embeddings: 116 | print(embedding) 117 | 118 | 119 | example_embeddings() 120 | -------------------------------------------------------------------------------- /tests/test_embedding_methods.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import random 3 | from typing import List 4 | from npcpy.llm_funcs import ( 5 | get_embeddings, 6 | 
search_similar_texts_for_model, 7 | ) # Ensure both functions are imported 8 | import chromadb 9 | from time import sleep 10 | 11 | 12 | class TestEmbeddingSearch(unittest.TestCase): 13 | def setUp(self) -> None: 14 | """ 15 | Set up the environment for testing. 16 | You can adjust the setUp method if you need to prepare certain conditions. 17 | """ 18 | self.texts = [ 19 | "This is a test sentence.", 20 | "How are you doing today?", 21 | "Embedding text for search.", 22 | "Chroma vector search is fun.", 23 | "Ollama embedding test.", 24 | ] 25 | self.provider = "ollama" 26 | self.model = "nomic-embed-text" 27 | self.top_k = 3 28 | self.client = chromadb.PersistentClient(path="/home/caug/npcsh_chroma.db") 29 | 30 | def test_embeddings_creation(self): 31 | """Test if embeddings are created correctly for Ollama.""" 32 | embeddings = get_embeddings( 33 | self.texts, provider=self.provider, model=self.model 34 | ) 35 | self.assertEqual(len(embeddings), len(self.texts)) 36 | self.assertTrue(all(isinstance(embedding, list) for embedding in embeddings)) 37 | self.assertTrue(all(len(embedding) > 0 for embedding in embeddings)) 38 | 39 | def test_embeddings_storage_in_chroma(self): 40 | """Test if embeddings are stored correctly in Chroma.""" 41 | embeddings = get_embeddings( 42 | self.texts, provider=self.provider, model=self.model 43 | ) 44 | collection_name = f"{self.provider}_{self.model}_embeddings" 45 | collection = self.client.get_collection(collection_name) 46 | 47 | # Ensure that the collection has the correct number of documents 48 | self.assertEqual(len(collection.get()["documents"]), len(self.texts)) 49 | 50 | def test_search_similar_texts(self): 51 | query_embedding = get_embeddings( 52 | ["Embedding text for search."], provider=self.provider, model=self.model 53 | )[0] 54 | 55 | results = search_similar_texts_for_model( 56 | query_embedding, 57 | embedding_model=self.model, 58 | provider=self.provider, 59 | top_k=5, 60 | ) 61 | print(results) 62 | self.assertTrue(len(results) > 0) # Ensure there are some results 63 | 64 | def test_search_with_multiple_results(self): 65 | """Test searching and getting multiple results.""" 66 | embeddings = get_embeddings( 67 | self.texts, provider=self.provider, model=self.model 68 | ) 69 | 70 | search_text = "Embedding text for search." 71 | search_embedding = get_embeddings( 72 | [search_text], provider=self.provider, model=self.model 73 | )[0] 74 | 75 | # Perform the search 76 | results = search_similar_texts_for_model( 77 | search_embedding, 78 | embedding_model=self.model, 79 | provider=self.provider, 80 | top_k=5, 81 | ) 82 | 83 | # Ensure multiple results are returned 84 | self.assertGreater(len(results), 1) 85 | 86 | # Check if the results are properly formatted 87 | print(results, type(results), len(results)) 88 | self.assertTrue( 89 | all( 90 | "id" in result and "text" in result and "score" in result 91 | for result in results 92 | ) 93 | ) 94 | 95 | def test_search_empty_results(self): 96 | embedding_dim = 768 97 | query_embedding = [0.0] * embedding_dim # Use a neutral embedding 98 | results = search_similar_texts_for_model( 99 | query_embedding, 100 | embedding_model=self.model, 101 | provider=self.provider, 102 | top_k=5, 103 | ) 104 | 105 | print(results, len(results), type(results)) 106 | 107 | def test_very_high_top_k(self): 108 | """Test search with a very high 'top_k' to ensure it doesn't break.""" 109 | search_text = "go bears." 
110 | search_embedding = get_embeddings( 111 | [search_text], provider=self.provider, model=self.model 112 | )[0] 113 | 114 | # Perform search with a high value for top_k 115 | results = search_similar_texts_for_model( 116 | search_embedding, 117 | embedding_model=self.model, 118 | provider=self.provider, 119 | ) 120 | 121 | # Ensure no error occurs and results are returned 122 | self.assertGreater(len(results), 0) 123 | 124 | def tearDown(self) -> None: 125 | """Clean up resources after tests.""" 126 | collection_name = f"{self.provider}_{self.model}_embeddings" 127 | collection = self.client.get_collection(collection_name) 128 | 129 | # Get current documents in the collection 130 | collection_data = collection.get() 131 | stored_ids = collection_data["ids"] # Get the IDs of stored documents 132 | 133 | # Delete only if the IDs exist in the collection 134 | ids_to_delete = [str(i) for i in range(len(self.texts))] 135 | ids_to_delete = [ 136 | id_ for id_ in ids_to_delete if id_ in stored_ids 137 | ] # Filter out non-existing IDs 138 | 139 | if ids_to_delete: 140 | collection.delete(ids=ids_to_delete) # Delete the existing IDs 141 | 142 | 143 | if __name__ == "__main__": 144 | unittest.main() 145 | -------------------------------------------------------------------------------- /tests/test_file_chat_tool.py: -------------------------------------------------------------------------------- 1 | from npcpy.npc_compiler import NPC, Jinx 2 | 3 | # Load the jinx 4 | file_chat = Jinx(jinx_path="~/npcww/npcsh/npcpy/npc_team/jinxs/file_chat.jinx") 5 | 6 | # Create an NPC instance 7 | npc = NPC(name="sibiji", primary_directive="You're an assistant focused on helping users understand their documents.", jinxs=[file_chat]) 8 | 9 | result = npc.execute_jinx( 10 | "file_chat", 11 | { 12 | "files_list": ["/home/caug/npcww/npcsh/test_data/yuan2004.pdf"] 13 | } 14 | ) 15 | 16 | print(result) 17 | 18 | -------------------------------------------------------------------------------- /tests/test_helpers.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/tests/test_helpers.py -------------------------------------------------------------------------------- /tests/test_knowledge_graph_rag.py: -------------------------------------------------------------------------------- 1 | from npcpy.memory.knowledge_graph import process_text_with_chroma, answer_with_rag 2 | import os 3 | 4 | # Paths to your databases 5 | kuzu_db_path = os.path.expanduser("~/npcsh_graph.db") 6 | chroma_db_path = os.path.expanduser("~/npcsh_chroma.db") 7 | 8 | # Process text and store facts with embeddings from your function 9 | text = """ 10 | npcsh is a python-based command-line tool designed to integrate Large Language Models (LLMs) 11 | into one's daily workflow by making them available through the command line shell.
12 | """ 13 | 14 | facts = process_text_with_chroma( 15 | kuzu_db_path=kuzu_db_path, 16 | chroma_db_path=chroma_db_path, 17 | text=text, 18 | path="~/npcww/npcsh/docs/", 19 | ) 20 | 21 | # Later, answer a question using RAG 22 | answer = answer_with_rag( 23 | query="What can I do with npcsh?", 24 | kuzu_db_path=kuzu_db_path, 25 | chroma_db_path=chroma_db_path, 26 | model="gpt-4o-mini", 27 | provider="openai", 28 | embedding_model="text-embedding-3-small", 29 | ) 30 | 31 | print(answer) 32 | -------------------------------------------------------------------------------- /tests/test_llm_funcs.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import json 3 | import sqlite3 4 | import pandas as pd 5 | 6 | 7 | from unittest.mock import MagicMock, patch # used by the mocked tests below 8 | import json 9 | from npcpy.llm_funcs import ( 10 | get_stream, 11 | generate_image_gemini, 12 | generate_image_openai, 13 | get_llm_response, 14 | execute_llm_command, 15 | check_llm_command, 16 | ) 17 | import pytest 18 | from npcpy.llm_funcs import get_stream # Adjust with your actual import path 19 | 20 | # You can define global variables or fixtures if needed 21 | 22 | 23 | def test_generate_single_image_gemini(): 24 | prompt = "A drummer fading into cookies and cream pudding" 25 | images = generate_image_gemini(prompt, number_of_images=1) 26 | print(images) # Output: ["generated_image_1.jpg"] 27 | 28 | 29 | def test_generate_multiple_images_gemini(): 30 | prompt = """A plate that has been shattered in half. 31 | half of a cheesecake. 32 | Both sit on top of a table that has been cut in half.""" 33 | images = generate_image_gemini(prompt, number_of_images=3, aspect_ratio="16:9") 34 | print( 35 | images 36 | ) # Output: ["generated_image_1.jpg", "generated_image_2.jpg", "generated_image_3.jpg"] 37 | 38 | 39 | def test_execute_data_operations(mock_command_history): 40 | with patch( 41 | "npcpy.llm_funcs.get_llm_response", 42 | return_value={"engine": "SQL", "data_operation": "SELECT * FROM test_table"}, 43 | ), patch("sqlite3.connect") as mock_connect: 44 | mock_cursor = MagicMock() 45 | mock_connect.return_value.__enter__.return_value.cursor.return_value = ( 46 | mock_cursor 47 | ) 48 | mock_cursor.fetchall.return_value = [(1, "Alice"), (2, "Bob")] 49 | result = execute_data_operations("Get all data", mock_command_history) 50 | assert result["engine"] == "SQL" 51 | assert result["data_operation"] == "SELECT * FROM test_table" 52 | 53 | 54 | def test_execute_llm_command(mock_command_history): 55 | with patch( 56 | "npcpy.llm_funcs.get_llm_response", return_value={"bash_command": "echo 'Hello'"} 57 | ), patch("subprocess.run") as mock_run: 58 | mock_run.return_value = MagicMock(returncode=0, stdout="Hello") 59 | result = execute_llm_command("Print Hello", mock_command_history) 60 | assert "Hello" in result 61 | 62 | 63 | def test_check_llm_command(): 64 | with patch( 65 | "npcpy.llm_funcs.get_llm_response", 66 | return_value={"is_command": "yes", "explanation": "This is a command"}, 67 | ), patch("npcpy.llm_funcs.execute_llm_command", return_value="Command executed"): 68 | result = check_llm_command("ls -l") 69 | assert result == "Command executed" 70 | 71 | 72 | def test_get_llm_response(): 73 | with patch( 74 | "npcpy.llm_funcs.get_llm_response", return_value={"response": "This is an answer"} 75 | ): 76 | result = get_llm_response( 77 | "What is the capital of France?", 78 | ) 79 | assert result == "This is an answer" 80 | 81 | 82 | import pytest 83 | from pydantic import ValidationError 84 | from npcpy.llm_funcs import get_ollama_response, 
get_openai_response 85 | 86 | from pydantic import BaseModel 87 | 88 | 89 | class Country(BaseModel): 90 | name: str 91 | capital: str 92 | languages: list[str] 93 | 94 | 95 | # Test for get_ollama_response 96 | def test_get_response(): 97 | # Given a prompt that is expected to return a structured response 98 | prompt = "Tell me about Canada." 99 | 100 | # When calling get_ollama_response with the schema 101 | response = get_response(prompt, model="llama3.2", provider="ollama", format=Country) 102 | 103 | # Then we verify that the response matches our expected structure 104 | assert isinstance(response, Country) 105 | assert response.name == "Canada" 106 | assert response.capital == "Ottawa" 107 | assert "English" in response.languages 108 | assert "French" in response.languages 109 | 110 | 111 | def test_get_stream(): 112 | # Given a prompt that is expected to return a structured response 113 | prompt = "Tell me about Canada." 114 | 115 | # When calling get_stream with the schema 116 | stream = get_stream(prompt, model="llama3.2", provider="ollama", format=Country) 117 | 118 | # Then we verify that the response matches our expected structure 119 | assert isinstance(stream, list) 120 | assert len(stream) > 0 121 | for response in stream: 122 | assert isinstance(response, Country) 123 | assert response.name == "Canada" 124 | assert response.capital == "Ottawa" 125 | assert "English" in response.languages 126 | assert "French" in response.languages 127 | -------------------------------------------------------------------------------- /tests/test_networkx_vis.py: -------------------------------------------------------------------------------- 1 | from npcpy.memory.knowledge_graph import * 2 | 3 | db_path = "./demo.db" # Specify your database path here 4 | path = "~/npcww/npcsh/tests/" 5 | 6 | # First create some test data 7 | conn = init_db(db_path, drop=True) # Start fresh 8 | 9 | # Create test groups 10 | groups = ["Programming Jinxs", "AI Features", "Shell Integration", "Development"] 11 | 12 | for group in groups: 13 | create_group(conn, group) 14 | 15 | # Insert test facts 16 | facts = [ 17 | "npcsh is a Python-based command-line tool", 18 | "It integrates LLMs into daily workflow", 19 | "Users can execute bash commands directly", 20 | "Supports multiple AI models including GPT-4", 21 | "Provides voice control through /whisper command", 22 | "Can be extended with custom Python tools", 23 | ] 24 | 25 | for fact in facts: 26 | insert_fact(conn, fact, path) 27 | 28 | # Create relationships 29 | relationships = [ 30 | ("Programming Jinxs", "npcsh is a Python-based command-line tool"), 31 | ("Programming Tools", "Can be extended with custom Python tools"), 32 | ("AI Features", "It integrates LLMs into daily workflow"), 33 | ("AI Features", "Supports multiple AI models including GPT-4"), 34 | ("AI Features", "Provides voice control through /whisper command"), 35 | ("Shell Integration", "Users can execute bash commands directly"), 36 | ("Development", "Can be extended with custom Python tools"), 37 | ("Development", "npcsh is a Python-based command-line tool"), 38 | ] 39 | 40 | for group, fact in relationships: 41 | assign_fact_to_group(conn, fact, group) 42 | 43 | # Now visualize 44 | visualize_graph(conn) 45 | conn.close() 46 | -------------------------------------------------------------------------------- /tests/test_npcsh.py: -------------------------------------------------------------------------------- 1 | import pexpect 2 | import sys 3 | import time 4 | import re 5 | import pytest 6 | 
from npcpy.memory.command_history import CommandHistory 7 | from npcpy.llm_funcs import ( 8 | get_llm_response, 9 | execute_llm_command, 10 | generate_image, 11 | check_llm_command, 12 | ) 13 | 14 | 15 | def test_npcsh(): 16 | # Start the npcsh process 17 | npcsh = pexpect.spawn("npcsh", encoding="utf-8", timeout=30) 18 | npcsh.logfile = sys.stdout # Log output to console for visibility 19 | 20 | # Wait for the prompt 21 | npcsh.expect("npcsh>") 22 | 23 | # Test 1: Compile the foreman NPC 24 | npcsh.sendline("/compile foreman.npc") 25 | npcsh.expect("Compiled NPC profile:") 26 | npcsh.expect("npcsh>") 27 | 28 | # Test 2: Switch to foreman NPC 29 | npcsh.sendline("/foreman") 30 | npcsh.expect("Switched to NPC: foreman") 31 | npcsh.expect("foreman>") 32 | 33 | # Test 3: Test weather_jinx 34 | npcsh.sendline("What's the weather in Tokyo?") 35 | # Expect the assistant to provide a weather update 36 | npcsh.expect("The weather in .* is", timeout=60) 37 | npcsh.expect("foreman>") 38 | print("Test 3 passed: weather_jinx executed successfully.") 39 | time.sleep(1) 40 | 41 | # Test 4: Test calculator jinx 42 | npcsh.sendline("Calculate the sum of 2 and 3.") 43 | npcsh.expect("The result of .* is 5", timeout=30) 44 | npcsh.expect("foreman>") 45 | print("Test 4 passed: calculator jinx executed successfully.") 46 | time.sleep(1) 47 | 48 | # Test 5: Test database_query jinx 49 | npcsh.sendline("Find all users with the role 'admin'.") 50 | npcsh.expect("Here are the results:", timeout=30) 51 | npcsh.expect("foreman>") 52 | print("Test 5 passed: database_query jinx executed successfully.") 53 | time.sleep(1) 54 | 55 | # Exit npcsh 56 | npcsh.sendline("/exit") 57 | npcsh.expect(pexpect.EOF) 58 | 59 | 60 | def test_command_history(): 61 | db_path = ":memory:" 62 | command_history = CommandHistory(db_path) 63 | command_history.add_command( 64 | "test_command", "test_subcommands", "test_output", "test_location" 65 | ) 66 | history = command_history.get_all() 67 | assert len(history) == 1 68 | assert history[0][2] == "test_command" 69 | command_history.close() 70 | 71 | 72 | def test_llm_functions(): 73 | response = get_llm_response("Hello, how are you?") 74 | assert "response" in response 75 | 76 | command_history = CommandHistory(":memory:") 77 | result = execute_llm_command("echo Hello", command_history) 78 | assert "output" in result 79 | 80 | image_path = generate_image("A sunny day in the park", "dall-e-2", "openai") 81 | assert image_path is not None 82 | 83 | check_result = check_llm_command("echo Hello", command_history) 84 | assert "output" in check_result 85 | 86 | 87 | 88 | 89 | 90 | def test_command_history_search(): 91 | db_path = ":memory:" 92 | command_history = CommandHistory(db_path) 93 | command_history.add_command( 94 | "test_command", "test_subcommands", "test_output", "test_location" 95 | ) 96 | search_results = command_history.search("test_command") 97 | assert len(search_results) == 1 98 | assert search_results[0][2] == "test_command" 99 | command_history.close() 100 | 101 | 102 | def test_npcsh_command_history(): 103 | npcsh = pexpect.spawn("npcsh", encoding="utf-8", timeout=30) 104 | npcsh.logfile = sys.stdout # Log output to console for visibility 105 | 106 | # Wait for the prompt 107 | npcsh.expect("npcsh>") 108 | 109 | # Test command history 110 | npcsh.sendline("echo Hello") 111 | npcsh.expect("Hello") 112 | npcsh.expect("npcsh>") 113 | 114 | npcsh.sendline("/history") 115 | npcsh.expect("1. 
.* echo Hello") 116 | npcsh.expect("npcsh>") 117 | 118 | # Exit npcsh 119 | npcsh.sendline("/exit") 120 | npcsh.expect(pexpect.EOF) 121 | 122 | 123 | def test_npcsh_llm_functions(): 124 | npcsh = pexpect.spawn("npcsh", encoding="utf-8", timeout=30) 125 | npcsh.logfile = sys.stdout # Log output to console for visibility 126 | 127 | # Wait for the prompt 128 | npcsh.expect("npcsh>") 129 | 130 | # Test LLM command execution 131 | npcsh.sendline("/cmd echo Hello") 132 | npcsh.expect("Hello") 133 | npcsh.expect("npcsh>") 134 | 135 | # Test LLM question execution 136 | npcsh.sendline("/question What is the capital of France?") 137 | npcsh.expect("The capital of France is Paris.") 138 | npcsh.expect("npcsh>") 139 | 140 | # Exit npcsh 141 | npcsh.sendline("/exit") 142 | npcsh.expect(pexpect.EOF) 143 | 144 | 145 | def test_npcsh_npc_compilation(): 146 | npcsh = pexpect.spawn("npcsh", encoding="utf-8", timeout=30) 147 | npcsh.logfile = sys.stdout # Log output to console for visibility 148 | 149 | # Wait for the prompt 150 | npcsh.expect("npcsh>") 151 | 152 | # Test NPC compilation 153 | npcsh.sendline("/compile foreman.npc") 154 | npcsh.expect("Compiled NPC profile:") 155 | npcsh.expect("npcsh>") 156 | 157 | # Test jinx execution 158 | npcsh.sendline("/foreman") 159 | npcsh.expect("Switched to NPC: foreman") 160 | npcsh.expect("foreman>") 161 | 162 | npcsh.sendline("Calculate the sum of 2 and 3.") 163 | npcsh.expect("The result of .* is 5", timeout=30) 164 | npcsh.expect("foreman>") 165 | 166 | # Exit npcsh 167 | npcsh.sendline("/exit") 168 | npcsh.expect(pexpect.EOF) 169 | -------------------------------------------------------------------------------- /tests/test_npcteam.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | import os 4 | from npcsh.npc_compiler import NPC, Team, Jinx 5 | 6 | 7 | # Create test data and save to CSV 8 | def create_test_data(filepath="sales_data.csv"): 9 | sales_data = pd.DataFrame( 10 | { 11 | "date": pd.date_range(start="2024-01-01", periods=90), 12 | "revenue": np.random.normal(10000, 2000, 90), 13 | "customer_count": np.random.poisson(100, 90), 14 | "avg_ticket": np.random.normal(100, 20, 90), 15 | "region": np.random.choice(["North", "South", "East", "West"], 90), 16 | "channel": np.random.choice(["Online", "Store", "Mobile"], 90), 17 | } 18 | ) 19 | 20 | # Add patterns to make data more realistic 21 | sales_data["revenue"] *= 1 + 0.3 * np.sin( 22 | np.pi * np.arange(90) / 30 23 | ) # Seasonal pattern 24 | sales_data.loc[sales_data["channel"] == "Mobile", "revenue"] *= 1.1 # Mobile growth 25 | sales_data.loc[ 26 | sales_data["channel"] == "Online", "customer_count" 27 | ] *= 1.2 # Online customer growth 28 | 29 | sales_data.to_csv(filepath, index=False) 30 | return filepath, sales_data 31 | 32 | 33 | code_execution_jinx = Jinx( 34 | { 35 | "jinx_name": "execute_code", 36 | "description": """Executes a Python code block with access to pandas, 37 | numpy, and matplotlib. 38 | Results should be stored in the 'results' dict to be returned. 39 | The only input should be a single code block with \n characters included. 40 | The code block must use only the libraries or methods contained withen the 41 | pandas, numpy, and matplotlib libraries or using builtin methods. 42 | do not include any json formatting or markdown formatting. 43 | 44 | When generating your script, the final output must be encoded in a variable 45 | named "output". e.g. 
46 | 47 | output = some_analysis_function(inputs, derived_data_from_inputs) 48 | Adapt accordingly based on the scope of the analysis 49 | 50 | """, 51 | "inputs": ["script"], 52 | "steps": [ 53 | { 54 | "engine": "python", 55 | "code": """{{script}}""", 56 | } 57 | ], 58 | } 59 | ) 60 | 61 | # Analytics team definition 62 | analytics_team = [ 63 | { 64 | "name": "analyst", 65 | "primary_directive": "You analyze sales performance data, focusing on revenue trends, customer behavior metrics, and market indicators. Your expertise is in extracting actionable insights from complex datasets.", 66 | "model": "gpt-4o-mini", 67 | "provider": "openai", 68 | "jinxs": [code_execution_jinx], # Only the code execution jinx 69 | }, 70 | { 71 | "name": "researcher", 72 | "primary_directive": "You specialize in causal analysis and experimental design. Given data insights, you determine what factors drive observed patterns and design tests to validate hypotheses.", 73 | "model": "gpt-4o-mini", 74 | "provider": "openai", 75 | "jinxs": [code_execution_jinx], # Only the code execution jinx 76 | }, 77 | { 78 | "name": "engineer", 79 | "primary_directive": "You implement data pipelines and optimize data processing. When given analysis requirements, you create efficient workflows to automate insights generation.", 80 | "model": "gpt-4o-mini", 81 | "provider": "openai", 82 | "jinxs": [code_execution_jinx], # Only the code execution jinx 83 | }, 84 | ] 85 | 86 | 87 | def create_analytics_team(): 88 | # Initialize NPCs with just the code execution jinx 89 | npcs = [] 90 | for npc_data in analytics_team: 91 | npc = NPC( 92 | name=npc_data["name"], 93 | primary_directive=npc_data["primary_directive"], 94 | model=npc_data["model"], 95 | provider=npc_data["provider"], 96 | jinxs=[code_execution_jinx], # Only code execution jinx 97 | ) 98 | npcs.append(npc) 99 | 100 | # Create coordinator with just code execution jinx 101 | coordinator = NPC( 102 | name="coordinator", 103 | primary_directive="You coordinate the analytics team, ensuring each specialist contributes their expertise effectively. You synthesize insights and manage the workflow.", 104 | model="gpt-4o-mini", 105 | provider="openai", 106 | jinxs=[code_execution_jinx], # Only code execution jinx 107 | ) 108 | 109 | # Create team 110 | team = Team(npcs=npcs, foreman=coordinator) 111 | return team 112 | 113 | 114 | def main(): 115 | # Create and save test data 116 | data_path, sales_data = create_test_data() 117 | 118 | # Initialize team 119 | team = create_analytics_team() 120 | 121 | # Run analysis - updated prompt to reflect code execution approach 122 | results = team.orchestrate( 123 | f""" 124 | Analyze the sales data at {data_path} to: 125 | 1. Identify key performance drivers 126 | 2. Determine if mobile channel growth is significant 127 | 3. Recommend tests to validate growth hypotheses 128 | 129 | Here is a header for the data file at {data_path}: 130 | {sales_data.head()} 131 | 132 | When working with dates, ensure that date columns are converted from raw strings. e.g. use the pd.to_datetime function. 133 | 134 | 135 | When working with potentially messy data, handle null values by using nan versions of numpy functions or 136 | by filtering them with a mask . 137 | 138 | Use Python code execution to perform the analysis - load the data and perform statistical analysis directly. 
139 | """ 140 | ) 141 | 142 | print(results) 143 | 144 | # Cleanup 145 | os.remove(data_path) 146 | 147 | 148 | if __name__ == "__main__": 149 | main() 150 | -------------------------------------------------------------------------------- /tests/test_openai_image_edit.py: -------------------------------------------------------------------------------- 1 | from openai import OpenAI 2 | import base64 3 | client = OpenAI() 4 | prompt = """ 5 | make a logo for a product called guac that has guac in dark green 6 | and then in the "a" of the word is a bowl of guacamole. 7 | """ 8 | result = client.images.edit( 9 | model="gpt-image-1", 10 | image= open("guac_shell.png", "rb"), 11 | prompt=prompt 12 | ) 13 | image_base64 = result.data[0].b64_json 14 | image_bytes = base64.b64decode(image_base64) 15 | # Save the image to a file 16 | with open("guac_logo_edit.png", "wb") as f: 17 | f.write(image_bytes) -------------------------------------------------------------------------------- /tests/test_shell_helpers.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os 3 | import sqlite3 4 | import tempfile 5 | from pathlib import Path 6 | from npcpy.modes.shell_helpers import execute_command 7 | from npcpy.memory.command_history import CommandHistory 8 | from npcpy.npc_sysenv import ( 9 | get_system_message, 10 | lookup_provider, 11 | NPCSH_STREAM_OUTPUT, 12 | get_available_tables, 13 | ) 14 | 15 | def test_execute_slash_commands(): 16 | """Test various slash commands""" 17 | 18 | result = execute_command("/help") 19 | assert "Available Commands" in result["output"] 20 | 21 | 22 | def test_execute_command_with_model_override(): 23 | """Test command execution with model override""" 24 | result = execute_command( 25 | "@gpt-4o-mini What is 2+2?", 26 | ) 27 | assert result["output"] is not None 28 | 29 | 30 | def test_execute_command_who_was_simon_bolivar(): 31 | """Test the command for querying information about Simón Bolívar.""" 32 | result = execute_command( 33 | "What country was Simon Bolivar born in?", 34 | ) 35 | assert "venezuela" in str(result["output"]).lower() 36 | 37 | 38 | def test_execute_command_capital_of_france(): 39 | """Test the command for querying the capital of France.""" 40 | result = execute_command("What is the capital of France?") 41 | assert "paris" in str(result["output"]).lower() 42 | 43 | 44 | def test_execute_command_weather_info( ): 45 | """Test the command for getting weather information.""" 46 | result = execute_command( 47 | "search the web for the weather in Tokyo?" 
48 | ) 49 | print(result) # Add print for debugging 50 | assert "tokyo" in str(result["output"]).lower() 51 | 52 | 53 | def test_execute_command_linked_list_implementation(): 54 | """Test the command for querying linked list implementation in Python.""" 55 | result = execute_command( 56 | " Tell me a way to implement a linked list in Python?", 57 | ) 58 | assert "class Node:" in str(result["output"]) 59 | assert "class LinkedList:" in str(result["output"]) 60 | 61 | 62 | def test_execute_command_inquiry_with_npcs( ): 63 | """Test inquiry using NPCs.""" 64 | result = execute_command( 65 | "/search -p duckduckgo who is the current us president", 66 | ) 67 | assert "President" in result["output"] # Check for presence of expected output 68 | 69 | 70 | def test_execute_command_rag_search( ): 71 | """Test the command for a RAG search.""" 72 | result = execute_command( 73 | "/rag -f dummy_linked_list.py linked list", 74 | ) 75 | 76 | print(result) # Print the result for debugging visibility 77 | # Instead of specific class search, check if it includes any relevant text 78 | assert ( 79 | "Found similar texts:" in result["output"] 80 | ) # Check for invocation acknowledgement 81 | assert "linked" in result["output"].lower() # Check for mention of linked list 82 | -------------------------------------------------------------------------------- /tests/test_stream_with_interrupts.py: -------------------------------------------------------------------------------- 1 | from npcsh.llm_funcs import stream_with_interrupts 2 | 3 | messages = [ 4 | { 5 | "role": "system", 6 | "content": "You are a helpful assistant. Ask questions naturally when you need more information.", 7 | }, 8 | {"role": "user", "content": "What's the weather like there?"}, 9 | ] 10 | 11 | # Run the streaming conversation and print the output 12 | for response in stream_with_interrupts( 13 | messages=messages, model="gpt-4o-mini", provider="openai" 14 | ): 15 | print(response, end="", flush=True) 16 | -------------------------------------------------------------------------------- /tests/test_tars.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NPC-Worldwide/npcpy/07b34f4a7fc65aa22ed4ac5f7b6e541a177e0756/tests/test_tars.py --------------------------------------------------------------------------------