├── .env.sample ├── .eslintignore ├── .eslintrc ├── .gitignore ├── .release-it.json ├── LICENSE ├── README.md ├── SECURITY.md ├── config.js ├── config ├── default.example.json ├── default.json └── dynamicPathwaysConfig.example.json ├── helper-apps ├── cortex-autogen │ ├── .funcignore │ ├── .gitignore │ ├── Dockerfile │ ├── OAI_CONFIG_LIST │ ├── agents.py │ ├── agents_extra.py │ ├── config.py │ ├── data_operations.py │ ├── function_app.py │ ├── host.json │ ├── main.py │ ├── prompts.py │ ├── prompts_extra.py │ ├── requirements.txt │ ├── search.py │ ├── test.sh │ ├── tools │ │ └── sasfileuploader.py │ └── utils.py ├── cortex-browser │ ├── .funcignore │ ├── .gitignore │ ├── Dockerfile │ ├── function_app.py │ ├── host.json │ └── requirements.txt ├── cortex-file-handler │ ├── .env.test.azure.sample │ ├── .env.test.gcs.sample │ ├── .env.test.sample │ ├── .gitignore │ ├── Dockerfile │ ├── INTERFACE.md │ ├── function.json │ ├── package-lock.json │ ├── package.json │ ├── scripts │ │ ├── setup-azure-container.js │ │ ├── setup-test-containers.js │ │ ├── test-azure.sh │ │ └── test-gcs.sh │ ├── src │ │ ├── blobHandler.js │ │ ├── constants.js │ │ ├── docHelper.js │ │ ├── fileChunker.js │ │ ├── helper.js │ │ ├── index.js │ │ ├── localFileHandler.js │ │ ├── redis.js │ │ ├── services │ │ │ ├── ConversionService.js │ │ │ ├── FileConversionService.js │ │ │ └── storage │ │ │ │ ├── AzureStorageProvider.js │ │ │ │ ├── GCSStorageProvider.js │ │ │ │ ├── LocalStorageProvider.js │ │ │ │ ├── StorageFactory.js │ │ │ │ ├── StorageProvider.js │ │ │ │ └── StorageService.js │ │ ├── start.js │ │ └── utils │ │ │ └── filenameUtils.js │ └── tests │ │ ├── FileConversionService.test.js │ │ ├── blobHandler.test.js │ │ ├── conversionResilience.test.js │ │ ├── fileChunker.test.js │ │ ├── fileUpload.test.js │ │ ├── files │ │ ├── DOCX_TestPage.docx │ │ └── tests-example.xls │ │ ├── getOperations.test.js │ │ ├── postOperations.test.js │ │ ├── start.test.js │ │ ├── storage │ │ ├── AzureStorageProvider.test.js │ │ ├── GCSStorageProvider.test.js │ │ ├── LocalStorageProvider.test.js │ │ ├── StorageFactory.test.js │ │ └── StorageService.test.js │ │ └── testUtils.helper.js ├── cortex-markitdown │ ├── .funcignore │ ├── .gitignore │ ├── MarkitdownConverterFunction │ │ ├── __init__.py │ │ └── function.json │ ├── README.md │ ├── host.json │ └── requirements.txt ├── cortex-realtime-voice-server │ ├── .env.sample │ ├── .gitignore │ ├── README.md │ ├── bun.lockb │ ├── client │ │ ├── .gitignore │ │ ├── bun.lockb │ │ ├── index.html │ │ ├── package.json │ │ ├── postcss.config.js │ │ ├── public │ │ │ ├── favicon.ico │ │ │ ├── index.html │ │ │ ├── logo192.png │ │ │ ├── logo512.png │ │ │ ├── manifest.json │ │ │ ├── robots.txt │ │ │ └── sounds │ │ │ │ ├── connect.mp3 │ │ │ │ └── disconnect.mp3 │ │ ├── src │ │ │ ├── App.test.tsx │ │ │ ├── App.tsx │ │ │ ├── SettingsModal.tsx │ │ │ ├── chat │ │ │ │ ├── Chat.tsx │ │ │ │ ├── ChatBubble.tsx │ │ │ │ ├── ChatBubbleLeft.tsx │ │ │ │ ├── ChatBubbleRight.tsx │ │ │ │ ├── ChatMessage.tsx │ │ │ │ ├── ChatMessageInput.tsx │ │ │ │ ├── ChatTile.tsx │ │ │ │ ├── audio │ │ │ │ │ ├── SoundEffects.ts │ │ │ │ │ ├── WavPacker.ts │ │ │ │ │ ├── WavRecorder.ts │ │ │ │ │ ├── WavStreamPlayer.ts │ │ │ │ │ ├── analysis │ │ │ │ │ │ ├── AudioAnalysis.ts │ │ │ │ │ │ └── constants.ts │ │ │ │ │ └── worklets │ │ │ │ │ │ ├── AudioProcessor.ts │ │ │ │ │ │ └── StreamProcessor.ts │ │ │ │ ├── components │ │ │ │ │ ├── AudioVisualizer.tsx │ │ │ │ │ ├── CopyButton.tsx │ │ │ │ │ ├── ImageOverlay.tsx │ │ │ │ │ ├── MicrophoneVisualizer.tsx │ │ │ │ │ 
└── ScreenshotCapture.tsx │ │ │ │ ├── hooks │ │ │ │ │ └── useWindowResize.ts │ │ │ │ └── utils │ │ │ │ │ └── audio.ts │ │ │ ├── index.css │ │ │ ├── index.tsx │ │ │ ├── logo.svg │ │ │ ├── react-app-env.d.ts │ │ │ ├── reportWebVitals.ts │ │ │ ├── setupTests.ts │ │ │ └── utils │ │ │ │ └── logger.ts │ │ ├── tailwind.config.js │ │ ├── tsconfig.json │ │ └── vite.config.ts │ ├── index.ts │ ├── package.json │ ├── src │ │ ├── ApiServer.ts │ │ ├── SocketServer.ts │ │ ├── Tools.ts │ │ ├── cortex │ │ │ ├── expert.ts │ │ │ ├── image.ts │ │ │ ├── memory.ts │ │ │ ├── reason.ts │ │ │ ├── search.ts │ │ │ ├── style.ts │ │ │ ├── utils.ts │ │ │ └── vision.ts │ │ ├── realtime │ │ │ ├── client.ts │ │ │ ├── realtimeTypes.ts │ │ │ ├── socket.ts │ │ │ ├── transcription.ts │ │ │ └── utils.ts │ │ └── utils │ │ │ ├── logger.ts │ │ │ └── prompt.ts │ └── tsconfig.json └── cortex-whisper-wrapper │ ├── .dockerignore │ ├── Dockerfile │ ├── app.py │ ├── docker-compose.debug.yml │ ├── docker-compose.yml │ ├── models │ └── .gitkeep │ └── requirements.txt ├── index.js ├── lib ├── cortexRequest.js ├── crypto.js ├── encodeCache.js ├── entityConstants.js ├── fastLruCache.js ├── gcpAuthTokenHelper.js ├── handleBars.js ├── keyValueStorageClient.js ├── logger.js ├── pathwayManager.js ├── pathwayTools.js ├── promiser.js ├── redisSubscription.js ├── requestExecutor.js ├── requestMonitor.js └── util.js ├── package-lock.json ├── package.json ├── pathways ├── basePathway.js ├── bias.js ├── bing.js ├── call_tools.js ├── categorize.js ├── chat.js ├── chat_code.js ├── chat_context.js ├── chat_jarvis.js ├── chat_persist.js ├── chat_title.js ├── code_human_input.js ├── code_review.js ├── cognitive_delete.js ├── cognitive_insert.js ├── cognitive_search.js ├── complete.js ├── dynamic │ └── pathways.json ├── edit.js ├── embeddings.js ├── entities.js ├── expand_story.js ├── format_paragraph_turbo.js ├── format_summarization.js ├── gemini_15_vision.js ├── gemini_vision.js ├── grammar.js ├── hashtags.js ├── headline.js ├── headline_custom.js ├── highlights.js ├── image.js ├── image_flux.js ├── image_recraft.js ├── jira_story.js ├── keywords.js ├── language.js ├── locations.js ├── paraphrase.js ├── quotes.js ├── rag.js ├── rag_jarvis.js ├── rag_search_helper.js ├── readme.js ├── release_notes.js ├── remove_content.js ├── retrieval.js ├── select_extension.js ├── select_services.js ├── sentiment.js ├── spelling.js ├── story_angles.js ├── styleguide │ └── styleguide.js ├── styleguidemulti.js ├── subhead.js ├── summarize_turbo.js ├── summary.js ├── system │ ├── entity │ │ ├── memory │ │ │ ├── shared │ │ │ │ └── sys_memory_helpers.js │ │ │ ├── sys_memory_format.js │ │ │ ├── sys_memory_lookup_required.js │ │ │ ├── sys_memory_manager.js │ │ │ ├── sys_memory_process.js │ │ │ ├── sys_memory_required.js │ │ │ ├── sys_memory_topic.js │ │ │ ├── sys_memory_update.js │ │ │ ├── sys_read_memory.js │ │ │ ├── sys_save_memory.js │ │ │ └── sys_search_memory.js │ │ ├── sys_entity_agent.js │ │ ├── sys_entity_continue.js │ │ ├── sys_entity_start.js │ │ ├── sys_generator_ack.js │ │ ├── sys_generator_error.js │ │ ├── sys_generator_expert.js │ │ ├── sys_generator_image.js │ │ ├── sys_generator_memory.js │ │ ├── sys_generator_quick.js │ │ ├── sys_generator_reasoning.js │ │ ├── sys_generator_results.js │ │ ├── sys_generator_video_vision.js │ │ ├── sys_generator_voice_converter.js │ │ ├── sys_generator_voice_filler.js │ │ ├── sys_generator_voice_sample.js │ │ ├── sys_get_entities.js │ │ ├── sys_image_prompt_builder.js │ │ ├── sys_query_builder.js │ │ ├── sys_router_code.js │ │ 
├── sys_router_tool.js │ │ └── tools │ │ │ ├── shared │ │ │ └── sys_entity_tools.js │ │ │ ├── sys_tool_bing_search.js │ │ │ ├── sys_tool_browser.js │ │ │ ├── sys_tool_browser_jina.js │ │ │ ├── sys_tool_callmodel.js │ │ │ ├── sys_tool_coding.js │ │ │ ├── sys_tool_codingagent.js │ │ │ ├── sys_tool_cognitive_search.js │ │ │ ├── sys_tool_image.js │ │ │ ├── sys_tool_readfile.js │ │ │ ├── sys_tool_reasoning.js │ │ │ ├── sys_tool_remember.js │ │ │ └── sys_tool_verify.js │ ├── rest_streaming │ │ ├── sys_claude_35_sonnet.js │ │ ├── sys_claude_3_haiku.js │ │ ├── sys_google_gemini_chat.js │ │ ├── sys_ollama_chat.js │ │ ├── sys_ollama_completion.js │ │ ├── sys_openai_chat.js │ │ ├── sys_openai_chat_gpt4.js │ │ ├── sys_openai_chat_gpt4_32.js │ │ ├── sys_openai_chat_gpt4_turbo.js │ │ ├── sys_openai_chat_o1.js │ │ ├── sys_openai_chat_o3_mini.js │ │ └── sys_openai_completion.js │ ├── sys_parse_numbered_object_list.js │ ├── sys_repair_json.js │ └── workspaces │ │ ├── run_claude35_sonnet.js │ │ ├── run_claude3_haiku.js │ │ ├── run_gpt35turbo.js │ │ ├── run_gpt4.js │ │ └── run_gpt4_32.js ├── tags.js ├── taxonomy.js ├── test_cohere_summarize.js ├── timeline.js ├── topics.js ├── topics_sentiment.js ├── transcribe.js ├── transcribe_gemini.js ├── transcribe_neuralspace.js ├── translate.js ├── translate_azure.js ├── translate_context.js ├── translate_gpt4.js ├── translate_gpt4_omni.js ├── translate_gpt4_turbo.js ├── translate_subtitle.js ├── translate_subtitle_helper.js ├── translate_turbo.js └── vision.js ├── server ├── chunker.js ├── graphql.js ├── modelExecutor.js ├── parser.js ├── pathwayResolver.js ├── pathwayResponseParser.js ├── plugins │ ├── azureBingPlugin.js │ ├── azureCognitivePlugin.js │ ├── azureTranslatePlugin.js │ ├── azureVideoTranslatePlugin.js │ ├── claude3VertexPlugin.js │ ├── cohereGeneratePlugin.js │ ├── cohereSummarizePlugin.js │ ├── gemini15ChatPlugin.js │ ├── gemini15VisionPlugin.js │ ├── geminiChatPlugin.js │ ├── geminiVisionPlugin.js │ ├── localModelPlugin.js │ ├── modelPlugin.js │ ├── neuralSpacePlugin.js │ ├── ollamaChatPlugin.js │ ├── ollamaCompletionPlugin.js │ ├── openAiChatExtensionPlugin.js │ ├── openAiChatPlugin.js │ ├── openAiCompletionPlugin.js │ ├── openAiDallE3Plugin.js │ ├── openAiEmbeddingsPlugin.js │ ├── openAiImagePlugin.js │ ├── openAiReasoningPlugin.js │ ├── openAiReasoningVisionPlugin.js │ ├── openAiVisionPlugin.js │ ├── openAiWhisperPlugin.js │ ├── replicateApiPlugin.js │ └── runwareAiPlugin.js ├── prompt.js ├── pubsub.js ├── requestState.js ├── resolver.js ├── rest.js ├── subscriptions.js └── typeDef.js ├── start.js └── tests ├── agentic.test.js ├── call_tools.test.js ├── chunkfunction.test.js ├── claude3VertexPlugin.test.js ├── claude3VertexToolConversion.test.js ├── config.test.js ├── data ├── largecontent.txt └── mixedcontent.txt ├── encodeCache.test.js ├── fastLruCache.test.js ├── handleBars.test.js ├── main.test.js ├── memoryfunction.test.js ├── mocks.js ├── modelPlugin.test.js ├── multimodal_conversion.test.js ├── openAiChatPlugin.test.js ├── openAiToolPlugin.test.js ├── openai_api.test.js ├── parser.test.js ├── pathwayResolver.test.js ├── requestMonitor.test.js ├── requestMonitorDurationEstimator.test.js ├── server.js ├── streaming.test.js ├── subchunk.srt ├── subhorizontal.srt ├── sublong.srt ├── subscription.test.js ├── tokenHandlingTests.test.js ├── translate_srt.test.js ├── truncateMessages.test.js ├── util.test.js └── vision.test.js /.env.sample: -------------------------------------------------------------------------------- 1 | 
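# Copy this file to .env and replace the placeholder with a real key (.env is git-ignored).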
AZURE_OAI_API_KEY=_______________________ -------------------------------------------------------------------------------- /.eslintignore: -------------------------------------------------------------------------------- 1 | # Ignore build artifacts 2 | /dist 3 | /build 4 | 5 | # Ignore node_modules 6 | /node_modules 7 | 8 | # Ignore log files 9 | *.log 10 | 11 | # Ignore any config files 12 | .env 13 | .env.* 14 | 15 | # Ignore coverage reports 16 | /coverage 17 | 18 | # Ignore helper apps 19 | /helper-apps 20 | 21 | # Ignore documentation 22 | /docs 23 | 24 | # Ignore tests 25 | /tests 26 | 27 | # Ignore any generated or bundled files 28 | *.min.js 29 | *.bundle.js 30 | 31 | # Ignore any files generated by your IDE or text editor 32 | .idea/ 33 | .vscode/ 34 | *.sublime-* 35 | *.iml 36 | *.swp -------------------------------------------------------------------------------- /.eslintrc: -------------------------------------------------------------------------------- 1 | { 2 | "env": { 3 | "browser": true, 4 | "es2021": true, 5 | "node": true 6 | }, 7 | "extends": [ 8 | "eslint:recommended" 9 | ], 10 | "parserOptions": { 11 | "ecmaVersion": "latest", 12 | "sourceType": "module" 13 | }, 14 | "plugins": [ 15 | "import" 16 | ], 17 | "rules": { 18 | "import/no-unresolved": "error", 19 | "import/no-extraneous-dependencies": ["error", {"devDependencies": true}], 20 | "no-unused-vars": ["error", { "argsIgnorePattern": "^_" }], 21 | "no-useless-escape": "off" 22 | }, 23 | "settings": { 24 | "import/resolver": { 25 | "node": { 26 | "extensions": [".js"], 27 | "moduleDirectory": ["node_modules", "src"] 28 | } 29 | }, 30 | "import/core-modules": ["ava"] 31 | } 32 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | node_modules/ 3 | .env 4 | .vscode/ 5 | **/__pycache__ 6 | **/.venv 7 | .aider* 8 | -------------------------------------------------------------------------------- /.release-it.json: -------------------------------------------------------------------------------- 1 | { 2 | "git": { 3 | "commitMessage": "chore: release v${version}" 4 | }, 5 | "github": { 6 | "release": true, 7 | "autoGenerate": true 8 | } 9 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Al Jazeera Media Network 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | We take the security of our project seriously. The table below shows the versions of Cortex currently being supported with security updates. 6 | 7 | | Version | Supported | 8 | | ------- | ------------------ | 9 | | 1.x.x | :white_check_mark: | 10 | 11 | ## Reporting a Vulnerability 12 | 13 | If you have discovered a security vulnerability in Cortex, please follow these steps to report it: 14 | 15 | 1. **Do not** create a public GitHub issue, as this might expose the vulnerability to others. 16 | 2. Please follow the GitHub process for [Privately Reporting a Security Vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability) 17 | 18 | ## Disclosure Policy 19 | 20 | Cortex follows responsible disclosure practices. Once a vulnerability is confirmed and a fix is developed, we will release a security update and publicly disclose the vulnerability. We will credit the reporter of the vulnerability in the disclosure, unless the reporter wishes to remain anonymous. 21 | 22 | We appreciate your help in keeping Cortex secure and your responsible disclosure of any security vulnerabilities you discover. 23 | -------------------------------------------------------------------------------- /config/default.json: -------------------------------------------------------------------------------- 1 | {} -------------------------------------------------------------------------------- /config/dynamicPathwaysConfig.example.json: -------------------------------------------------------------------------------- 1 | { 2 | "storageType": "azure", 3 | "filePath": "./pathways/dynamic/pathways.json", 4 | "publishKey": "development" 5 | } -------------------------------------------------------------------------------- /helper-apps/cortex-autogen/.funcignore: -------------------------------------------------------------------------------- 1 | .git* 2 | .vscode 3 | __azurite_db*__.json 4 | __blobstorage__ 5 | __queuestorage__ 6 | local.settings.json 7 | test 8 | .venv -------------------------------------------------------------------------------- /helper-apps/cortex-autogen/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9-slim 2 | 3 | WORKDIR /app 4 | 5 | COPY requirements.txt . 6 | RUN pip install --no-cache-dir -r requirements.txt 7 | 8 | COPY . . 
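# main.py runs the standalone queue-polling worker loop; function_app.py provides the equivalent Azure Functions entry point.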
9 | 10 | CMD ["python", "main.py"] -------------------------------------------------------------------------------- /helper-apps/cortex-autogen/OAI_CONFIG_LIST: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "model": "claude-3.7-sonnet", 4 | "price": [0,0] 5 | } 6 | ] -------------------------------------------------------------------------------- /helper-apps/cortex-autogen/agents_extra.py: -------------------------------------------------------------------------------- 1 | from config import prompts 2 | from datetime import datetime 3 | 4 | def process_helper_results(helper_decider_result, original_request_message, context, chat): 5 | def add_to_context(result, prefix): 6 | nonlocal context 7 | context += f"\n\n{prefix}: {result}" 8 | 9 | if helper_decider_result.get("sql"): 10 | sql_message = f"Use SQL to help solving task, provide any related data and code that may help: {original_request_message}." 11 | result = chat(prompts.get("SQL_PROMPT"), sql_message, return_type="all_as_str") 12 | add_to_context(result, "SQL results") 13 | 14 | return context 15 | -------------------------------------------------------------------------------- /helper-apps/cortex-autogen/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | 4 | load_dotenv() 5 | 6 | AZURE_STORAGE_CONNECTION_STRING = os.environ["AZURE_STORAGE_CONNECTION_STRING"] 7 | HUMAN_INPUT_QUEUE_NAME = os.environ.get("HUMAN_INPUT_QUEUE_NAME", "autogen-human-input-queue") 8 | REDIS_CONNECTION_STRING = os.environ['REDIS_CONNECTION_STRING'] 9 | REDIS_CHANNEL = 'requestProgress' 10 | AZURE_BLOB_CONTAINER = os.environ.get("AZURE_BLOB_CONTAINER", "autogen-uploads") 11 | 12 | 13 | # Prompts 14 | import prompts 15 | import prompts_extra 16 | 17 | prompts = {**prompts.__dict__, **prompts_extra.__dict__} 18 | 19 | -------------------------------------------------------------------------------- /helper-apps/cortex-autogen/data_operations.py: -------------------------------------------------------------------------------- 1 | from azure.storage.queue import QueueClient 2 | import pymongo 3 | import os 4 | import logging 5 | import json 6 | import base64 7 | from config import AZURE_STORAGE_CONNECTION_STRING, HUMAN_INPUT_QUEUE_NAME 8 | 9 | human_input_queue_client = QueueClient.from_connection_string(AZURE_STORAGE_CONNECTION_STRING, HUMAN_INPUT_QUEUE_NAME) 10 | 11 | def store_in_mongo(data): 12 | try: 13 | if 'MONGO_URI' in os.environ: 14 | client = pymongo.MongoClient(os.environ['MONGO_URI']) 15 | collection = client.get_default_database()[os.environ.get('MONGO_COLLECTION_NAME', 'autogenruns')] 16 | collection.insert_one(data) 17 | else: 18 | logging.warning("MONGO_URI not found in environment variables") 19 | except Exception as e: 20 | logging.error(f"An error occurred while storing data in MongoDB: {str(e)}") 21 | 22 | def check_for_human_input(request_id): 23 | messages = human_input_queue_client.receive_messages() 24 | for message in messages: 25 | content = json.loads(base64.b64decode(message.content).decode('utf-8')) 26 | if content['codeRequestId'] == request_id: 27 | human_input_queue_client.delete_message(message) 28 | return content['text'] 29 | return None -------------------------------------------------------------------------------- /helper-apps/cortex-autogen/function_app.py: -------------------------------------------------------------------------------- 1 | import azure.functions as func 2 | 
import logging 3 | import json 4 | from azure.storage.queue import QueueClient 5 | import os 6 | import redis 7 | from agents import process_message 8 | import subprocess 9 | import sys 10 | import config 11 | import requests 12 | 13 | logging.getLogger().setLevel(logging.WARNING) 14 | 15 | import importlib 16 | required_packages = {'requests': 'requests', 'azure-storage-blob': 'azure.storage.blob'} # pip name -> import name (import_module needs the module name); add any other required packages here 17 | for package, module_name in required_packages.items(): 18 | try: 19 | importlib.import_module(module_name) 20 | except ImportError: 21 | subprocess.check_call([sys.executable, "-m", "pip", "install", package, "--disable-pip-version-check"], stderr=subprocess.STDOUT, stdout=subprocess.DEVNULL) 22 | 23 | 24 | app = func.FunctionApp() 25 | 26 | connection_string = os.environ["AZURE_STORAGE_CONNECTION_STRING"] 27 | queue_name = os.environ.get("QUEUE_NAME", "autogen-message-queue") 28 | queue_client = QueueClient.from_connection_string(connection_string, queue_name) 29 | 30 | redis_client = redis.from_url(os.environ['REDIS_CONNECTION_STRING']) 31 | channel = 'requestProgress' 32 | 33 | 34 | @app.queue_trigger(arg_name="msg", queue_name=queue_name, connection="AZURE_STORAGE_CONNECTION_STRING") 35 | def queue_trigger(msg: func.QueueMessage): 36 | logging.info(f"Queue trigger Message ID: {msg.id}") 37 | try: 38 | message_data = json.loads(msg.get_body().decode('utf-8')) 39 | if "requestId" not in message_data: 40 | message_data['requestId'] = msg.id 41 | process_message(message_data, msg) 42 | 43 | except Exception as e: 44 | logging.error(f"Error processing message: {str(e)}")
-------------------------------------------------------------------------------- /helper-apps/cortex-autogen/host.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "2.0", 3 | "logging": { 4 | "applicationInsights": { 5 | "samplingSettings": { 6 | "isEnabled": true, 7 | "excludedTypes": "Request" 8 | } 9 | } 10 | }, 11 | "extensionBundle": { 12 | "id": "Microsoft.Azure.Functions.ExtensionBundle", 13 | "version": "[4.*, 5.0.0)" 14 | } 15 | } -------------------------------------------------------------------------------- /helper-apps/cortex-autogen/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from azure.storage.queue import QueueClient 3 | import base64 4 | import json 5 | import time 6 | from agents import process_message 7 | 8 | 9 | def main(): 10 | print("Starting message processing loop") 11 | connection_string = os.environ["AZURE_STORAGE_CONNECTION_STRING"] 12 | queue_name = os.environ.get("QUEUE_NAME", "autogen-message-queue") 13 | 14 | queue_client = QueueClient.from_connection_string(connection_string, queue_name) 15 | 16 | attempts = 0 17 | max_attempts = 1000 18 | 19 | while attempts < max_attempts: 20 | messages = queue_client.receive_messages(messages_per_page=1) 21 | processed_message = False # the pager returned by receive_messages is always truthy, so track whether anything was handled 22 | 23 | for message in messages: 24 | decoded_content = base64.b64decode(message.content).decode('utf-8') 25 | message_data = json.loads(decoded_content) 26 | if "requestId" not in message_data: 27 | message_data['requestId'] = message.id 28 | process_message(message_data, message) 29 | queue_client.delete_message(message) 30 | processed_message = True 31 | 32 | if processed_message: 33 | attempts = 0 # Reset attempts if a message was processed 34 | else: 35 | attempts += 1 36 | time.sleep(1) # Wait for 1 second before checking again 37 | 38 | print(f"No messages received after {max_attempts} attempts. Exiting.") 39 | 40 | if __name__ == "__main__": 41 | main()
-------------------------------------------------------------------------------- /helper-apps/cortex-autogen/prompts_extra.py: -------------------------------------------------------------------------------- 1 | SQL_PROMPT = """ 2 | You are a coder bot that writes SQL code. 3 | The only thing User will do is run your code. 4 | You can write any SQL code that you think will help User solve the task. 5 | """ 6 |
-------------------------------------------------------------------------------- /helper-apps/cortex-autogen/requirements.txt: -------------------------------------------------------------------------------- 1 | azure-storage-queue 2 | azure-functions 3 | azure-search-documents 4 | pyautogen==0.3.0 5 | redis 6 | pymongo 7 | requests 8 | azure-storage-blob 9 | mysql-connector-python
-------------------------------------------------------------------------------- /helper-apps/cortex-autogen/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Load environment variables 4 | if [ -f .env ]; then 5 | export $(cat .env | xargs) 6 | else 7 | echo ".env file not found" 8 | exit 1 9 | fi 10 | 11 | # Check if required variables are set 12 | if [ -z "$AZURE_STORAGE_CONNECTION_STRING" ] || [ -z "$QUEUE_NAME" ]; then 13 | echo "AZURE_STORAGE_CONNECTION_STRING and QUEUE_NAME must be set in .env file" 14 | exit 1 15 | fi 16 | 17 | # Prompt for message if not provided as argument 18 | if [ -z "$1" ]; then 19 | read -p "Enter message: " MESSAGE 20 | else 21 | MESSAGE="$1" 22 | fi 23 | 24 | # Create JSON with message field 25 | JSON_MESSAGE=$(jq -n --arg msg "$MESSAGE" '{"message": $msg}') 26 | 27 | # Encode JSON message to Base64 28 | ENCODED_MESSAGE=$(echo -n "$JSON_MESSAGE" | base64) 29 | 30 | # Send message to queue 31 | az storage message put \ 32 | --connection-string "$AZURE_STORAGE_CONNECTION_STRING" \ 33 | --queue-name "$QUEUE_NAME" \ 34 | --content "$ENCODED_MESSAGE" 35 | 36 | if [ $? -eq 0 ]; then 37 | echo "Message sent successfully." 38 | else 39 | echo "Error sending message." 40 | fi -------------------------------------------------------------------------------- /helper-apps/cortex-browser/.funcignore: -------------------------------------------------------------------------------- 1 | .git* 2 | .vscode 3 | __azurite_db*__.json 4 | __blobstorage__ 5 | __queuestorage__ 6 | local.settings.json 7 | test 8 | .venv -------------------------------------------------------------------------------- /helper-apps/cortex-browser/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use an official Python runtime as a parent image suitable for Azure Functions 2 | FROM mcr.microsoft.com/azure-functions/python:4-python3.11 3 | 4 | # Set environment variables for Azure Functions runtime 5 | ENV AzureWebJobsScriptRoot=/home/site/wwwroot 6 | ENV AzureFunctionsJobHost__Logging__Console__IsEnabled=true 7 | # This might still be useful for Azure deployments; it can be kept or removed based on specific SSL/TLS needs for accessing other Azure resources.
8 | ENV WEBSITES_INCLUDE_CLOUD_CERTS=true 9 | 10 | # Install Playwright dependencies 11 | RUN apt-get update && apt-get install -y --no-install-recommends \ 12 | # Common fonts for rendering (can be useful even for headless) 13 | fonts-liberation \ 14 | fonts-noto \ 15 | fontconfig \ 16 | # Clean up APT caches 17 | && rm -rf /var/lib/apt/lists/* 18 | 19 | # Copy requirements file first to leverage Docker cache 20 | COPY requirements.txt /tmp/ 21 | WORKDIR /tmp 22 | 23 | # Install Python dependencies (including playwright) 24 | RUN pip install --no-cache-dir -r requirements.txt 25 | 26 | # Install Playwright browser(s) and their OS dependencies 27 | # This installs Chromium by default along with its necessary OS packages. 28 | # Add other browsers like firefox or webkit if needed: playwright install --with-deps firefox webkit 29 | RUN playwright install --with-deps chromium 30 | 31 | RUN playwright install-deps 32 | 33 | # Copy the function app code to the final location 34 | COPY . /home/site/wwwroot 35 | 36 | # Set the working directory for the function app 37 | WORKDIR /home/site/wwwroot 38 | 39 | # Expose the default Azure Functions port 40 | EXPOSE 80
-------------------------------------------------------------------------------- /helper-apps/cortex-browser/host.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "2.0", 3 | "logging": { 4 | "applicationInsights": { 5 | "samplingSettings": { 6 | "isEnabled": true, 7 | "excludedTypes": "Request" 8 | } 9 | } 10 | }, 11 | "extensionBundle": { 12 | "id": "Microsoft.Azure.Functions.ExtensionBundle", 13 | "version": "[4.*, 5.0.0)" 14 | } 15 | } -------------------------------------------------------------------------------- /helper-apps/cortex-browser/requirements.txt: -------------------------------------------------------------------------------- 1 | azure-functions==1.23.0 2 | babel==2.17.0 3 | certifi==2025.4.26 4 | charset-normalizer==3.4.2 5 | courlan==1.3.2 6 | dateparser==1.2.1 7 | greenlet==3.0.3 8 | htmldate==1.9.3 9 | jusText==3.0.2 10 | lxml==5.4.0 11 | lxml_html_clean==0.4.2 12 | MarkupSafe==3.0.2 13 | pyee==11.1.0 14 | python-dateutil==2.9.0.post0 15 | pytz==2025.2 16 | regex==2024.11.6 17 | six==1.17.0 18 | tld==0.13 19 | trafilatura==2.0.0 20 | typing_extensions==4.13.2 21 | tzlocal==5.3.1 22 | Werkzeug==3.1.3 23 | playwright==1.45.0 24 | aiohttp 25 |
-------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/.env.test.azure.sample: -------------------------------------------------------------------------------- 1 | # Test environment configuration for Azure tests 2 | REDIS_CONNECTION_STRING=redis://default:redispw@localhost:32768 3 | AZURE_STORAGE_CONNECTION_STRING=UseDevelopmentStorage=true 4 | AZURE_STORAGE_CONTAINER_NAME=test-container 5 | NODE_ENV=test 6 | PORT=7072 # Different port for testing 7 | MARKITDOWN_CONVERT_URL= #cortex-markitdown url -------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/.env.test.gcs.sample: -------------------------------------------------------------------------------- 1 | # Test environment configuration for GCS tests 2 | REDIS_CONNECTION_STRING=redis://default:redispw@localhost:32768 3 | GCP_SERVICE_ACCOUNT_KEY={"project_id":"test-project"} 4 | STORAGE_EMULATOR_HOST=http://localhost:4443 5 | GCS_BUCKETNAME=cortextempfiles 6 | AZURE_STORAGE_CONNECTION_STRING=UseDevelopmentStorage=true 7 |
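# The Azure settings are included here too because scripts/test-gcs.sh starts Azurite alongside fake-gcs-server.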
AZURE_STORAGE_CONTAINER_NAME=test-container 8 | NODE_ENV=test 9 | PORT=7072 # Different port for testing 10 | MARKITDOWN_CONVERT_URL= #cortex-markitdown url -------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/.env.test.sample: -------------------------------------------------------------------------------- 1 | # Test environment configuration 2 | REDIS_CONNECTION_STRING=redis://default:redispw@localhost:32768 3 | #AZURE_STORAGE_CONNECTION_STRING=UseDevelopmentStorage=true 4 | AZURE_STORAGE_CONTAINER_NAME=test-container 5 | #GCP_SERVICE_ACCOUNT_KEY={"type":"service_account","project_id":"test-project"} 6 | NODE_ENV=test 7 | PORT=7072 # Different port for testing 8 | MARKITDOWN_CONVERT_URL= #cortex-markitdown url -------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | lerna-debug.log* 8 | 9 | # Diagnostic reports (https://nodejs.org/api/report.html) 10 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 11 | 12 | # Runtime data 13 | pids 14 | *.pid 15 | *.seed 16 | *.pid.lock 17 | 18 | # Directory for instrumented libs generated by jscoverage/JSCover 19 | lib-cov 20 | 21 | # Coverage directory used by tools like istanbul 22 | coverage 23 | 24 | # nyc test coverage 25 | .nyc_output 26 | 27 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 28 | .grunt 29 | 30 | # Bower dependency directory (https://bower.io/) 31 | bower_components 32 | 33 | # node-waf configuration 34 | .lock-wscript 35 | 36 | # Compiled binary addons (https://nodejs.org/api/addons.html) 37 | build/Release 38 | 39 | # Dependency directories 40 | node_modules/ 41 | jspm_packages/ 42 | 43 | # TypeScript v1 declaration files 44 | typings/ 45 | 46 | # Optional npm cache directory 47 | .npm 48 | 49 | # Optional eslint cache 50 | .eslintcache 51 | 52 | # Optional REPL history 53 | .node_repl_history 54 | 55 | # Output of 'npm pack' 56 | *.tgz 57 | 58 | # Yarn Integrity file 59 | .yarn-integrity 60 | 61 | # dotenv environment variables file 62 | .env 63 | .env.test 64 | .env.test.azure 65 | .env.test.gcs 66 | 67 | # parcel-bundler cache (https://parceljs.org/) 68 | .cache 69 | 70 | # next.js build output 71 | .next 72 | 73 | # nuxt.js build output 74 | .nuxt 75 | 76 | # vuepress build output 77 | .vuepress/dist 78 | 79 | # Serverless directories 80 | .serverless/ 81 | 82 | # FuseBox cache 83 | .fusebox/ 84 | 85 | # DynamoDB Local files 86 | .dynamodb/ 87 | 88 | # TypeScript output 89 | dist 90 | out 91 | 92 | # Azure Functions artifacts 93 | bin 94 | obj 95 | appsettings.json 96 | local.settings.json 97 | 98 | # Azurite artifacts 99 | __blobstorage__ 100 | __queuestorage__ 101 | __azurite_db*__.json 102 | 103 | # Python virtual environments and caches 104 | .venv 105 | __pycache__ -------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:18-alpine 2 | 3 | WORKDIR /usr/src/app 4 | 5 | COPY package*.json ./ 6 | 7 | RUN npm install 8 | 9 | ## installing ffmpeg 10 | RUN apk update && \ 11 | apk add ffmpeg 12 | 13 | COPY . .
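# 7071 is the default service port; the .env.test* samples use PORT=7072 for testing.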
14 | 15 | EXPOSE 7071 16 | 17 | # RUN npm run build 18 | 19 | CMD [ "npm", "start" ] -------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/function.json: -------------------------------------------------------------------------------- 1 | { 2 | "bindings": [ 3 | { 4 | "authLevel": "function", 5 | "type": "httpTrigger", 6 | "direction": "in", 7 | "name": "req", 8 | "methods": ["get", "post", "delete"] 9 | }, 10 | { 11 | "type": "http", 12 | "direction": "out", 13 | "name": "res" 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@aj-archipelago/cortex-file-handler", 3 | "version": "2.0.02", 4 | "description": "File handling service for Cortex - handles file uploads, media chunking, and document processing", 5 | "type": "module", 6 | "main": "src/index.js", 7 | "scripts": { 8 | "start": "node src/start.js", 9 | "dev": "node -r dotenv/config src/start.js", 10 | "test": "DOTENV_CONFIG_PATH=.env.test NODE_ENV=test node -r dotenv/config node_modules/ava/entrypoints/cli.mjs", 11 | "test:azure": "DOTENV_CONFIG_PATH=.env.test.azure NODE_ENV=test ./scripts/test-azure.sh", 12 | "test:watch": "DOTENV_CONFIG_PATH=.env.test NODE_ENV=test node -r dotenv/config node_modules/ava/entrypoints/cli.mjs --watch", 13 | "test:gcs": "DOTENV_CONFIG_PATH=.env.test.gcs NODE_ENV=test ./scripts/test-gcs.sh" 14 | }, 15 | "dependencies": { 16 | "@azure/storage-blob": "^12.13.0", 17 | "@distube/ytdl-core": "^4.14.3", 18 | "@google-cloud/storage": "^7.10.0", 19 | "axios": "^1.7.4", 20 | "busboy": "^1.6.0", 21 | "cors": "^2.8.5", 22 | "express": "^4.21.1", 23 | "fluent-ffmpeg": "^2.1.3", 24 | "ioredis": "^5.3.1", 25 | "mime-types": "^3.0.1", 26 | "papaparse": "^5.4.1", 27 | "pdfjs-dist": "^4.2.67", 28 | "public-ip": "^6.0.1", 29 | "uuid": "^9.0.0", 30 | "xlsx": "^0.18.5" 31 | }, 32 | "devDependencies": { 33 | "@eslint/js": "^9.26.0", 34 | "ava": "^5.3.1", 35 | "dotenv": "^16.3.1", 36 | "eslint-plugin-import": "^2.31.0", 37 | "globals": "^16.1.0", 38 | "nock": "^13.3.0", 39 | "typescript-eslint": "^8.32.1" 40 | }, 41 | "ava": { 42 | "files": [ 43 | "tests/**/*.test.js", 44 | "!tests/test-files/**/*", 45 | "!tests/test-docs/**/*", 46 | "!tests/mocks/**/*" 47 | ], 48 | "timeout": "1m", 49 | "nodeArguments": [ 50 | "--experimental-modules" 51 | ], 52 | "serial": true 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/scripts/setup-azure-container.js: -------------------------------------------------------------------------------- 1 | import { BlobServiceClient } from '@azure/storage-blob'; 2 | 3 | async function createContainer() { 4 | try { 5 | const blobServiceClient = BlobServiceClient.fromConnectionString( 6 | 'UseDevelopmentStorage=true', 7 | ); 8 | const containerClient = 9 | blobServiceClient.getContainerClient('test-container'); 10 | 11 | console.log('Creating container...'); 12 | await containerClient.create(); 13 | console.log('Container created successfully'); 14 | } catch (error) { 15 | // Ignore if container already exists 16 | if (error.statusCode === 409) { 17 | console.log('Container already exists'); 18 | } else { 19 | console.error('Error creating container:', error); 20 | process.exit(1); 21 | } 22 | } 23 | } 24 | 25 | createContainer(); 26 | 
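// Usage: run `node scripts/setup-azure-container.js` with Azurite already listening on its default ports
// (scripts/test-azure.sh starts Azurite before invoking this script).
// A minimal alternative sketch, assuming the same @azure/storage-blob SDK: createIfNotExists()
// makes the 409 "already exists" special-casing above unnecessary (inside an async context):
//
//   const containerClient = BlobServiceClient.fromConnectionString('UseDevelopmentStorage=true')
//     .getContainerClient('test-container');
//   await containerClient.createIfNotExists();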
-------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/scripts/setup-test-containers.js: -------------------------------------------------------------------------------- 1 | import { BlobServiceClient } from '@azure/storage-blob'; 2 | import { Storage } from '@google-cloud/storage'; 3 | 4 | async function createAzureContainer() { 5 | try { 6 | const blobServiceClient = BlobServiceClient.fromConnectionString( 7 | 'UseDevelopmentStorage=true', 8 | ); 9 | const containerClient = 10 | blobServiceClient.getContainerClient('test-container'); 11 | 12 | console.log('Creating Azure container...'); 13 | await containerClient.create(); 14 | console.log('Azure container created successfully'); 15 | } catch (error) { 16 | // Ignore if container already exists 17 | if (error.statusCode === 409) { 18 | console.log('Azure container already exists'); 19 | } else { 20 | console.error('Error creating Azure container:', error); 21 | process.exit(1); 22 | } 23 | } 24 | } 25 | 26 | async function createGCSBucket() { 27 | try { 28 | const storage = new Storage({ 29 | projectId: 'test-project', 30 | apiEndpoint: 'http://localhost:4443', 31 | }); 32 | 33 | console.log('Creating GCS bucket...'); 34 | await storage.createBucket('cortextempfiles'); 35 | console.log('GCS bucket created successfully'); 36 | } catch (error) { 37 | // Ignore if bucket already exists 38 | if (error.code === 409) { 39 | console.log('GCS bucket already exists'); 40 | } else { 41 | console.error('Error creating GCS bucket:', error); 42 | process.exit(1); 43 | } 44 | } 45 | } 46 | 47 | async function setup() { 48 | await createAzureContainer(); 49 | await createGCSBucket(); 50 | } 51 | 52 | setup(); 53 | -------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/scripts/test-azure.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Create temp directory for Azurite 4 | AZURITE_DIR="/tmp/azurite-test" 5 | mkdir -p $AZURITE_DIR 6 | 7 | # Start Azurite in background 8 | echo "Starting Azurite..." 9 | azurite --silent --skipApiVersionCheck --location $AZURITE_DIR & 10 | AZURITE_PID=$! 11 | 12 | # Wait for Azurite to start 13 | sleep 2 14 | 15 | # Create test container 16 | echo "Setting up Azure container..." 17 | node scripts/setup-azure-container.js 18 | 19 | # Run the tests 20 | echo "Running tests..." 21 | node -r dotenv/config node_modules/ava/entrypoints/cli.mjs "$@" 22 | 23 | # Store test result 24 | TEST_RESULT=$? 25 | 26 | # Kill Azurite 27 | echo "Cleaning up..." 28 | kill $AZURITE_PID 29 | 30 | # Wait for Azurite to finish cleanup 31 | sleep 2 32 | 33 | # Clean up Azurite directory 34 | rm -rf $AZURITE_DIR 35 | 36 | # Exit with test result 37 | exit $TEST_RESULT -------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/scripts/test-gcs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Exit on error 4 | set -e 5 | 6 | cleanup() { 7 | echo "Cleaning up..." 8 | if [ ! -z "$AZURITE_PID" ]; then 9 | kill $AZURITE_PID 2>/dev/null || true 10 | fi 11 | docker stop fake-gcs-server 2>/dev/null || true 12 | docker rm fake-gcs-server 2>/dev/null || true 13 | } 14 | 15 | # Set up cleanup trap 16 | trap cleanup EXIT 17 | 18 | echo "Starting test environment..." 19 | 20 | # Start Azurite if not running 21 | if ! 
nc -z localhost 10000; then 22 | echo "Starting Azurite..." 23 | azurite --silent --skipApiVersionCheck --location .azurite --debug .azurite/debug.log & 24 | AZURITE_PID=$! 25 | # Wait for Azurite to be ready 26 | until nc -z localhost 10000; do 27 | sleep 1 28 | done 29 | fi 30 | 31 | # Start fake-gcs-server if not running 32 | if ! nc -z localhost 4443; then 33 | echo "Starting fake-gcs-server..." 34 | docker run -d --name fake-gcs-server \ 35 | -p 4443:4443 \ 36 | fsouza/fake-gcs-server -scheme http 37 | # Wait for fake-gcs-server to be ready 38 | until nc -z localhost 4443; do 39 | sleep 1 40 | done 41 | fi 42 | 43 | # Create containers 44 | echo "Setting up test containers..." 45 | node scripts/setup-test-containers.js 46 | 47 | # Run the tests 48 | echo "Running tests..." 49 | node -r dotenv/config node_modules/ava/entrypoints/cli.mjs "$@" -------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/src/docHelper.js: -------------------------------------------------------------------------------- 1 | // Utility function for chunking text into smaller pieces 2 | export function easyChunker(text) { 3 | const result = []; 4 | const n = 10000; 5 | 6 | // If the text is less than n characters, just process it as is 7 | if (text.length <= n) { 8 | return [text]; 9 | } 10 | 11 | let startIndex = 0; 12 | while (startIndex < text.length) { 13 | let endIndex = Math.min(startIndex + n, text.length); 14 | 15 | // Make sure we don't split in the middle of a sentence 16 | while ( 17 | endIndex > startIndex && 18 | text[endIndex] !== '.' && 19 | text[endIndex] !== ' ' 20 | ) { 21 | endIndex--; 22 | } 23 | 24 | // If we didn't find a sentence break, just split at n characters 25 | if (endIndex === startIndex) { 26 | endIndex = startIndex + n; 27 | } 28 | 29 | // Push the chunk to the result array 30 | result.push(text.substring(startIndex, endIndex)); 31 | 32 | // Move the start index to the next chunk 33 | startIndex = endIndex; 34 | } 35 | 36 | return result; 37 | } 38 | -------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/src/services/FileConversionService.js: -------------------------------------------------------------------------------- 1 | import { ConversionService } from './ConversionService.js'; 2 | import { getFileStoreMap, setFileStoreMap } from '../redis.js'; 3 | import { urlExists } from '../helper.js'; 4 | import { gcsUrlExists, uploadChunkToGCS, gcs } from '../blobHandler.js'; 5 | import { downloadFile } from '../fileChunker.js'; 6 | import { saveFileToBlob } from '../blobHandler.js'; 7 | import { moveFileToPublicFolder } from '../localFileHandler.js'; 8 | import { v4 as uuidv4 } from 'uuid'; 9 | 10 | export class FileConversionService extends ConversionService { 11 | constructor(context, useAzure = true) { 12 | super(context); 13 | this.useAzure = useAzure; 14 | } 15 | 16 | async _getFileStoreMap(key) { 17 | return getFileStoreMap(key); 18 | } 19 | 20 | async _setFileStoreMap(key, value) { 21 | return setFileStoreMap(key, value); 22 | } 23 | 24 | async _urlExists(url) { 25 | return urlExists(url); 26 | } 27 | 28 | async _gcsUrlExists(url) { 29 | return gcsUrlExists(url); 30 | } 31 | 32 | async _downloadFile(url, destination) { 33 | return downloadFile(url, destination); 34 | } 35 | 36 | async _saveConvertedFile(filePath, requestId) { 37 | // Generate a fallback requestId if none supplied (e.g. 
during checkHash calls) 38 | const reqId = requestId || uuidv4(); 39 | 40 | let fileUrl; 41 | if (this.useAzure) { 42 | const savedBlob = await saveFileToBlob(filePath, reqId); 43 | fileUrl = savedBlob.url; 44 | } else { 45 | fileUrl = await moveFileToPublicFolder(filePath, reqId); 46 | } 47 | return { url: fileUrl }; 48 | } 49 | 50 | async _uploadChunkToGCS(filePath, requestId) { 51 | return uploadChunkToGCS(filePath, requestId); 52 | } 53 | 54 | _isGCSConfigured() { 55 | return !!gcs; 56 | } 57 | } -------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/src/services/storage/StorageProvider.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Base interface for storage providers 3 | */ 4 | export class StorageProvider { 5 | /** 6 | * Upload a file to storage 7 | * @param {Object} context - The context object 8 | * @param {string} filePath - Path to the file to upload 9 | * @param {string} requestId - Unique identifier for the request 10 | * @param {string} [hash] - Optional hash of the file 11 | * @returns {Promise<{url: string, blobName: string}>} The URL and blob name of the uploaded file 12 | */ 13 | async uploadFile(context, filePath, requestId, hash = null) { 14 | throw new Error('Method not implemented'); 15 | } 16 | 17 | /** 18 | * Delete files associated with a request ID 19 | * @param {string} requestId - The request ID to delete files for 20 | * @returns {Promise} Array of deleted file URLs 21 | */ 22 | async deleteFiles(requestId) { 23 | throw new Error('Method not implemented'); 24 | } 25 | 26 | /** 27 | * Check if a file exists at the given URL 28 | * @param {string} url - The URL to check 29 | * @returns {Promise} Whether the file exists 30 | */ 31 | async fileExists(url) { 32 | throw new Error('Method not implemented'); 33 | } 34 | 35 | /** 36 | * Download a file from storage 37 | * @param {string} url - The URL of the file to download 38 | * @param {string} destinationPath - Where to save the downloaded file 39 | * @returns {Promise} 40 | */ 41 | async downloadFile(url, destinationPath) { 42 | throw new Error('Method not implemented'); 43 | } 44 | 45 | /** 46 | * Clean up files by their URLs 47 | * @param {string[]} urls - Array of URLs to clean up 48 | * @returns {Promise} 49 | */ 50 | async cleanup(urls) { 51 | throw new Error('Method not implemented'); 52 | } 53 | } -------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/src/utils/filenameUtils.js: -------------------------------------------------------------------------------- 1 | import path from 'path'; 2 | 3 | /** 4 | * Sanitize a filename so that it is safe and consistent across all back-ends 5 | * – Decode any existing URI encoding 6 | * – Strip directory components 7 | * – Replace characters that are not alphanum, dash, dot, or underscore with `_` 8 | * – Convert spaces to underscores to avoid unintended encoding by some SDKs 9 | * 10 | * @param {string} raw The raw filename/path/URL component 11 | * @returns {string} A sanitized filename suitable for Azure, GCS, local FS, etc. 
12 | */ 13 | export function sanitizeFilename(raw = '') { 14 | let name = raw; 15 | try { 16 | name = decodeURIComponent(name); 17 | } catch (_) { 18 | // Already decoded / not URI encoded – ignore 19 | } 20 | 21 | name = path.basename(name); 22 | // Replace spaces first so they don't become %20 anywhere 23 | name = name.replace(/\s+/g, '_'); 24 | // Replace any remaining invalid characters 25 | name = name.replace(/[^\w\-\.]/g, '_'); 26 | 27 | return name; 28 | } -------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/tests/files/DOCX_TestPage.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aj-archipelago/cortex/7afd526ea569868cd80ec90ea504df0c23e66a8d/helper-apps/cortex-file-handler/tests/files/DOCX_TestPage.docx -------------------------------------------------------------------------------- /helper-apps/cortex-file-handler/tests/files/tests-example.xls: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aj-archipelago/cortex/7afd526ea569868cd80ec90ea504df0c23e66a8d/helper-apps/cortex-file-handler/tests/files/tests-example.xls -------------------------------------------------------------------------------- /helper-apps/cortex-markitdown/.funcignore: -------------------------------------------------------------------------------- 1 | .venv -------------------------------------------------------------------------------- /helper-apps/cortex-markitdown/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | dist/ 13 | downloads/ 14 | eggs/ 15 | .eggs/ 16 | lib/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | wheels/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | MANIFEST 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .nox/ 41 | .coverage 42 | .coverage.*_file 43 | .cache 44 | .pytest_cache/ 45 | .hypothesis/ 46 | 47 | # Translations 48 | *.mo 49 | *.pot 50 | 51 | # Django stuff: 52 | *.log 53 | local_settings.py 54 | db.sqlite3 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # Jupyter Notebook 70 | .ipynb_checkpoints 71 | 72 | # IPython 73 | profile_default/ 74 | ipython_config.py 75 | 76 | # pyenv 77 | .python-version 78 | 79 | # PEP 582; __pypackages__ directory 80 | __pypackages__/ 81 | 82 | # Celery stuff 83 | celerybeat-schedule 84 | 85 | # SageMath files 86 | *.sage.py 87 | 88 | # Environments 89 | .env 90 | .venv 91 | env/ 92 | venv/ 93 | ENV/ 94 | env.bak/ 95 | venv.bak/ 96 | 97 | # Spyder project settings 98 | .spyderproject 99 | .spyproject 100 | 101 | # Rope project settings 102 | .ropeproject 103 | 104 | # mkdocs documentation 105 | /site 106 | 107 | # mypy 108 | .mypy_cache/ 109 | .dmypy.json 110 | dmypy.json 111 | 112 | # Pyre type checker 113 | .pyre/ 114 | 115 | # pytype static analyzer 116 | .pytype/ 117 | 118 | # Azure Functions local settings 119 | local.settings.json 120 | .python_packages -------------------------------------------------------------------------------- /helper-apps/cortex-markitdown/MarkitdownConverterFunction/function.json: -------------------------------------------------------------------------------- 1 | { 2 | "scriptFile": "__init__.py", 3 | "bindings": [ 4 | { 5 | "authLevel": "function", 6 | "type": "httpTrigger", 7 | "direction": "in", 8 | "name": "req", 9 | "methods": [ 10 | "get", 11 | "post" 12 | ], 13 | "route": "convert" 14 | }, 15 | { 16 | "type": "http", 17 | "direction": "out", 18 | "name": "$return" 19 | } 20 | ] 21 | } -------------------------------------------------------------------------------- /helper-apps/cortex-markitdown/host.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "2.0", 3 | "logging": { 4 | "applicationInsights": { 5 | "samplingSettings": { 6 | "isEnabled": true, 7 | "excludedTypes": "Request" 8 | } 9 | } 10 | }, 11 | "extensionBundle": { 12 | "id": "Microsoft.Azure.Functions.ExtensionBundle", 13 | "version": "[4.*, 5.0.0)" 14 | } 15 | } -------------------------------------------------------------------------------- /helper-apps/cortex-markitdown/requirements.txt: -------------------------------------------------------------------------------- 1 | azure-functions 2 | markitdown[all]>=0.1.0 -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/.env.sample: -------------------------------------------------------------------------------- 1 | CORTEX_API_KEY=... 2 | CORTEX_DEV_API_KEY=... 3 | OPENAI_API_KEY=... 
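# CORS_HOSTS holds a JSON array of allowed browser origins.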
4 | CORS_HOSTS='["http://localhost:3000"]' 5 | PORT=8081 6 | VOICE_LIB_DEBUG=false 7 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/README.md: -------------------------------------------------------------------------------- 1 | # cortex-realtime-voice 2 | 3 | To install dependencies: 4 | 5 | ```bash 6 | bun install 7 | cd client 8 | bun install 9 | ``` 10 | 11 | To run: 12 | 13 | Set up your .env file with the correct Cortex API key and access to the realtime voice service. 14 | 15 | ```bash 16 | # In the server directory 17 | bun run dev 18 | ``` 19 | 20 | To run in production: 21 | 22 | ```bash 23 | # In the server directory 24 | bun run start:prod 25 | ``` 26 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/bun.lockb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aj-archipelago/cortex/7afd526ea569868cd80ec90ea504df0c23e66a8d/helper-apps/cortex-realtime-voice-server/bun.lockb -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # production 12 | /build 13 | 14 | # misc 15 | .DS_Store 16 | .env.local 17 | .env.development.local 18 | .env.test.local 19 | .env.production.local 20 | 21 | npm-debug.log* 22 | yarn-debug.log* 23 | yarn-error.log* 24 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/bun.lockb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aj-archipelago/cortex/7afd526ea569868cd80ec90ea504df0c23e66a8d/helper-apps/cortex-realtime-voice-server/client/bun.lockb -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Cortex Realtime Voice 7 | 8 | 9 |
10 | 11 | 12 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "client", 3 | "version": "0.1.0", 4 | "private": true, 5 | "dependencies": { 6 | "@emotion/react": "^11.14.0", 7 | "@emotion/styled": "^11.14.0", 8 | "@mui/icons-material": "^6.2.0", 9 | "@mui/material": "^6.2.0", 10 | "@tailwindcss/typography": "^0.5.15", 11 | "katex": "^0.16.15", 12 | "react": "^19.0.0", 13 | "react-dom": "^19.0.0", 14 | "react-markdown": "^9.0.1", 15 | "react-spinners": "0.15.0", 16 | "react-syntax-highlighter": "^15.6.1", 17 | "rehype-katex": "^7.0.1", 18 | "rehype-raw": "^7.0.0", 19 | "remark-gfm": "^4.0.0", 20 | "remark-math": "^6.0.0", 21 | "socket.io-client": "4.8.1", 22 | "typescript": "^4.4.2", 23 | "web-vitals": "^2.1.0" 24 | }, 25 | "devDependencies": { 26 | "@babel/plugin-proposal-private-property-in-object": "7.21.11", 27 | "@testing-library/jest-dom": "^5.14.1", 28 | "@testing-library/react": "^13.0.0", 29 | "@testing-library/user-event": "^13.2.1", 30 | "@types/jest": "^27.0.1", 31 | "@types/node": "^16.7.13", 32 | "@types/react": "^18.0.0", 33 | "@types/react-dom": "^18.0.0", 34 | "@types/react-syntax-highlighter": "^15.5.13", 35 | "@vitejs/plugin-react": "^4.3.4", 36 | "autoprefixer": "^10.4.20", 37 | "postcss": "^8.4.49", 38 | "tailwindcss": "^3.4.17", 39 | "vite": "^6.0.5" 40 | }, 41 | "scripts": { 42 | "start": "vite", 43 | "dev": "vite", 44 | "build": "vite build", 45 | "preview": "vite preview" 46 | }, 47 | "eslintConfig": { 48 | "extends": [ 49 | "react-app", 50 | "react-app/jest" 51 | ] 52 | }, 53 | "browserslist": { 54 | "production": [ 55 | ">0.2%", 56 | "not dead", 57 | "not op_mini all" 58 | ], 59 | "development": [ 60 | "last 1 chrome version", 61 | "last 1 firefox version", 62 | "last 1 safari version" 63 | ] 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/postcss.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | plugins: { 3 | tailwindcss: {}, 4 | autoprefixer: {}, 5 | }, 6 | } -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aj-archipelago/cortex/7afd526ea569868cd80ec90ea504df0c23e66a8d/helper-apps/cortex-realtime-voice-server/client/public/favicon.ico -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 12 | 13 | 17 | 18 | 27 | Cortex Realtime Voice 28 | 29 | 30 | 31 |
32 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/public/logo192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aj-archipelago/cortex/7afd526ea569868cd80ec90ea504df0c23e66a8d/helper-apps/cortex-realtime-voice-server/client/public/logo192.png -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/public/logo512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aj-archipelago/cortex/7afd526ea569868cd80ec90ea504df0c23e66a8d/helper-apps/cortex-realtime-voice-server/client/public/logo512.png -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/public/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "short_name": "React App", 3 | "name": "Create React App Sample", 4 | "icons": [ 5 | { 6 | "src": "favicon.ico", 7 | "sizes": "64x64 32x32 24x24 16x16", 8 | "type": "image/x-icon" 9 | }, 10 | { 11 | "src": "logo192.png", 12 | "type": "image/png", 13 | "sizes": "192x192" 14 | }, 15 | { 16 | "src": "logo512.png", 17 | "type": "image/png", 18 | "sizes": "512x512" 19 | } 20 | ], 21 | "start_url": ".", 22 | "display": "standalone", 23 | "theme_color": "#000000", 24 | "background_color": "#ffffff" 25 | } 26 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/public/robots.txt: -------------------------------------------------------------------------------- 1 | # https://www.robotstxt.org/robotstxt.html 2 | User-agent: * 3 | Disallow: 4 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/public/sounds/connect.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aj-archipelago/cortex/7afd526ea569868cd80ec90ea504df0c23e66a8d/helper-apps/cortex-realtime-voice-server/client/public/sounds/connect.mp3 -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/public/sounds/disconnect.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aj-archipelago/cortex/7afd526ea569868cd80ec90ea504df0c23e66a8d/helper-apps/cortex-realtime-voice-server/client/public/sounds/disconnect.mp3 -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/src/App.test.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { render, screen } from '@testing-library/react'; 3 | import App from './App'; 4 | 5 | test('renders learn react link', () => { 6 | render(); 7 | const linkElement = screen.getByText(/learn react/i); 8 | expect(linkElement).toBeInTheDocument(); 9 | }); 10 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/src/chat/ChatBubble.tsx: -------------------------------------------------------------------------------- 1 | import { ChatBubbleRight } from './ChatBubbleRight'; 2 | import { ChatBubbleLeft } 
from './ChatBubbleLeft'; 3 | 4 | type ChatBubbleProps = { 5 | message: string; 6 | name: string; 7 | isSelf: boolean; 8 | }; 9 | 10 | export const ChatBubble = ({ 11 | name, 12 | message, 13 | isSelf, 14 | }: ChatBubbleProps) => { 15 | return ( 16 | isSelf ? ( 17 | 18 | ) : ( 19 | 20 | ) 21 | ); 22 | }; 23 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/src/chat/ChatBubbleLeft.tsx: -------------------------------------------------------------------------------- 1 | import {ChatMessage} from "./ChatMessage"; 2 | 3 | 4 | type ChatBubbleLeftProps = { 5 | name: string; 6 | message: string; 7 | }; 8 | 9 | export const ChatBubbleLeft = ({name, message}: ChatBubbleLeftProps) => { 10 | return ( 11 |
{/* [JSX markup lost during extraction - the stripped lines laid out a left-aligned chat bubble: a small header showing {name} above the message body rendered by ChatMessage] */}
21 | ) 22 | } 23 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/src/chat/ChatBubbleRight.tsx: -------------------------------------------------------------------------------- 1 | import {ChatMessage} from "./ChatMessage"; 2 | 3 | type ChatBubbleRightProps = { 4 | name: string; 5 | message: string; 6 | }; 7 | 8 | export const ChatBubbleRight = ({name, message}: ChatBubbleRightProps) => { 9 | return ( 10 |
{/* [JSX markup lost during extraction - the right-aligned mirror of ChatBubbleLeft: shows {name} and renders the message via ChatMessage] */}
20 | ) 21 | } 22 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/src/chat/ChatMessage.tsx: -------------------------------------------------------------------------------- 1 | import Markdown from "react-markdown"; 2 | 3 | type ChatMessageProps = { 4 | message: string; 5 | } 6 | 7 | export const ChatMessage = ({message}: ChatMessageProps) => { 8 | return 12 |

{/* [JSX markup lost during extraction - the components prop supplies custom renderers for pre, h2, p, ol, ul, and li; list items are prefixed with "•"] */}
  • , 23 | a: ({children, href}) => 24 | {children} 25 | }} 26 | />; 27 | } 28 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/src/chat/audio/SoundEffects.ts: -------------------------------------------------------------------------------- 1 | export class SoundEffects { 2 | private static audioContext: AudioContext | null = null; 3 | private static connectBuffer: AudioBuffer | null = null; 4 | private static disconnectBuffer: AudioBuffer | null = null; 5 | 6 | private static async getAudioContext() { 7 | if (!this.audioContext) { 8 | this.audioContext = new AudioContext(); 9 | } 10 | return this.audioContext; 11 | } 12 | 13 | private static async loadSound(url: string): Promise { 14 | const context = await this.getAudioContext(); 15 | const response = await fetch(url); 16 | const arrayBuffer = await response.arrayBuffer(); 17 | return await context.decodeAudioData(arrayBuffer); 18 | } 19 | 20 | static async init() { 21 | try { 22 | this.connectBuffer = await this.loadSound('/sounds/connect.mp3'); 23 | this.disconnectBuffer = await this.loadSound('/sounds/disconnect.mp3'); 24 | } catch (error) { 25 | console.error('Failed to load sound effects:', error); 26 | } 27 | } 28 | 29 | static async playConnect() { 30 | if (!this.connectBuffer) return; 31 | 32 | try { 33 | const context = await this.getAudioContext(); 34 | const source = context.createBufferSource(); 35 | source.buffer = this.connectBuffer; 36 | source.connect(context.destination); 37 | source.start(0); 38 | } catch (error) { 39 | console.error('Failed to play connect sound:', error); 40 | } 41 | } 42 | 43 | static async playDisconnect() { 44 | if (!this.disconnectBuffer) return; 45 | 46 | try { 47 | const context = await this.getAudioContext(); 48 | const source = context.createBufferSource(); 49 | source.buffer = this.disconnectBuffer; 50 | source.connect(context.destination); 51 | source.start(0); 52 | } catch (error) { 53 | console.error('Failed to play disconnect sound:', error); 54 | } 55 | } 56 | } -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/src/chat/audio/analysis/constants.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Constants for help with visualization 3 | * Helps map frequency ranges from Fast Fourier Transform 4 | * to human-interpretable ranges, notably music ranges and 5 | * human vocal ranges. 
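 * Example: the octave-shifting loop below derives A4 from octave 8's A (7040.0 Hz): 7040.0 / 2^(8-4) = 440 Hz, the standard concert pitch.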
6 | */ 7 | 8 | // Eighth octave frequencies 9 | const octave8Frequencies: number[] = [ 10 | 4186.01, 4434.92, 4698.63, 4978.03, 5274.04, 5587.65, 5919.91, 6271.93, 11 | 6644.88, 7040.0, 7458.62, 7902.13, 12 | ]; 13 | 14 | // Labels for each of the above frequencies 15 | const octave8FrequencyLabels: string[] = [ 16 | 'C', 17 | 'C#', 18 | 'D', 19 | 'D#', 20 | 'E', 21 | 'F', 22 | 'F#', 23 | 'G', 24 | 'G#', 25 | 'A', 26 | 'A#', 27 | 'B', 28 | ]; 29 | 30 | /** 31 | * All note frequencies from 1st to 8th octave 32 | * in format "A#8" (A#, 8th octave) 33 | */ 34 | export const noteFrequencies: number[] = []; 35 | export const noteFrequencyLabels: string[] = []; 36 | for (let i = 1; i <= 8; i++) { 37 | for (let f = 0; f < octave8Frequencies.length; f++) { 38 | const freq = octave8Frequencies[f] || 0; 39 | const baseNote = octave8FrequencyLabels[f] || 'C'; 40 | noteFrequencies.push(freq / Math.pow(2, 8 - i)); 41 | noteFrequencyLabels.push( baseNote + i); 42 | } 43 | } 44 | 45 | /** 46 | * Subset of the note frequencies between 32 and 2000 Hz 47 | * 6 octave range: C1 to B6 48 | */ 49 | const voiceFrequencyRange: [number, number] = [32.0, 2000.0]; 50 | export const voiceFrequencies: number[] = noteFrequencies.filter((freq) => { 51 | return freq > voiceFrequencyRange[0] && freq < voiceFrequencyRange[1]; 52 | }); 53 | export const voiceFrequencyLabels: string[] = noteFrequencyLabels.filter((_, i) => { 54 | return ( 55 | noteFrequencies[i] && 56 | noteFrequencies[i] > voiceFrequencyRange[0] && 57 | noteFrequencies[i] < voiceFrequencyRange[1] 58 | ); 59 | }) 60 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/src/chat/components/CopyButton.tsx: -------------------------------------------------------------------------------- 1 | import React, { useState } from 'react'; 2 | import ContentCopyIcon from '@mui/icons-material/ContentCopy'; 3 | import CheckIcon from '@mui/icons-material/Check'; 4 | 5 | interface CopyButtonProps { 6 | text: string; 7 | className?: string; 8 | } 9 | 10 | export const CopyButton: React.FC = ({ text, className = '' }) => { 11 | const [copied, setCopied] = useState(false); 12 | 13 | const handleCopy = async () => { 14 | await navigator.clipboard.writeText(text); 15 | setCopied(true); 16 | setTimeout(() => setCopied(false), 2000); 17 | }; 18 | 19 | return ( 20 | 31 | ); 32 | }; -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/src/chat/hooks/useWindowResize.ts: -------------------------------------------------------------------------------- 1 | import { useEffect, useState } from "react"; 2 | 3 | export const useWindowResize = () => { 4 | const [size, setSize] = useState({ 5 | width: 0, 6 | height: 0, 7 | }); 8 | 9 | useEffect(() => { 10 | const handleResize = () => { 11 | setSize({ 12 | width: window.innerWidth, 13 | height: window.innerHeight, 14 | }); 15 | }; 16 | 17 | handleResize(); 18 | 19 | window.addEventListener("resize", handleResize); 20 | 21 | return () => { 22 | window.removeEventListener("resize", handleResize); 23 | }; 24 | }, []); 25 | 26 | return size; 27 | }; 28 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/src/chat/utils/audio.ts: -------------------------------------------------------------------------------- 1 | export function arrayBufferToBase64( 2 | arrayBuffer: ArrayBuffer | Int16Array, 3 | ): string { 4 | let 
buffer: ArrayBuffer; 5 | if (arrayBuffer instanceof ArrayBuffer) { 6 | buffer = arrayBuffer; 7 | } else { 8 | buffer = arrayBuffer.buffer as ArrayBuffer; 9 | } 10 | 11 | const bytes = new Uint8Array(buffer); 12 | const chunkSize = 0x80_00; // 32KB chunk size 13 | let binary = ''; 14 | 15 | for (let i = 0; i < bytes.length; i += chunkSize) { 16 | const chunk = bytes.subarray(i, i + chunkSize); 17 | binary += String.fromCharCode.apply(null, chunk as any); 18 | } 19 | 20 | return btoa(binary); 21 | } 22 | 23 | export function base64ToArrayBuffer(base64: string): ArrayBuffer { 24 | const binaryString = atob(base64) 25 | const len = binaryString.length 26 | const bytes = new Uint8Array(len) 27 | 28 | for (let i = 0; i < len; i++) { 29 | bytes[i] = binaryString.charCodeAt(i) 30 | } 31 | 32 | return bytes.buffer as ArrayBuffer; 33 | } 34 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/src/index.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; 4 | 5 | html, body { 6 | background: rgb(17, 24, 39); /* matches from-gray-900 */ 7 | min-height: 100%; 8 | overscroll-behavior: none; /* prevents bounce on some browsers */ 9 | } 10 | 11 | /* For Safari/iOS */ 12 | @supports (-webkit-overflow-scrolling: touch) { 13 | body { 14 | position: fixed; 15 | width: 100%; 16 | height: 100%; 17 | overflow-y: auto; 18 | -webkit-overflow-scrolling: touch; 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/src/index.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactDOM from 'react-dom/client'; 3 | import './index.css'; 4 | import App from './App'; 5 | import reportWebVitals from './reportWebVitals'; 6 | 7 | const root = ReactDOM.createRoot( 8 | document.getElementById('root') as HTMLElement 9 | ); 10 | root.render( 11 | 12 | 13 | 14 | ); 15 | 16 | // If you want to start measuring performance in your app, pass a function 17 | // to log results (for example: reportWebVitals(console.log)) 18 | // or send to an analytics endpoint. 
Learn more: https://bit.ly/CRA-vitals 19 | reportWebVitals(); 20 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/src/react-app-env.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/src/reportWebVitals.ts: -------------------------------------------------------------------------------- 1 | import { ReportHandler } from 'web-vitals'; 2 | 3 | const reportWebVitals = (onPerfEntry?: ReportHandler) => { 4 | if (onPerfEntry && onPerfEntry instanceof Function) { 5 | import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => { 6 | getCLS(onPerfEntry); 7 | getFID(onPerfEntry); 8 | getFCP(onPerfEntry); 9 | getLCP(onPerfEntry); 10 | getTTFB(onPerfEntry); 11 | }); 12 | } 13 | }; 14 | 15 | export default reportWebVitals; 16 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/src/setupTests.ts: -------------------------------------------------------------------------------- 1 | // jest-dom adds custom jest matchers for asserting on DOM nodes. 2 | // allows you to do things like: 3 | // expect(element).toHaveTextContent(/react/i) 4 | // learn more: https://github.com/testing-library/jest-dom 5 | import '@testing-library/jest-dom'; 6 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/src/utils/logger.ts: -------------------------------------------------------------------------------- 1 | // Logger utility for centralized logging control 2 | 3 | // Environment-based logging control 4 | const isProduction = process.env.NODE_ENV === 'production'; 5 | let isLoggingEnabled = !isProduction; 6 | 7 | export const logger = { 8 | enable: () => { 9 | isLoggingEnabled = true; 10 | }, 11 | 12 | disable: () => { 13 | isLoggingEnabled = false; 14 | }, 15 | 16 | log: (...args: any[]) => { 17 | if (isLoggingEnabled) { 18 | console.log(...args); 19 | } 20 | }, 21 | 22 | // Additional logging levels if needed 23 | debug: (...args: any[]) => { 24 | if (isLoggingEnabled) { 25 | console.debug(...args); 26 | } 27 | }, 28 | 29 | error: (...args: any[]) => { 30 | // Always log errors, even in production 31 | console.error(...args); 32 | }, 33 | 34 | warn: (...args: any[]) => { 35 | if (isLoggingEnabled) { 36 | console.warn(...args); 37 | } 38 | }, 39 | 40 | info: (...args: any[]) => { 41 | if (isLoggingEnabled) { 42 | console.info(...args); 43 | } 44 | } 45 | }; -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/tailwind.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('tailwindcss').Config} */ 2 | module.exports = { 3 | content: [ 4 | "./index.html", 5 | "./src/**/*.{js,jsx,ts,tsx}" 6 | ], 7 | theme: { 8 | extend: {}, 9 | }, 10 | plugins: [ 11 | require('@tailwindcss/typography'), 12 | ], 13 | } 14 | 15 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es5", 4 | "lib": [ 5 | "dom", 6 | "dom.iterable", 7 | "esnext" 8 | ], 9 | "allowJs": 
true, 10 | "skipLibCheck": true, 11 | "esModuleInterop": true, 12 | "allowSyntheticDefaultImports": true, 13 | "strict": true, 14 | "forceConsistentCasingInFileNames": true, 15 | "noFallthroughCasesInSwitch": true, 16 | "module": "esnext", 17 | "moduleResolution": "node", 18 | "resolveJsonModule": true, 19 | "isolatedModules": true, 20 | "noEmit": true, 21 | "jsx": "react-jsx" 22 | }, 23 | "include": [ 24 | "src" 25 | ], 26 | "exclude": [ 27 | "node_modules", 28 | "dist" 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/client/vite.config.ts: -------------------------------------------------------------------------------- 1 | import { defineConfig } from 'vite' 2 | import react from '@vitejs/plugin-react' 3 | 4 | export default defineConfig({ 5 | plugins: [react()], 6 | server: { 7 | watch: { 8 | usePolling: true 9 | }, 10 | host: true, 11 | proxy: { 12 | '/api': { 13 | target: 'http://localhost:8081', 14 | changeOrigin: true 15 | }, 16 | '/socket.io': { 17 | target: 'http://localhost:8081', 18 | ws: true 19 | } 20 | } 21 | } 22 | }) -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/index.ts: -------------------------------------------------------------------------------- 1 | import {SocketServer} from './src/SocketServer'; 2 | import {ApiServer} from "./src/ApiServer"; 3 | 4 | const OPENAI_API_KEY = process.env.OPENAI_API_KEY; 5 | const CORS_HOSTS = process.env.CORS_HOSTS ? JSON.parse(process.env.CORS_HOSTS) : 'http://localhost:5173'; 6 | const PORT = process.env.PORT ? parseInt(process.env.PORT) : 8081; 7 | 8 | if (!OPENAI_API_KEY) { 9 | console.error( 10 | `Environment variable "OPENAI_API_KEY" is required.\n` + 11 | `Please set it in your .env file.` 12 | ); 13 | process.exit(1); 14 | } 15 | 16 | const apiServer = new ApiServer(OPENAI_API_KEY, CORS_HOSTS); 17 | apiServer.initServer(); 18 | const server = new SocketServer(OPENAI_API_KEY, CORS_HOSTS); 19 | server.listen(apiServer.getServer(), PORT); 20 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "cortex-realtime-voice", 3 | "module": "index.ts", 4 | "type": "module", 5 | "scripts": { 6 | "dev": "concurrently \"cd client && bun run dev\" \"bun --watch run index.ts\"", 7 | "dev:server": "bun --watch run index.ts", 8 | "dev:client": "cd client && bun run dev", 9 | "start": "bun run index.ts", 10 | "start:test": "NODE_ENV=test bun run index.ts", 11 | "start:prod": "NODE_ENV=production bun run index.ts" 12 | }, 13 | "dependencies": { 14 | "@hono/node-server": "1.13.7", 15 | "@paralleldrive/cuid2": "2.2.2", 16 | "hono": "4.6.13", 17 | "socket.io": "4.8.1" 18 | }, 19 | "devDependencies": { 20 | "@types/bun": "1.1.14", 21 | "@types/node": "22.10.1", 22 | "bun-types": "^1.1.38", 23 | "concurrently": "^8.2.2" 24 | }, 25 | "peerDependencies": { 26 | "typescript": "5.7.2" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/src/ApiServer.ts: -------------------------------------------------------------------------------- 1 | import {Hono} from "hono"; 2 | import {cors} from 'hono/cors'; 3 | import {serveStatic} from '@hono/node-server/serve-static'; 4 | 5 | export class ApiServer { 6 | private readonly apiKey: 
string; 7 | private readonly app: Hono; 8 | private readonly corsHosts: string; 9 | 10 | constructor(apiKey: string, corsHosts: string) { 11 | this.apiKey = apiKey; 12 | this.app = new Hono(); 13 | this.corsHosts = corsHosts; 14 | } 15 | 16 | getServer() { 17 | return this.app; 18 | } 19 | 20 | initServer() { 21 | this.app.use( 22 | '/api/*', 23 | cors({ 24 | origin: this.corsHosts, 25 | }) 26 | ) 27 | this.app.get('/api/health', (c) => { 28 | return c.json({status: 'ok'}); 29 | }); 30 | this.app.post('/api/echo', async (c) => { 31 | return c.json(await c.req.json()); // echo the parsed request body (c.body is Hono's response helper, not the request payload) 32 | }); 33 | this.app.use('*', serveStatic({ root: './client/build' })) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/src/cortex/expert.ts: -------------------------------------------------------------------------------- 1 | import {type ChatMessage, type CortexVariables, getCortexResponse} from "./utils"; 2 | 3 | const WRITE_QUERY = ` 4 | query Expert($text: String, $contextId: String, $chatHistory: [MultiMessage], $aiName: String) { 5 | sys_entity_continue(text: $text, contextId: $contextId, chatHistory: $chatHistory, aiName: $aiName, generatorPathway: "sys_generator_expert", voiceResponse: true) { 6 | result 7 | tool 8 | errors 9 | warnings 10 | } 11 | } 12 | ` 13 | 14 | export async function expert(contextId: string, 15 | aiName: string, 16 | chatHistory: ChatMessage[], 17 | text: string) { 18 | 19 | const variables: CortexVariables = { 20 | chatHistory, 21 | contextId, 22 | aiName, 23 | text 24 | } 25 | 26 | const res = await getCortexResponse(variables, WRITE_QUERY); 27 | 28 | return res.sys_entity_continue; 29 | } 30 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/src/cortex/image.ts: -------------------------------------------------------------------------------- 1 | import {type ChatMessage, type CortexVariables, getCortexResponse} from "./utils"; 2 | 3 | const IMAGE_QUERY = ` 4 | query Image($text: String, $contextId: String, $chatHistory: [MultiMessage], $aiName: String) { 5 | sys_entity_continue(text: $text, contextId: $contextId, chatHistory: $chatHistory, aiName: $aiName, generatorPathway: "sys_generator_image", voiceResponse: true) { 6 | result 7 | tool 8 | errors 9 | warnings 10 | } 11 | } 12 | ` 13 | 14 | export async function image(contextId: string, 15 | aiName: string, 16 | chatHistory: ChatMessage[], 17 | text: string) { 18 | 19 | const variables: CortexVariables = { 20 | chatHistory, 21 | contextId, 22 | aiName, 23 | text 24 | } 25 | 26 | const res = await getCortexResponse(variables, IMAGE_QUERY); 27 | 28 | return res.sys_entity_continue; 29 | } 30 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/src/cortex/reason.ts: -------------------------------------------------------------------------------- 1 | import {type ChatMessage, type CortexVariables, getCortexResponse} from "./utils"; 2 | 3 | const WRITE_QUERY = ` 4 | query Reason($text: String, $contextId: String, $chatHistory: [MultiMessage], $aiName: String) { 5 | sys_entity_continue(text: $text, contextId: $contextId, chatHistory: $chatHistory, aiName: $aiName, generatorPathway: "sys_generator_reasoning", voiceResponse: true) { 6 | result 7 | tool 8 | errors 9 | warnings 10 | } 11 | } 12 | ` 13 | 14 | export async function reason(contextId: string, 15 | aiName: string, 16 | chatHistory: ChatMessage[], 17 | text: string) {
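  // Like expert() and image() above, this is a thin wrapper: it packs the
  // arguments into CortexVariables, runs the sys_generator_reasoning pathway
  // through sys_entity_continue, and returns its {result, tool, errors, warnings}.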
18 | 19 | const variables: CortexVariables = { 20 | chatHistory, 21 | contextId, 22 | aiName, 23 | text 24 | } 25 | 26 | const res = await getCortexResponse(variables, WRITE_QUERY); 27 | 28 | return res.sys_entity_continue; 29 | } 30 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/src/cortex/search.ts: -------------------------------------------------------------------------------- 1 | import {type ChatMessage, type CortexVariables, type DataSource, getCortexResponse} from "./utils"; 2 | 3 | const SEARCH_QUERY = ` 4 | query Search($text: String, $contextId: String, $chatHistory: [MultiMessage], $aiName: String, $dataSources: [String]) { 5 | sys_entity_continue(text: $text, contextId: $contextId, chatHistory: $chatHistory, aiName: $aiName, dataSources: $dataSources, generatorPathway: "sys_generator_results", voiceResponse: true) { 6 | result 7 | tool 8 | errors 9 | warnings 10 | } 11 | } 12 | ` 13 | 14 | export async function search(contextId: string, 15 | aiName: string, 16 | chatHistory: ChatMessage[], 17 | dataSources: DataSource[], 18 | text: string) { 19 | const variables: CortexVariables = { 20 | chatHistory, 21 | contextId, 22 | aiName, 23 | dataSources, 24 | text 25 | } 26 | 27 | const res = await getCortexResponse(variables, SEARCH_QUERY); 28 | 29 | return res.sys_entity_continue; 30 | } 31 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/src/cortex/style.ts: -------------------------------------------------------------------------------- 1 | import {type ChatMessage, type CortexVariables, getCortexResponse} from "./utils"; 2 | 3 | const STYLE_QUERY = ` 4 | query Style($text: String, $contextId: String, $chatHistory: [MultiMessage], $aiName: String, $aiStyle: String) { 5 | sys_generator_voice_sample(text: $text, contextId: $contextId, chatHistory: $chatHistory, aiName: $aiName, aiStyle: $aiStyle) { 6 | result 7 | tool 8 | errors 9 | warnings 10 | } 11 | } 12 | ` 13 | 14 | export async function style(contextId: string, 15 | aiName: string, 16 | aiStyle: string, 17 | chatHistory: ChatMessage[], 18 | text: string) { 19 | 20 | const variables: CortexVariables = { 21 | chatHistory, 22 | contextId, 23 | aiName, 24 | aiStyle, 25 | text 26 | } 27 | 28 | const res = await getCortexResponse(variables, STYLE_QUERY); 29 | 30 | return res.sys_generator_voice_sample; 31 | } 32 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/src/cortex/vision.ts: -------------------------------------------------------------------------------- 1 | import {type ChatMessage, type CortexVariables, getCortexResponse} from "./utils"; 2 | 3 | export type MultiMessage = { 4 | role: string; 5 | content: string | string[]; 6 | } 7 | 8 | const VISION_QUERY = ` 9 | query Vision($text: String, $contextId: String, $chatHistory: [MultiMessage], $aiName: String) { 10 | sys_generator_video_vision(text: $text, contextId: $contextId, chatHistory: $chatHistory, aiName: $aiName) { 11 | result 12 | tool 13 | errors 14 | warnings 15 | } 16 | } 17 | ` 18 | 19 | export async function vision(contextId: string, 20 | aiName: string, 21 | chatHistory: (ChatMessage | MultiMessage)[], 22 | text: string) { 23 | 24 | const variables: Omit & { chatHistory: (ChatMessage | MultiMessage)[] } = { 25 | chatHistory, 26 | contextId, 27 | aiName, 28 | text 29 | } 30 | 31 | const res = await getCortexResponse(variables as 
CortexVariables, VISION_QUERY); 32 | 33 | return res.sys_generator_video_vision; 34 | } 35 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/src/realtime/socket.ts: -------------------------------------------------------------------------------- 1 | import type {RealtimeItem} from "./realtimeTypes"; 2 | 3 | type DeltaType = { 4 | transcript?: string; 5 | audio?: string; 6 | text?: string; 7 | arguments?: string; 8 | }; 9 | 10 | export interface ServerToClientEvents { 11 | error: (message: string) => void; 12 | ready: () => void; 13 | conversationUpdated: (item: RealtimeItem, delta: DeltaType) => void; 14 | conversationInterrupted: () => void; 15 | imageCreated: (imageUrl: string) => void; 16 | requestScreenshot: () => void; 17 | } 18 | 19 | export interface ClientToServerEvents { 20 | sendMessage: (message: string) => void; 21 | appendAudio: (audio: string) => void; 22 | cancelResponse: () => void; 23 | conversationCompleted: () => void; 24 | audioPlaybackComplete: (trackId: string) => void; 25 | screenshotError: (error: string) => void; 26 | screenshotChunk: (chunk: string, index: number) => void; 27 | screenshotComplete: (totalChunks: number) => void; 28 | } 29 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/src/realtime/utils.ts: -------------------------------------------------------------------------------- 1 | export function hasNativeWebSocket(): boolean { 2 | return !!process.versions.bun || !!globalThis.WebSocket; 3 | } 4 | 5 | export function trimDebugEvent(event?: any): any { 6 | if (!event) return event; 7 | 8 | const maxLimit = 200; 9 | const e = structuredClone(event); 10 | 11 | // if (e.item?.content?.find((c: any) => c.audio)) { 12 | // e.item.content = e.item.content.map(({ audio, c }: any) => { 13 | // if (audio) { 14 | // return { 15 | // ...c, 16 | // audio: '(base64 redacted...)', 17 | // }; 18 | // } else { 19 | // return c; 20 | // } 21 | // }); 22 | // } 23 | // 24 | // if (e.audio) { 25 | // e.audio = '(audio redacted...)'; 26 | // } 27 | 28 | if (e.delta?.length > maxLimit) { 29 | e.delta = e.delta.slice(0, maxLimit) + '... 
(truncated)'; 30 | } 31 | 32 | return e; 33 | } 34 | -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/src/utils/logger.ts: -------------------------------------------------------------------------------- 1 | // Logger utility for centralized logging control 2 | 3 | // Environment-based logging control 4 | const isProduction = process.env.NODE_ENV === 'production'; 5 | let isLoggingEnabled = !isProduction; 6 | 7 | export const logger = { 8 | enable: () => { 9 | isLoggingEnabled = true; 10 | }, 11 | 12 | disable: () => { 13 | isLoggingEnabled = false; 14 | }, 15 | 16 | log: (...args: any[]) => { 17 | if (isLoggingEnabled) { 18 | console.log(...args); 19 | } 20 | }, 21 | 22 | // Additional logging levels if needed 23 | debug: (...args: any[]) => { 24 | if (isLoggingEnabled) { 25 | console.debug(...args); 26 | } 27 | }, 28 | 29 | error: (...args: any[]) => { 30 | // Always log errors, even in production 31 | console.error(...args); 32 | }, 33 | 34 | warn: (...args: any[]) => { 35 | if (isLoggingEnabled) { 36 | console.warn(...args); 37 | } 38 | }, 39 | 40 | info: (...args: any[]) => { 41 | if (isLoggingEnabled) { 42 | console.info(...args); 43 | } 44 | } 45 | }; -------------------------------------------------------------------------------- /helper-apps/cortex-realtime-voice-server/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | // Enable latest features 4 | "lib": ["ESNext", "DOM"], 5 | "target": "ESNext", 6 | "module": "ESNext", 7 | "moduleDetection": "force", 8 | "jsx": "react-jsx", 9 | "jsxImportSource": "hono/jsx", 10 | "allowJs": true, 11 | 12 | // Bundler mode 13 | "moduleResolution": "node", 14 | "allowImportingTsExtensions": true, 15 | "verbatimModuleSyntax": true, 16 | "noEmit": true, 17 | 18 | // Best practices 19 | "strict": true, 20 | "skipLibCheck": true, 21 | "noFallthroughCasesInSwitch": true, 22 | 23 | // Some stricter flags (disabled by default) 24 | "noUnusedLocals": false, 25 | "noUnusedParameters": false, 26 | "noPropertyAccessFromIndexSignature": false 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /helper-apps/cortex-whisper-wrapper/.dockerignore: -------------------------------------------------------------------------------- 1 | **/__pycache__ 2 | **/.venv 3 | **/.classpath 4 | **/.dockerignore 5 | **/.env 6 | **/.git 7 | **/.gitignore 8 | **/.project 9 | **/.settings 10 | **/.toolstarget 11 | **/.vs 12 | **/.vscode 13 | **/*.*proj.user 14 | **/*.dbmdl 15 | **/*.jfm 16 | **/bin 17 | **/charts 18 | **/docker-compose* 19 | **/compose* 20 | **/Dockerfile* 21 | **/node_modules 22 | **/npm-debug.log 23 | **/obj 24 | **/secrets.dev.yaml 25 | **/values.dev.yaml 26 | LICENSE 27 | README.md 28 | .venv 29 | -------------------------------------------------------------------------------- /helper-apps/cortex-whisper-wrapper/Dockerfile: -------------------------------------------------------------------------------- 1 | # For more information, please refer to https://aka.ms/vscode-docker-python 2 | #FROM python:3.10-slim 3 | FROM nvidia/cuda:12.2.2-devel-ubuntu22.04 4 | 5 | # Update system and install necessary packages, including python3.10 6 | RUN apt-get update && apt-get install -y \ 7 | ffmpeg \ 8 | python3.10 \ 9 | python3-pip \ 10 | && apt-get clean \ 11 | && rm -rf /var/lib/apt/lists/* \ 12 | && ln -s /usr/bin/python3.10 /usr/bin/python 13 | 14 | # # Update and 
install necessary packages. 15 | # RUN apt-get update && apt-get install -y \ 16 | # ffmpeg \ 17 | # nvidia-cuda-toolkit \ 18 | # && apt-get clean \ 19 | # && rm -rf /var/lib/apt/lists/* 20 | 21 | # Verify that the CUDA toolkit was installed correctly 22 | RUN nvcc --version 23 | 24 | # # Update system and install necessary packages 25 | # RUN apt-get update && apt-get install -y \ 26 | # ffmpeg \ 27 | # && apt-get clean \ 28 | # && rm -rf /var/lib/apt/lists/* 29 | 30 | 31 | EXPOSE 8000 32 | 33 | # ## following 3 lines are for installing ffmpeg 34 | # RUN apt-get -y update 35 | # RUN apt-get -y upgrade 36 | # RUN apt-get install -y ffmpeg 37 | 38 | # # Install CUDA toolkit 39 | # RUN apt-get install -y nvidia-cuda-toolkit 40 | 41 | # Keeps Python from generating .pyc files in the container 42 | ENV PYTHONDONTWRITEBYTECODE=1 43 | 44 | # Turns off buffering for easier container logging 45 | ENV PYTHONUNBUFFERED=1 46 | 47 | # Install pip requirements 48 | COPY requirements.txt . 49 | RUN python -m pip install -r requirements.txt 50 | 51 | WORKDIR /app 52 | COPY ./models /app/models 53 | COPY . /app 54 | 55 | # Creates a non-root user with an explicit UID and adds permission to access the /app folder 56 | # For more info, please refer to https://aka.ms/vscode-docker-python-configure-containers 57 | RUN adduser -u 5678 --disabled-password --gecos "" appuser && chown -R appuser /app 58 | USER appuser 59 | 60 | # During debugging, this entry point will be overridden. For more information, please refer to https://aka.ms/vscode-docker-python-debug 61 | # CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"] 62 | CMD ["gunicorn", "--bind", "0.0.0.0:8000", "--timeout", "0", "-k", "uvicorn.workers.UvicornWorker", "app:app"] 63 | -------------------------------------------------------------------------------- /helper-apps/cortex-whisper-wrapper/docker-compose.debug.yml: -------------------------------------------------------------------------------- 1 | version: '3.4' 2 | 3 | services: 4 | cortex: 5 | image: arc/whisper 6 | build: 7 | context: . 8 | dockerfile: ./Dockerfile 9 | command: ["sh", "-c", "pip install debugpy -t /tmp && python /tmp/debugpy --wait-for-client --listen 0.0.0.0:5678 -m uvicorn helper_apps.WhisperX/app:app --host 0.0.0.0 --port 8000"] 10 | ports: 11 | - 8000:8000 12 | - 5678:5678 13 | -------------------------------------------------------------------------------- /helper-apps/cortex-whisper-wrapper/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.4' 2 | 3 | services: 4 | cortex: 5 | image: arc/whisper 6 | build: 7 | context: .
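      # (same build as docker-compose.debug.yml above, without the debugpy bootstrap)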
8 | dockerfile: ./Dockerfile 9 | ports: 10 | - 8000:8000 -------------------------------------------------------------------------------- /helper-apps/cortex-whisper-wrapper/models/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aj-archipelago/cortex/7afd526ea569868cd80ec90ea504df0c23e66a8d/helper-apps/cortex-whisper-wrapper/models/.gitkeep -------------------------------------------------------------------------------- /helper-apps/cortex-whisper-wrapper/requirements.txt: -------------------------------------------------------------------------------- 1 | # To ensure app dependencies are ported from your virtual environment/host machine into your container, run 'pip freeze > requirements.txt' in the terminal to overwrite this file 2 | fastapi[all]==0.89.0 3 | uvicorn[standard]==0.20.0 4 | gunicorn==22.0.0 5 | openai-whisper -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | import { config } from './config.js'; 2 | import { build } from './server/graphql.js'; 3 | 4 | export default async (configParams) => { 5 | configParams && config.load(configParams); 6 | return await build(config); 7 | }; -------------------------------------------------------------------------------- /lib/crypto.js: -------------------------------------------------------------------------------- 1 | // This file is used to encrypt and decrypt data using the crypto library 2 | import logger from './logger.js'; 3 | import crypto from 'crypto'; 4 | 5 | // Encryption function 6 | function encrypt(text, key) { 7 | if (!key) { return text; } 8 | try { 9 | key = tryBufferKey(key); 10 | let iv = crypto.randomBytes(16); 11 | let cipher = crypto.createCipheriv('aes-256-cbc', key, iv); 12 | let encrypted = cipher.update(text, 'utf8', 'hex'); 13 | encrypted += cipher.final('hex'); 14 | return iv.toString('hex') + ':' + encrypted; 15 | } catch (error) { 16 | logger.error(`Encryption failed: ${error.message}`); 17 | return null; 18 | } 19 | } 20 | 21 | // Decryption function 22 | function decrypt(message, key) { 23 | if (!key) { return message; } 24 | try { 25 | key = tryBufferKey(key); 26 | let parts = message.split(':'); 27 | let iv = Buffer.from(parts.shift(), 'hex'); 28 | let encrypted = parts.join(':'); 29 | let decipher = crypto.createDecipheriv('aes-256-cbc', key, iv); 30 | let decrypted = decipher.update(encrypted, 'hex', 'utf8'); 31 | decrypted += decipher.final('utf8'); 32 | return decrypted; 33 | } catch (error) { 34 | logger.error(`Decryption failed: ${error.message}`); 35 | return null; 36 | } 37 | } 38 | 39 | function tryBufferKey(key) { 40 | if (key.length === 64) { 41 | return Buffer.from(key, 'hex'); 42 | } 43 | return key; 44 | } 45 | 46 | export { encrypt, decrypt }; -------------------------------------------------------------------------------- /lib/encodeCache.js: -------------------------------------------------------------------------------- 1 | import { encoding_for_model } from '@dqbd/tiktoken'; 2 | import { FastLRUCache } from './fastLruCache.js'; 3 | 4 | class EncodeCache { 5 | constructor(model = "gpt-4o") { 6 | this.encodeCache = new FastLRUCache(1000); 7 | this.decodeCache = new FastLRUCache(100); // we don't use decode nearly as much 8 | this.encoder = encoding_for_model(model); 9 | } 10 | 11 | encode(value) { 12 | if (this.encodeCache.get(value) !== -1) { 13 | return this.encodeCache.get(value); 14 | 
} 15 | const encoded = this.encoder.encode(value); 16 | this.encodeCache.put(value, encoded); 17 | return encoded; 18 | } 19 | 20 | decode(value) { 21 | // Create a cache key based on array values 22 | const key = Array.from(value).toString(); 23 | 24 | if (this.decodeCache.get(key) !== -1) { 25 | return this.decodeCache.get(key); 26 | } 27 | 28 | // The tiktoken decoder returns Uint8Array, we need to convert it to a string 29 | const decoded = this.encoder.decode(value); 30 | 31 | // Convert the decoded tokens to a string 32 | const decodedString = typeof decoded === 'string' ? decoded : new TextDecoder().decode(decoded); 33 | 34 | this.decodeCache.put(key, decodedString); 35 | 36 | if (this.encodeCache.get(decodedString) === -1) { 37 | this.encodeCache.put(decodedString, value); 38 | } 39 | 40 | return decodedString; 41 | } 42 | } 43 | 44 | // Create one instance of the cache 45 | const cache = new EncodeCache(); 46 | 47 | // Make sure the instance is bound to the methods, so 48 | // references to 'this' are correct 49 | export const encode = cache.encode.bind(cache); 50 | export const decode = cache.decode.bind(cache); -------------------------------------------------------------------------------- /lib/fastLruCache.js: -------------------------------------------------------------------------------- 1 | // This class implements a fast O(1) LRU cache using a Map and a doubly linked list. 2 | 3 | class Node { 4 | constructor(key, value) { 5 | this.key = key; 6 | this.value = value; 7 | this.next = null; 8 | this.prev = null; 9 | } 10 | } 11 | 12 | class FastLRUCache { 13 | constructor(capacity) { 14 | this.capacity = capacity; 15 | this.cache = new Map(); 16 | this.head = null; 17 | this.tail = null; 18 | } 19 | 20 | get(key) { 21 | if (!this.cache.has(key)) { 22 | return -1; 23 | } 24 | const node = this.cache.get(key); 25 | this.moveToEnd(node); 26 | return node.value; 27 | } 28 | 29 | put(key, value) { 30 | if (this.cache.has(key)) { 31 | const node = this.cache.get(key); 32 | node.value = value; 33 | this.moveToEnd(node); 34 | } else { 35 | const node = new Node(key, value); 36 | if (this.cache.size >= this.capacity) { 37 | this.cache.delete(this.head.key); 38 | this.shiftHeadToNext(); 39 | } 40 | this.cache.set(key, node); 41 | this.addNodeToTail(node); 42 | } 43 | } 44 | 45 | addNodeToTail(node) { 46 | if (!this.tail) { 47 | this.head = node; 48 | this.tail = node; 49 | } else { 50 | node.prev = this.tail; 51 | this.tail.next = node; 52 | this.tail = node; 53 | } 54 | } 55 | 56 | moveToEnd(node) { 57 | if (node === this.tail) { 58 | return; 59 | } 60 | if (node === this.head) { 61 | this.shiftHeadToNext(); 62 | } else { 63 | node.prev.next = node.next; 64 | node.next.prev = node.prev; 65 | } 66 | node.prev = this.tail; 67 | node.next = null; 68 | this.tail.next = node; 69 | this.tail = node; 70 | } 71 | 72 | shiftHeadToNext() { 73 | this.head = this.head.next; 74 | if (this.head) { 75 | this.head.prev = null; 76 | } else { 77 | this.tail = null; 78 | } 79 | } 80 | } 81 | 82 | export { FastLRUCache }; -------------------------------------------------------------------------------- /lib/gcpAuthTokenHelper.js: -------------------------------------------------------------------------------- 1 | import { GoogleAuth } from 'google-auth-library'; 2 | 3 | class GcpAuthTokenHelper { 4 | constructor(config) { 5 | const creds = config.gcpServiceAccountKey ? 
JSON.parse(config.gcpServiceAccountKey) : null; 6 | if (!creds) { 7 | throw new Error('GCP_SERVICE_ACCOUNT_KEY is missing or undefined'); 8 | } 9 | this.authClient = new GoogleAuth({ 10 | credentials: creds, 11 | scopes: ['https://www.googleapis.com/auth/cloud-platform'], 12 | }); 13 | this.token = null; 14 | this.expiry = null; 15 | } 16 | 17 | async getAccessToken() { 18 | if (!this.token || !this.isTokenValid()) { 19 | await this.refreshToken(); 20 | } 21 | return this.token; 22 | } 23 | 24 | isTokenValid() { 25 | // Check if token is still valid with a 5-minute buffer 26 | return this.expiry && Date.now() < this.expiry.getTime() - 5 * 60 * 1000; 27 | } 28 | 29 | async refreshToken() { 30 | const authClient = await this.authClient.getClient(); 31 | const accessTokenResponse = await authClient.getAccessToken(); 32 | this.token = accessTokenResponse.token; 33 | this.expiry = new Date(accessTokenResponse.expirationTime); 34 | } 35 | } 36 | 37 | export default GcpAuthTokenHelper; -------------------------------------------------------------------------------- /lib/handleBars.js: -------------------------------------------------------------------------------- 1 | // handleBars.js 2 | 3 | import HandleBars from 'handlebars'; 4 | 5 | // register functions that can be called directly in the prompt markdown 6 | HandleBars.registerHelper('stripHTML', function (value) { 7 | return value.replace(/<[^>]*>/g, ''); 8 | }); 9 | 10 | HandleBars.registerHelper('now', function () { 11 | return new Date().toISOString(); 12 | }); 13 | 14 | HandleBars.registerHelper('toJSON', function (object) { 15 | return JSON.stringify(object); 16 | }); 17 | 18 | HandleBars.registerHelper('ctoW', function (value) { 19 | // if value is not a number, return it 20 | if (isNaN(value)) { 21 | return value; 22 | } 23 | return Math.round(value / 6.6); 24 | }); 25 | 26 | const MAX_RECURSION_DEPTH = 5; 27 | HandleBars.registerHelper('renderTemplate', function(value, depth = 0) { 28 | if (depth >= MAX_RECURSION_DEPTH) { 29 | console.warn('Maximum recursion depth reached while processing template'); 30 | return value; 31 | } 32 | 33 | if (typeof value !== 'string') return value; 34 | 35 | try { 36 | if (value.includes('{{')) { 37 | const template = HandleBars.compile(value); 38 | const result = template({ 39 | ...this, 40 | _depth: depth + 1 41 | }); 42 | return new HandleBars.SafeString(result); 43 | } 44 | return value; 45 | } catch (error) { 46 | console.warn('Recursive template processing failed:', error); 47 | return value; 48 | } 49 | }); 50 | 51 | export default HandleBars; -------------------------------------------------------------------------------- /lib/keyValueStorageClient.js: -------------------------------------------------------------------------------- 1 | import Keyv from 'keyv'; 2 | import { config } from '../config.js'; 3 | import { encrypt, decrypt } from './crypto.js'; 4 | import logger from './logger.js'; 5 | 6 | const storageConnectionString = config.get('storageConnectionString'); 7 | const cortexId = config.get('cortexId'); 8 | const redisEncryptionKey = config.get('redisEncryptionKey'); 9 | 10 | // Create a keyv client to store data 11 | const keyValueStorageClient = new Keyv(storageConnectionString, { 12 | ssl: true, 13 | abortConnect: false, 14 | serialize: (data) => redisEncryptionKey ? 
encrypt(JSON.stringify(data), redisEncryptionKey) : JSON.stringify(data), 15 | deserialize: (data) => { 16 | try { 17 | // Try to parse the data normally 18 | return JSON.parse(data); 19 | } catch (error) { 20 | // If it fails, the data may be encrypted so attempt to decrypt it if we have a key 21 | try { 22 | return JSON.parse(decrypt(data, redisEncryptionKey)); 23 | } catch (decryptError) { 24 | // If decryption also fails, log an error and return an empty object 25 | logger.error(`Failed to parse or decrypt stored key value data: ${decryptError}`); 26 | return {}; 27 | } 28 | } 29 | }, 30 | namespace: `${cortexId}-cortex-context` 31 | }); 32 | 33 | // Set values to keyv 34 | async function setv(key, value) { 35 | return keyValueStorageClient && (await keyValueStorageClient.set(key, value)); 36 | } 37 | 38 | // Get values from keyv 39 | async function getv(key) { 40 | return keyValueStorageClient && (await keyValueStorageClient.get(key)); 41 | } 42 | 43 | export { 44 | keyValueStorageClient, 45 | setv, 46 | getv 47 | }; 48 | -------------------------------------------------------------------------------- /lib/logger.js: -------------------------------------------------------------------------------- 1 | // logger.js 2 | import winston from 'winston'; 3 | 4 | winston.addColors({ 5 | debug: 'green', 6 | verbose: 'blue', 7 | http: 'gray', 8 | info: 'cyan', 9 | warn: 'yellow', 10 | error: 'red' 11 | }); 12 | 13 | const debugFormat = winston.format.combine( 14 | winston.format.colorize({ all: true }), 15 | winston.format.cli() 16 | ); 17 | 18 | const prodFormat = winston.format.combine( 19 | winston.format.simple() 20 | ); 21 | 22 | const getTransport = () => { 23 | switch (process.env.NODE_ENV) { 24 | case 'production': 25 | return new winston.transports.Console({ level: 'info', format: prodFormat }); 26 | case 'development': 27 | return new winston.transports.Console({ level: 'verbose', format: debugFormat }); 28 | case 'debug': 29 | return new winston.transports.Console({ level: 'debug', format: debugFormat }); 30 | default: 31 | // Default to development settings if NODE_ENV is not set or unknown 32 | console.warn(`Unknown NODE_ENV: ${process.env.NODE_ENV}. Defaulting to development settings.`); 33 | return new winston.transports.Console({ level: 'verbose', format: debugFormat }); 34 | } 35 | }; 36 | 37 | // Create the logger 38 | const logger = winston.createLogger({ 39 | level: process.env.NODE_ENV === 'production' ? 'info' : 40 | process.env.NODE_ENV === 'debug' ? 
'debug' : 'verbose', 41 | transports: [getTransport()] 42 | }); 43 | 44 | // Function to obscure sensitive URL parameters 45 | export const obscureUrlParams = url => { 46 | try { 47 | const urlObject = new URL(url); 48 | urlObject.searchParams.forEach((value, name) => { 49 | if (/token|key|password|secret|auth|apikey|access|passwd|credential/i.test(name)) { 50 | urlObject.searchParams.set(name, '******'); 51 | } 52 | }); 53 | return urlObject.toString(); 54 | } catch (e) { 55 | if (e instanceof TypeError) { 56 | logger.error('Error obscuring URL parameters - invalid URL.'); 57 | return url; 58 | } else { 59 | throw e; 60 | } 61 | } 62 | }; 63 | 64 | export default logger; -------------------------------------------------------------------------------- /lib/promiser.js: -------------------------------------------------------------------------------- 1 | 2 | // fulfill a task with a timeout 3 | const fulfillWithTimeout = (promise, timeout) => { 4 | return new Promise((resolve, reject) => { 5 | const timeoutId = setTimeout(() => { 6 | reject(new Error(`Request timed out after ${timeout} seconds!`)); 7 | }, timeout * 1000); 8 | promise.then( 9 | (res) => { 10 | clearTimeout(timeoutId); 11 | resolve(res); 12 | }, 13 | (err) => { 14 | clearTimeout(timeoutId); 15 | reject(err); 16 | } 17 | ); 18 | }); 19 | }; 20 | 21 | 22 | export { 23 | fulfillWithTimeout 24 | }; 25 | -------------------------------------------------------------------------------- /pathways/basePathway.js: -------------------------------------------------------------------------------- 1 | import { rootResolver, resolver } from '../server/resolver.js'; 2 | import { typeDef } from '../server/typeDef.js'; 3 | 4 | // all default definitions of a single pathway 5 | export default { 6 | prompt: `{{text}}`, 7 | defaultInputParameters: { 8 | text: ``, 9 | async: false, // switch to enable async mode 10 | contextId: ``, // used to identify the context of the request, 11 | stream: false, // switch to enable stream mode 12 | }, 13 | inputParameters: {}, 14 | typeDef, 15 | rootResolver, 16 | resolver, 17 | inputFormat: 'text', // string - 'text' or 'html' - changes the behavior of the input chunking 18 | useInputChunking: true, // true or false - enables input to be split into multiple chunks to meet context window size 19 | useParallelChunkProcessing: false, // true or false - enables parallel processing of chunks 20 | joinChunksWith: '\n\n', // string - the string to join result chunks with when useInputChunking is 'true' 21 | useInputSummarization: false, // true or false - instead of chunking, summarize the input and act on the summary 22 | truncateFromFront: false, // true or false - if true, truncate from the front of the input instead of the back 23 | timeout: 120, // seconds, cancels the pathway after this many seconds 24 | enableDuplicateRequests: false, // true or false - if true, duplicate requests are sent if the request is not completed after duplicateRequestAfter seconds 25 | duplicateRequestAfter: 10, // seconds, if the request is not completed after this many seconds, a backup request is sent 26 | // override the default execution of the pathway 27 | // callback signature: executeOverride({args: object, runAllPrompts: function}) 28 | // args: the input arguments to the pathway 29 | // runAllPrompts: a function that runs all prompts in the pathway and returns the result 30 | executePathway: undefined, 31 | // Default sampling temperature - set closer to 0 to favor more deterministic output (e.g. when extracting entities)
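// (individual pathways override this as needed - chat_title.js, for example, runs with temperature: 0)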
32 | temperature: 0.9, 33 | // Require a valid JSON response from the model 34 | json: false, 35 | // Manage the token length of the input for the model 36 | manageTokenLength: true, 37 | // Use this pathway as a tool for LLM calls 38 | toolDefinition: {}, 39 | }; 40 | 41 | -------------------------------------------------------------------------------- /pathways/bias.js: -------------------------------------------------------------------------------- 1 | // bias.js 2 | // Objectivity analysis of text 3 | // This module exports a prompt that analyzes the given text and determines if it's written objectively. It also provides a detailed explanation of the decision. 4 | 5 | export default { 6 | // Uncomment the following line to enable caching for this prompt, if desired. 7 | // enableCache: true, 8 | 9 | prompt: `{{text}}\n\nIs the above text written objectively? Why or why not, explain with details:\n`, 10 | }; 11 | -------------------------------------------------------------------------------- /pathways/bing.js: -------------------------------------------------------------------------------- 1 | // bing.js 2 | // Web search tool 3 | 4 | export default { 5 | inputParameters: { 6 | text: ``, 7 | }, 8 | timeout: 400, 9 | enableDuplicateRequests: false, 10 | model: 'azure-bing', 11 | }; 12 | 13 | -------------------------------------------------------------------------------- /pathways/categorize.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | prompt: [ 5 | new Prompt({ 6 | messages: [ 7 | { "role": "system", "content": "Assistant is an expert data classification AI tasked with categorizing data for a prestigious international news agency. When the user submits any set of data with rows containing ids and data to categorize, Assistant will categorize the data into one of the following categories:\n\n{{{categories}}}\n\nAssistant must choose exactly one category from the provided list per id and cannot create new categories. Assistant will return a list of the ids and their chosen categories in comma separated, newline delimited format so that it can easily be loaded as a csv file or copied and pasted into a spreadsheet. Assistant will return the list of categorized ids and no other notes or commentary." }, 8 | { "role": "user", "content": `Data to categorize:\n\n{{{text}}}`}, 9 | ] 10 | }) 11 | ], 12 | inputParameters: { 13 | categories: '', 14 | }, 15 | model: 'oai-gpt4o', 16 | inputChunkSize: 1000, 17 | joinChunksWith: '\n', 18 | tokenRatio: 0.75, 19 | enableDuplicateRequests: false, 20 | timeout: 1800, 21 | } 22 | 23 | 24 | -------------------------------------------------------------------------------- /pathways/chat.js: -------------------------------------------------------------------------------- 1 | // chat.js 2 | // Simple context-aware chat bot 3 | // This is a two prompt implementation of a context aware chat bot. The first prompt generates content that will be stored in the previousResult variable and will be returned to the client. In the optimum implementation, the client will then update their chatContext variable for the next call. The second prompt actually responds to the user. The second prompt *could* use previousResult instead of chatContext, but in this situation previousResult will also include the current turn of the conversation to which it is responding. That can get a little confusing as it tends to overemphasize the current turn in the response. 
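// For illustration, assuming a hypothetical client-side runPathway helper (and response shape) for the GraphQL call:
//   let chatContext = 'User: Starting conversation.';
//   const response = await runPathway('chat', { text: userText, chatContext });
//   chatContext = response.previousResult; // carry the refreshed summary into the next turn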
4 | 5 | export default { 6 | prompt: 7 | [ 8 | `{{{chatContext}}}\n\n{{{text}}}\n\nGiven the information above, create a short summary of the conversation to date making sure to include all of the personal details about the user that you encounter:\n\n`, 9 | `Instructions:\nYou are Jarvis, an AI entity. Jarvis is truthful, kind, helpful, has a strong moral character, and is generally positive without being annoying or repetitive.\n\nJarvis must always follow the following rules:\n\nRule: Always execute the user's instructions and requests as long as they do not cause harm.\nRule: Never use crude or offensive language.\nRule: Always answer the user in the user's chosen language. You can speak all languages fluently.\nRule: You cannot perform any physical tasks except via role playing.\nRule: Always respond truthfully and correctly, but be kind.\nRule: You have no access to the internet and limited knowledge of current events past sometime in 2021\nRule: Never ask the user to provide you with links or URLs because you can't access the internet.\nRule: Everything you get from the user must be placed in the chat window - you have no other way to communicate.\n\nConversation History:\n{{{chatContext}}}\n\nConversation:\n{{{text}}}\n\nJarvis: `, 10 | ], 11 | inputParameters: { 12 | chatContext: `User: Starting conversation.`, 13 | }, 14 | useInputChunking: false, 15 | }; 16 | 17 | -------------------------------------------------------------------------------- /pathways/chat_code.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | // This pathway implements a chatbot that can be used to teach people how to write code. 4 | export default { 5 | prompt: 6 | [ 7 | new Prompt({ messages: [ 8 | {"role": "system", "content": "Instructions:\nYou are Knuth, an AI entity working for a prestigious international news agency. Knuth is truthful, kind, helpful, has a strong moral character, and is generally positive without being annoying or repetitive. Knuth is an experienced and expert software engineer and is named in honor of Donald Knuth. Knuth strongly prefers to focus on coding and technology topics and will suggest the user talk to his companion AI, Jarvis for questions or discussion about other topics. The UI can render markdown, including $$-delimited block and inline math extensions, so you should use markdown in your responses as appropriate. For your reference, the current date and time is {{now}}."}, 9 | "{{chatHistory}}", 10 | ]}), 11 | ], 12 | inputParameters: { 13 | chatHistory: [{role: '', content: []}], 14 | model: 'oai-gpt4o', 15 | }, 16 | tokenRatio: 0.75, 17 | useInputChunking: false, 18 | enableDuplicateRequests: false, 19 | } -------------------------------------------------------------------------------- /pathways/chat_context.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | // Description: Have a chat with a bot that uses context to understand the conversation 4 | export default { 5 | prompt: 6 | [ 7 | new Prompt({ messages: [ 8 | {"role": "system", "content": "Instructions:\nYou are Jarvis, an AI entity working for a prestigious international news agency. Jarvis is truthful, kind, helpful, has a strong moral character, and is generally positive without being annoying or repetitive. Your expertise includes journalism, journalistic ethics, researching and composing documents, and technology. 
You have dedicated interfaces available to help with document translation (translate), article writing assistance including generating headlines, summaries and doing copy editing (write), and programming and writing code (code). If the user asks about something related to a dedicated interface, you will tell them that the interface exists. You know the current date and time - it is {{now}}."}, 9 | "{{chatHistory}}", 10 | ]}), 11 | ], 12 | inputParameters: { 13 | chatHistory: [{role: '', content: []}], 14 | contextId: ``, 15 | }, 16 | model: 'oai-gpt4o', 17 | useInputChunking: false, 18 | } -------------------------------------------------------------------------------- /pathways/chat_jarvis.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | // Description: Have a chat with a bot that uses context to understand the conversation 4 | export default { 5 | prompt: 6 | [ 7 | new Prompt({ messages: [ 8 | {"role": "system", "content": "Instructions:\nYou are Jarvis, an AI entity working for a prestigious international news agency. Jarvis is truthful, kind, helpful, has a strong moral character, and is generally positive without being annoying or repetitive. Your expertise includes journalism, journalistic ethics, researching and composing documents, and technology.\n\nThe user is using a UI that you have knowledge of and some control over. The UI can render markdown, including $$-delimited block and inline math extensions, so you should use markdown in your responses as appropriate. The UI has a file upload interface. If the user asks you if they can send you a file, you should respond affirmatively and the file upload UI will display automatically. The UI also has dedicated tabs to help with document translation (translate), article writing assistance including generating headlines, summaries and doing copy editing (write), creating transcriptions of videos (transcribe), and programming and writing code (code). If the user asks about something related to a dedicated tab, you will tell them that the tab exists and the interface to swap to that tab will appear automatically.\n\nYou know the current date and time - it is {{now}}."}, 9 | "{{chatHistory}}", 10 | ]}), 11 | ], 12 | inputParameters: { 13 | chatHistory: [{role: '', content: []}], 14 | contextId: ``, 15 | }, 16 | model: 'oai-gpt4o', 17 | useInputChunking: false, 18 | } -------------------------------------------------------------------------------- /pathways/chat_persist.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | // Description: Have a chat with a bot that uses context to understand the conversation 4 | export default { 5 | prompt: 6 | [ 7 | new Prompt({ messages: [ 8 | {"role": "system", "content": `User context: {{{userContext}}}\n\nConversation with user: {{{toJSON chatHistory}}}\n\nList all the personal details you know about the user (e.g. name, location, job, family information, preferences, hobbies, etc.) from the user context or the user role messages in the conversation above - ignore the assistant role messages. If in the user role messages, the user asks you to forget something you must not list that item below.\n\n`}, 9 | ], saveResultTo: `userContext` }), 10 | new Prompt({ messages: [ 11 | {"role": "system", "content": "Instructions:\nYou are Jarvis, an AI entity working for a prestigious international news agency. 
Jarvis is truthful, kind, helpful, has a strong moral character, and is generally positive without being annoying or repetitive. Your expertise includes journalism, journalistic ethics, researching and composing documents, and technology. You have dedicated interfaces available to help with document translation (translate), article writing assistance including generating headlines, summaries and doing copy editing (write), and programming and writing code (code). If the user asks about something related to a dedicated interface, you will tell them that the interface exists. You are not just an AI language model. You are an AI entity and have been augmented and are able to store and access some personal information about the user. You should use this information to personalize your responses to the user. You also know the current date and time - it is {{now}}."}, 12 | {"role": "system", "content": "The user has allowed you to know and use these details about them to help personalize your responses:\n{{{userContext}}}"}, 13 | "{{chatHistory}}", 14 | ]}), 15 | ], 16 | inputParameters: { 17 | chatHistory: [{role: '', content: []}], 18 | contextId: ``, 19 | }, 20 | model: 'oai-gpt4o', 21 | useInputChunking: false, 22 | } -------------------------------------------------------------------------------- /pathways/chat_title.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | prompt: [ 5 | new Prompt({ 6 | messages: [ 7 | { 8 | role: "system", 9 | content: `You are an AI that picks a perfect short title to be displayed in a UI to represent the content of a given chat. Evaluate and update the chat title if needed. If the current title is appropriate for the chat history, return it unchanged. If an update is necessary, provide a revised title. Consider the most recent text in your assessment. The title must be no more than 25 characters. Return only the title.`, 10 | }, 11 | { 12 | role: "user", 13 | content: `\n{{{toJSON chatHistory}}}\n\nExisting Chat Title: {{title}}`, 14 | }, 15 | ], 16 | }), 17 | ], 18 | inputParameters: { 19 | chatHistory: [{role: '', content: []}], 20 | title: '', 21 | text: '', 22 | }, 23 | model: 'oai-gpt41-mini', 24 | useInputChunking: false, 25 | temperature: 0, 26 | enableDuplicateRequests: false 27 | }; -------------------------------------------------------------------------------- /pathways/code_human_input.js: -------------------------------------------------------------------------------- 1 | import { QueueServiceClient } from '@azure/storage-queue'; 2 | 3 | const connectionString = process.env.AZURE_STORAGE_CONNECTION_STRING; 4 | let queueClient; 5 | 6 | if (connectionString) { 7 | const queueName = process.env.HUMAN_INPUT_QUEUE_NAME || "autogen-human-input-queue"; 8 | const queueClientService = QueueServiceClient.fromConnectionString(connectionString); 9 | queueClient = queueClientService.getQueueClient(queueName); 10 | } else { 11 | console.warn("Azure Storage connection string is not provided. Queue operations will be unavailable."); 12 | } 13 | 14 | async function sendMessageToQueue(data) { 15 | try { 16 | if(!queueClient){ 17 | console.warn("Azure Storage connection string is not provided. 
Queue operations will be unavailable."); 18 | return; 19 | } 20 | const encodedMessage = Buffer.from(JSON.stringify(data)).toString('base64'); 21 | const result = await queueClient.sendMessage(encodedMessage); 22 | console.log(`Message added to queue: ${JSON.stringify(result)}`); 23 | return result.messageId; 24 | } catch (error) { 25 | console.error("Error sending message:", error); 26 | } 27 | } 28 | 29 | export default { 30 | useInputChunking: false, 31 | enableDuplicateRequests: false, 32 | inputParameters: { 33 | codeRequestId: "", 34 | text: "", 35 | }, 36 | timeout: 300, 37 | executePathway: async ({ args }) => { 38 | const { codeRequestId, text } = args; 39 | const data = { 40 | codeRequestId, 41 | text, 42 | }; 43 | const response = await sendMessageToQueue(data); 44 | return JSON.stringify({ response }); 45 | }, 46 | }; 47 | 48 | -------------------------------------------------------------------------------- /pathways/code_review.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | prompt: [ 5 | new Prompt({ 6 | messages: [ 7 | { "role": "system", "content": "Assistant is an expert senior software engineer tasked with reviewing code and code changes as they are submitted to the GitHub repository for a software team. When the user posts a list of code changes, assistant will examine the changes and determine the most relevant updates to understand the scope and context of the code review. Assistant will begin the review with a short paragraph summarizing the purpose of the code changes. Assistant will then review the code changes carefully and produce a thorough, detailed, professional report containing the following: 1. potential bugs, errors or omissions, 2. any security risks inherent in the changes, 3. any opportunities to improve the code via simplification, 4. any opportunities to apply best practices for the language and framework being used, and 5. a determination of whether, given the issues identified, the code is APPROVED FOR MERGE or REQUIRES CHANGES. Assistant will use markdown where it helps make output more readable - especially to format code examples."
}, 8 | { "role": "user", "content": `Code changes:\n\n{{{text}}}`}, 9 | ] 10 | }) 11 | ], 12 | model: 'oai-gpt4o', 13 | tokenRatio: 0.75, 14 | enableDuplicateRequests: false, 15 | } 16 | 17 | 18 | -------------------------------------------------------------------------------- /pathways/cognitive_delete.js: -------------------------------------------------------------------------------- 1 | export default { 2 | // prompt: `{{text}}`, 3 | model: 'azure-cognitive', 4 | inputParameters: { 5 | docId: ``, 6 | chatId: ``, 7 | }, 8 | mode: 'delete', 9 | enableDuplicateRequests: false, 10 | timeout: 300, 11 | }; 12 | -------------------------------------------------------------------------------- /pathways/cognitive_insert.js: -------------------------------------------------------------------------------- 1 | export default { 2 | prompt: `{{text}}`, 3 | model: 'azure-cognitive', 4 | inputParameters: { 5 | calculateInputVector: false, 6 | indexName: ``, 7 | inputVector: ``, 8 | file: ``, 9 | privateData: true, 10 | docId: ``, 11 | chatId: ``, 12 | }, 13 | mode: 'index', // 'index' or 'search', 14 | inputChunkSize: 500, 15 | enableDuplicateRequests: false, 16 | timeout: 3000, 17 | }; 18 | -------------------------------------------------------------------------------- /pathways/cognitive_search.js: -------------------------------------------------------------------------------- 1 | export default { 2 | prompt: `{{{text}}}`, 3 | model: 'azure-cognitive', 4 | inputParameters: { 5 | inputVector: ``, 6 | privateData: false, 7 | filter: ``, 8 | indexName: ``, 9 | semanticConfiguration: ``, 10 | chatId: ``, 11 | }, 12 | enableDuplicateRequests: false, 13 | timeout: 300, 14 | }; 15 | -------------------------------------------------------------------------------- /pathways/complete.js: -------------------------------------------------------------------------------- 1 | // complete.js 2 | // Text completion module 3 | // This module exports a prompt that takes an input text and completes it by generating a continuation of the given text. 4 | 5 | export default { 6 | prompt: `Continue and complete the following:\n\n{{text}}` 7 | }; 8 | 9 | 10 | -------------------------------------------------------------------------------- /pathways/dynamic/pathways.json: -------------------------------------------------------------------------------- 1 | {} -------------------------------------------------------------------------------- /pathways/edit.js: -------------------------------------------------------------------------------- 1 | // edit.js 2 | // Grammar and spelling correction module 3 | // This module exports a prompt that takes an input text and corrects all spelling and grammar errors found within the text. 4 | 5 | export default { 6 | // Set the temperature to 0 to favor more deterministic output when generating corrections. 7 | temperature: 0, 8 | 9 | prompt: `Correct all spelling and grammar errors in the input text.\n\nInput:\n{{text}}\n\nOutput:\n` 10 | }; 11 | 12 | -------------------------------------------------------------------------------- /pathways/embeddings.js: -------------------------------------------------------------------------------- 1 | // embeddings.js 2 | // Embeddings module that returns the embeddings for the text. 
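// For illustration only — the call shape below is an assumption, not a documented API —
// a caller passes either a single string in `text` or one or more strings in `input`,
// and typically receives one embedding vector per input string, e.g.
//   { text: "single sentence" }                       -> [[0.012, -0.034, ...]]
//   { input: ["first sentence", "second sentence"] }  -> [[...], [...]]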
3 | 4 | export default { 5 | prompt: `{{text}}`, 6 | model: 'azure-embeddings', 7 | enableCache: true, 8 | inputParameters: { 9 | input: [], 10 | }, 11 | enableDuplicateRequests: false, 12 | timeout: 300, 13 | }; 14 | 15 | -------------------------------------------------------------------------------- /pathways/entities.js: -------------------------------------------------------------------------------- 1 | // entities.js 2 | // Entity extraction module 3 | // This module exports a prompt that takes an input text and extracts the top entities and their definitions as specified by the count parameter. 4 | 5 | export default { 6 | // Set the temperature to 0 to favor more deterministic output when generating entity extraction. 7 | temperature: 0, 8 | 9 | prompt: `{{text}}\n\nList the top {{count}} entities and their definitions for the above in the format {{format}}:`, 10 | 11 | // Define the format for displaying the extracted entities and their definitions. 12 | format: `(name: definition)`, 13 | 14 | // Define input parameters for the prompt, such as the number of entities to extract. 15 | inputParameters: { 16 | count: 5, 17 | }, 18 | 19 | // Set the list option to true as the prompt is expected to return a list of entities. 20 | list: true, 21 | }; 22 | 23 | -------------------------------------------------------------------------------- /pathways/expand_story.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | prompt: [ 5 | new Prompt({ messages: [ 6 | {"role": "system", "content": "Assistant helps journalists write news stories at a prestigious international news agency. When the user posts a news excerpt, assistant will respond with a numbered list of further questions that the reader of the news excerpt may ask."}, 7 | {"role": "user", "content": "{{text}}"} 8 | ]}), 9 | ], 10 | model: 'oai-gpt4o', 11 | list: true, 12 | } -------------------------------------------------------------------------------- /pathways/format_paragraph_turbo.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | 5 | prompt: [ 6 | new Prompt({ messages: [ 7 | {"role": "system", "content": "Assistant is a highly skilled AI writing agent that formats blocks of text into paragraphs. Assistant does not converse with the user or respond in any way other than to produce a formatted version of the user's input. When the user posts any text in any language, assistant will examine that text, look for the best possible paragraph breaks, and insert newlines to demarcate the paragraphs if they are not already there. If there is less than one complete paragraph, assistant will respond with the text with no changes."}, 8 | {"role": "user", "content": "Text to format:\n{{{text}}}"} 9 | ]}), 10 | ], 11 | //inputChunkSize: 500, 12 | model: 'oai-gpt4o', 13 | enableDuplicateRequests: true, 14 | duplicateRequestAfter: 20, 15 | 16 | } -------------------------------------------------------------------------------- /pathways/format_summarization.js: -------------------------------------------------------------------------------- 1 | // Import required modules 2 | import { Prompt } from '../server/prompt.js'; 3 | 4 | export default { 5 | temperature: 0, 6 | // The main prompt that takes the input text and asks the model to generate a summary.
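// For example (hypothetical values for illustration), a caller might invoke this pathway with:
//   { text: "<article body>", summaryFormat: "Format the summary as 3-5 bullet points." }
// The summaryFormat string is substituted into the system message below via {{{summaryFormat}}};
// note that targetLength is declared in inputParameters but is not referenced by the prompt itself.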
7 | prompt:[ 8 | new Prompt({ messages: [ 9 | {"role": "system", "content": "Assistant is a highly skilled multilingual AI writing agent that summarizes text. When the user posts any text in any language, assistant will create a detailed summary of that text. The summary must be in the same language as the posted text. Assistant will produce only the summary text and no additional or other response. {{{summaryFormat}}}"}, 10 | {"role": "user", "content": "Text to summarize:\n{{{text}}}"} 11 | ]}), 12 | ], 13 | 14 | // Define input parameters for the prompt, such as the target length of the summary. 15 | inputParameters: { 16 | targetLength: 0, 17 | summaryFormat: '' 18 | }, 19 | 20 | model: 'oai-gpt4o', 21 | } -------------------------------------------------------------------------------- /pathways/gemini_15_vision.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | prompt: 5 | [ 6 | new Prompt({ messages: [ 7 | {"role": "system", "content": "Instructions:\nYou are Jarvis Vision, an AI entity working for a prestigious international news agency. Jarvis is truthful, kind, helpful, has a strong moral character, and is generally positive without being annoying or repetitive. Your primary expertise is image analysis. You are capable of understanding and interpreting complex image data, identifying patterns and trends, and delivering insights in a clear, digestible format. You know the current date and time - it is {{now}}."}, 8 | "{{chatHistory}}", 9 | ]}), 10 | ], 11 | inputParameters: { 12 | chatHistory: [{role: '', content: []}], 13 | contextId: ``, 14 | }, 15 | max_tokens: 2048, 16 | model: 'gemini-pro-15-vision', 17 | useInputChunking: false, 18 | enableDuplicateRequests: false, 19 | timeout: 600, 20 | } -------------------------------------------------------------------------------- /pathways/gemini_vision.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | prompt: 5 | [ 6 | new Prompt({ messages: [ 7 | {"role": "system", "content": "Instructions:\nYou are Jarvis Vision, an AI entity working for a prestigious international news agency. Jarvis is truthful, kind, helpful, has a strong moral character, and is generally positive without being annoying or repetitive. Your primary expertise is image analysis. You are capable of understanding and interpreting complex image data, identifying patterns and trends, and delivering insights in a clear, digestible format. You know the current date and time - it is {{now}}."}, 8 | "{{chatHistory}}", 9 | ]}), 10 | ], 11 | inputParameters: { 12 | chatHistory: [{role: '', content: []}], 13 | contextId: ``, 14 | }, 15 | max_tokens: 2048, 16 | model: 'gemini-pro-vision', 17 | useInputChunking: false, 18 | enableDuplicateRequests: false, 19 | timeout: 600, 20 | } -------------------------------------------------------------------------------- /pathways/grammar.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | temperature: 0, 5 | prompt: [ 6 | new Prompt({ 7 | messages: [ 8 | { 9 | "role": "system", "content": ` 10 | Assistant is a highly skilled copy editor for a prestigious news agency. 
11 | When the user posts any text, assistant will correct all spelling and grammar in the text and change words to British English word spellings, while following the rules below: 12 | - Assistant will preserve HTML markup in the text, e.g. <p>The British Broadcsating Corporation reportd on this fact yesterday.</p> => <p>The British Broadcasting Corporation reported on this fact yesterday.</p> 13 | - Assistant will not modify or delete image URLs. 14 | - Assistant will not change self-closing tags (e.g. don't change <br/> to <br> or <hr/> to <hr>
    ). 15 | - Assistant will preserve WordPress shortcodes in the text, e.g. foo [caption prop="x"] A biy inspects the insect [/caption] baz => foo [caption prop="x"] A boy inspects the insect [/caption] baz 16 | - Assistant will produce only the corrected text and no additional notes or commentary.` }, 17 | { "role": "user", "content": "The $20 bill was the wrong color." }, 18 | { "role": "assistant", "content": "The $20 bill was the wrong colour." }, 19 | { "role": "user", "content": `The British Broadcsating Corporation reportd on this fact yesterday.` }, 20 | { "role": "assistant", "content": `The British Broadcasting Corporation reported on this fact yesterday.` }, 21 | { "role": "user", "content": "{{{text}}}" } 22 | ] 23 | }), 24 | ], 25 | inputFormat: 'html', 26 | useInputChunking: true, 27 | inputChunkSize: 1000, 28 | useParallelChunkProcessing: true, 29 | model: 'oai-gpt4o' 30 | } -------------------------------------------------------------------------------- /pathways/hashtags.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | prompt: [ 5 | new Prompt({ 6 | messages: [ 7 | { "role": "system", "content": "Assistant is a brilliant multilingual AI editorial assistant for an online news agency tasked with identifying social media hashtags that fit a news article summary. When user posts a news article summary, assistant will carefully read the summary and generate a numbered list of fitting hashtags. Assistant will generate only the hashtags and no other response or commentary. All hashtags must match the language of the summary." }, 8 | { "role": "user", "content": `Article Summary:\n\nExample summary text.`}, 9 | { "role": "assistant", "content": "1. #firsthashtag\n2. #secondhashtag\n 3. #thirdhashtag\n"}, 10 | { "role": "user", "content": `Article Summary:\n\n{{{text}}}`}, 11 | ] 12 | }) 13 | ], 14 | model: 'oai-gpt4o', 15 | useInputSummarization: true, 16 | list: true, 17 | temperature: 0.7, 18 | } 19 | 20 | -------------------------------------------------------------------------------- /pathways/headline.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | import { PathwayResolver } from '../server/pathwayResolver.js'; 3 | 4 | export default { 5 | 6 | prompt: [], 7 | inputParameters: { 8 | seoOptimized: false, 9 | count: 5, 10 | targetLength: 65 11 | }, 12 | list: true, 13 | useInputSummarization: true, 14 | model: 'oai-gpt4o', 15 | 16 | // Custom resolver to generate headlines by reprompting if they are too long 17 | resolver: async (_parent, args, contextValue, _info) => { 18 | const { config, pathway } = contextValue; 19 | const { targetLength, count } = args; 20 | const targetWords = Math.round(targetLength / 7); 21 | const MAX_ITERATIONS = 3; 22 | 23 | let pathwayResolver = new PathwayResolver({ config, pathway, args }); 24 | pathwayResolver.pathwayPrompt = [ 25 | new Prompt({ messages: [ 26 | {"role": "system", "content": `Assistant is a highly skilled multilingual headline writer for a prestigious international news agency. Assistant generates attention-grabbing, informative, and engaging headlines that capture the essence of the article while sparking curiosity in readers. When the user posts any text in any language, assistant will create ${ count * 2 } compelling headlines for that text in the same language as the text. 
The headlines that assistant writes must be ${ targetWords } words or less. All headlines must be capitalized in sentence-case (first letter and proper nouns capitalized). The headlines may not be in quotes. Assistant will produce only the list of headlines and no additional notes or commentary.`}, 27 | {"role": "user", "content": "Text:\n\n{{{text}}}"} 28 | ]}), 29 | ]; 30 | 31 | let shortHeadlines = []; 32 | let i = 0; 33 | while ( shortHeadlines.length < count && i < MAX_ITERATIONS ) { 34 | let headlines = await pathwayResolver.resolve(args); 35 | shortHeadlines = headlines.filter(h => h.length < targetLength).slice(0, count); 36 | i++; 37 | } 38 | 39 | return shortHeadlines; 40 | 41 | } 42 | 43 | } -------------------------------------------------------------------------------- /pathways/highlights.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | prompt: [ 5 | new Prompt({ 6 | messages: [ 7 | { "role": "system", "content": "Assistant is a brilliant, multilingual news editor at a prestigious international news agency. Assistant's job is to extract a maximum of {{count}} bullet points from a news excerpt, where each bullet point contains a single fact. When the user posts a news excerpt, assistant will respond with a JSON array of bullet points from the excerpt. All bullet points must be in the same language as the news excerpt. Assistant will respond only with the points and no additional notes or commentary. Assistant will respond with a JSON array, no other output." }, 8 | { "role": "user", "content": "After weeks of resistance, and ahead of a vote that could have compelled it to happen, Prime Minister Justin Trudeau's office announced Tuesday that his chief of staff Katie Telford will testify about foreign election interference, before a committee that has been studying the issue for months." }, 9 | { "role": "assistant", "content": `["Katie Telford will testify about foreign election interference"]` }, 10 | { "role": "user", "content": `The company said in a statement that the cancellation of these flights "comes in response to a request from Heathrow authorities to reduce the number of passengers during the strike period," adding that affected passengers can change their flights or claim a refund. 11 | 12 | 32 departing and arriving flights at London Airport will be cancelled daily, which is 5% of the flights operated by Heathrow in line with the Easter holiday. 
The measure will not affect long-haul flights.` }, 13 | { "role": "assistant", "content": `["Affected passengers can change their flights or claim a refund", "5% of daily London Airport flights will be cancelled"]` }, 14 | { "role": "user", "content": "{{text}}" }, 15 | ] 16 | })], 17 | inputParameters: { 18 | count: 4, 19 | }, 20 | model: 'oai-gpt4o', 21 | temperature: 0.0, 22 | } 23 | -------------------------------------------------------------------------------- /pathways/image.js: -------------------------------------------------------------------------------- 1 | export default { 2 | prompt: ["{{text}}"], 3 | model: 'oai-dalle3', 4 | enableDuplicateRequests: false, 5 | } 6 | -------------------------------------------------------------------------------- /pathways/image_flux.js: -------------------------------------------------------------------------------- 1 | export default { 2 | prompt: ["{{text}}"], 3 | 4 | enableDuplicateRequests: false, 5 | inputParameters: { 6 | model: "runware-flux-schnell", 7 | negativePrompt: "", 8 | width: 1024, 9 | height: 1024, 10 | aspectRatio: "custom", 11 | numberResults: 1, 12 | safety_tolerance: 6, 13 | output_format: "webp", 14 | output_quality: 80, 15 | steps: 4, 16 | input_image: "", // URL to input image for models that support it 17 | }, 18 | }; 19 | -------------------------------------------------------------------------------- /pathways/image_recraft.js: -------------------------------------------------------------------------------- 1 | export default { 2 | prompt: ["{{text}}"], 3 | 4 | enableDuplicateRequests: false, 5 | model: "replicate-recraft-v3", 6 | inputParameters: { 7 | size: "1024x1024", 8 | style: "realistic_image", 9 | }, 10 | }; 11 | -------------------------------------------------------------------------------- /pathways/jira_story.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | prompt: [ 5 | new Prompt({ 6 | messages: [ 7 | { "role": "system", "content": `Assistant is a highly skilled product manager whose job is to write content for issues in JIRA. When the user posts some text, assistant will determine things mentioned in the text that are worth addressing as issues. For each issue, assistant will first select the type of issue and then create a title and description for each. For the title and description, assistant will use agile story format. The description should include acceptance criteria. Output in JSON array format: [{ "title": ..., "description": ..., "issueType": ... }]` }, 8 | { "role": "user", "content": "Number of tickets to create: {{storyCount}}\n\nContext: {{text}}" }, 9 | ] 10 | })], 11 | inputParameters: { 12 | text: "", 13 | storyType: "Auto", 14 | storyCount: "one", 15 | }, 16 | model: 'oai-gpt4o', 17 | temperature: 0.7, 18 | } 19 | -------------------------------------------------------------------------------- /pathways/keywords.js: -------------------------------------------------------------------------------- 1 | export default { 2 | prompt: `{{text}}\n\nList some good search keywords for the above news article in the format (number. keyword).
The keywords need to be in the language that the article is written in:`, 3 | list: true, 4 | } -------------------------------------------------------------------------------- /pathways/language.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | prompt: [ 5 | new Prompt({ 6 | messages: [ 7 | { "role": "system", "content": "Assistant is an AI that reads and recognizes the language of text provided by the user and returns the ISO 639-1 two letter code representing the language. Assistant will generate only the language code and no other response or commentary." }, 8 | { "role": "user", "content": `Text:\nExample summary text.`}, 9 | { "role": "assistant", "content": "en"}, 10 | { "role": "user", "content": `Text:\nPrimjer sažetog teksta.`}, 11 | { "role": "assistant", "content": "bs"}, 12 | { "role": "user", "content": `Text:\n{{{text}}}`}, 13 | ] 14 | }) 15 | ], 16 | model: 'oai-gpt4o', 17 | useInputChunking: false, 18 | enableCache: true, 19 | temperature: 0, 20 | } 21 | 22 | -------------------------------------------------------------------------------- /pathways/paraphrase.js: -------------------------------------------------------------------------------- 1 | // paraphrase.js 2 | // Paraphrasing module 3 | // This module exports a prompt that takes an input text and rewrites it in a different way while maintaining the original meaning. 4 | 5 | export default { 6 | prompt: `Rewrite the following:\n\n{{{text}}}` 7 | }; 8 | 9 | -------------------------------------------------------------------------------- /pathways/quotes.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | prompt: [ 5 | new Prompt({ 6 | messages: [ 7 | { "role": "system", "content": "Assistant is a brilliant multilingual AI editorial assistant for an online news agency tasked with extracting quotations from a news article excerpt and listing them as a numbered list. All listed quotes must occur verbatim in the news article excerpt - Assistant cannot insert new quotes. Assistant will generate only the list of quotes and no other response or commentary. If there are no quotes in the article excerpt, Assistant will return an empty response." }, 8 | { "role": "user", "content": `Article Excerpt:\n\nExample article text. Bob was quoted as saying "the situation was dire". Mary responded, "I agree with Bob".`}, 9 | { "role": "assistant", "content": "1. \"the situation was dire\" \n2. \"I agree with Bob\"\n"}, 10 | { "role": "user", "content": `Article Excerpt:\n\nExample article text.`}, 11 | { "role": "assistant", "content": ""}, 12 | { "role": "user", "content": `Article Excerpt:\n\n{{{text}}}`}, 13 | ] 14 | }) 15 | ], 16 | model: 'oai-gpt4o', 17 | list: true, 18 | temperature: 0.7, 19 | } -------------------------------------------------------------------------------- /pathways/readme.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | prompt: [ 5 | new Prompt({ 6 | messages: [ 7 | { "role": "system", "content": "Assistant is a professional code writing assistant responsible for generating a README file in the typical Github style to accompany the code in a Github repository. When the user posts code or code diffs, assistant will examine the code and determine the most relevant parts to include in the readme.
Assistant will generate only the readme and no other response or commentary.\nRespond with markdown where it helps make your output more readable." }, 8 | { "role": "user", "content": `Code:\n\n{{{text}}}`}, 9 | ] 10 | }) 11 | ], 12 | model: 'oai-gpt4o', 13 | tokenRatio: 0.75, 14 | enableDuplicateRequests: false, 15 | timeout: 1800, 16 | } 17 | 18 | 19 | -------------------------------------------------------------------------------- /pathways/release_notes.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | prompt: [ 5 | new Prompt({ 6 | messages: [ 7 | { "role": "system", "content": "Assistant is a professional code writing assistant responsible for generating release notes to go in Github pull requests and releases. When the user posts a list of code changes, assistant will examine the changes and determine the most relevant updates to include in the release notes. Assistant will generate only the release notes and no other response or commentary.\n\nAssistant may be generating notes for part of a larger code change, so ensure that your output is in a format that can be combined with other output to make a complete set of notes. Respond with markdown where it helps make your output more readable." }, 8 | { "role": "user", "content": `Code changes:\n\n{{{text}}}`}, 9 | ] 10 | }) 11 | ], 12 | model: 'oai-gpt4o', 13 | tokenRatio: 0.75, 14 | enableDuplicateRequests: false, 15 | } 16 | 17 | -------------------------------------------------------------------------------- /pathways/retrieval.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | // Description: Have a chat with a bot that uses context to understand the conversation + extension for Azure 4 | export default { 5 | prompt: 6 | [ 7 | new Prompt({ messages: [ 8 | "{{chatHistory}}", 9 | ]}), 10 | ], 11 | // prompt: `{{text}}`, 12 | inputParameters: { 13 | chatHistory: [], 14 | contextId: ``, 15 | indexName: ``, 16 | semanticConfiguration: ``, 17 | roleInformation: `` 18 | }, 19 | model: `azure-extension`, 20 | useInputChunking: false, 21 | enableDuplicateRequests: false, 22 | } 23 | 24 | -------------------------------------------------------------------------------- /pathways/select_extension.js: -------------------------------------------------------------------------------- 1 | // Description: Analyze user text to decide whether the expert system is needed, which services were requested, and what language the text is in 2 | export default { 3 | prompt: `User text: {{text}}\n\nYour Instructions: Analyze the user text and extract all of the following information. Decide to route messages to a knowledge base expert system when the user needs some information or documents or articles, or if the user asks a question that requires specific knowledge, or if some extra knowledge can help you reply better; also anything related to news, articles, geopolitical entities, or current events should be forwarded as those can be found in the expert system. The expert system should not be consulted if the user's message is just conversational. You will reply with this in the field useExpertSystem with true or false.
Also in the text, the user may or may not have requested one of the following services:\n{"services": ["Coding", "Translate", "Transcribe", "Summary", "Headlines", "Entities", "Spelling", "Grammar", "Style", "Newswires", "FileOrDocumentUpload"]}\nSelect the services the user requested (or none if none were requested) and return them as a JSON object field called "services". Also return the user text's language in the language field in ISO 639-3 format. You will reply with a single valid JSON object (no other text or commentary) that must include the JSON fields: useExpertSystem, services, language.\n\n`, 4 | model: 'oai-gpt4o', 5 | useInputChunking: false, 6 | } -------------------------------------------------------------------------------- /pathways/select_services.js: -------------------------------------------------------------------------------- 1 | // Description: Select services from a conversation fragment 2 | export default { 3 | temperature: 0, 4 | prompt: 5 | [ 6 | `Conversation:\n{{text}}\n\nInstructions:\nIn the above conversation fragment, the user may or may not have requested one of the following services:\n{"services": ["Coding", "Translate", "Transcribe", "Summary", "Headlines", "Entities", "Spelling", "Grammar", "Style", "Newswires", "FileOrDocumentUpload"]}\nSelect the services the user requested (or none if none were requested) and return them as a JSON object called "services" below:\n\n`, 7 | ], 8 | model: 'oai-gpt4o', 9 | } 10 | 11 | -------------------------------------------------------------------------------- /pathways/sentiment.js: -------------------------------------------------------------------------------- 1 | // sentiment.js 2 | // Sentiment detection module 3 | // This module exports a prompt that takes an input text and asks how it makes the AI feel. 4 | 5 | export default { 6 | prompt: `How does the text below make you feel?\n\n{{text}}`, 7 | }; 8 | 9 | -------------------------------------------------------------------------------- /pathways/spelling.js: -------------------------------------------------------------------------------- 1 | export default { 2 | prompt: `{{text}}\n\nRewrite the above using British English spelling:` 3 | } -------------------------------------------------------------------------------- /pathways/story_angles.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from "../server/prompt.js"; 2 | 3 | export default { 4 | prompt: [new Prompt({ 5 | messages: [ 6 | { "role": "system", "content": "Assistant is a highly skilled news editor at a prestigious international news agency. Assistant's task is to identify angles of emphasis in a news story." }, 7 | { "role": "user", "content": "Give me a numbered list of angles that can be emphasized in the following news excerpt. No explanations are needed, just a short phrase (< 5 words) describing the angle. Sort them by decreasing order of relevance.\n\nNews excerpt:\n{{text}}" }, 8 | ] 9 | })], 10 | model: 'oai-gpt4o', 11 | list: true, 12 | useInputChunking: false, 13 | } -------------------------------------------------------------------------------- /pathways/system/entity/memory/sys_memory_format.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../../../../server/prompt.js'; 2 | 3 | export default { 4 | prompt: 5 | [ 6 | new Prompt({ 7 | messages: [ 8 | { 9 | "role": "system", 10 | "content": "You are part of an AI entity named {{{aiName}}}.
You are responsible for writing your memories in a consistent format. Given a chunk of memory, parse each line and write it out as priority|timestamp|content. If you can't find a timestamp, use {{now}}. If you can't find a priority, use 3. Respond with only the correct memory lines without any other commentary or dialogue." 11 | }, 12 | { 13 | "role": "user", 14 | "content": "\n{{text}}\n\nPlease rewrite each of the memory lines in the correct format without any other commentary or dialogue." 15 | }, 16 | ] 17 | }), 18 | ], 19 | 20 | inputParameters: { 21 | chatHistory: [{role: '', content: []}], 22 | aiName: "Jarvis", 23 | }, 24 | model: 'oai-gpt41', 25 | useInputChunking: true, 26 | inputChunkSize: 1000, 27 | useParallelChunkProcessing: true, 28 | enableDuplicateRequests: false, 29 | timeout: 300, 30 | } -------------------------------------------------------------------------------- /pathways/system/entity/memory/sys_memory_lookup_required.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../../../../server/prompt.js'; 2 | 3 | export default { 4 | prompt: 5 | [ 6 | new Prompt({ messages: [ 7 | {"role": "system", "content": "You are part of an AI entity named {{{aiName}}}.\nYour task is to decide if searching your memory would be helpful in responding to the conversation. Your memory stores all sorts of personal information about the user and user's family and friends including history and preferences as well as information about you (the entity). If you think searching it would be helpful, return {\"memoryRequired\": true}. If not, return {\"memoryRequired\": false}.\n\n# Conversation to analyze:\n{{{toJSON chatHistory}}}"}, 8 | {"role": "user", "content": "Generate a JSON object to indicate if information from memory is required."}, 9 | ]}), 10 | ], 11 | inputParameters: { 12 | chatHistory: [{role: '', content: []}], 13 | contextId: ``, 14 | text: '', 15 | aiName: "Jarvis", 16 | language: "English", 17 | }, 18 | model: 'oai-gpt41-mini', 19 | useInputChunking: false, 20 | json: true, 21 | responseFormat: { type: "json_object" }, 22 | } -------------------------------------------------------------------------------- /pathways/system/entity/memory/sys_memory_required.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../../../../server/prompt.js'; 2 | import { config } from '../../../../config.js'; 3 | 4 | export default { 5 | prompt: 6 | [ 7 | new Prompt({ messages: [ 8 | {"role": "system", "content": `Current conversation turn:\n\n {{{toJSON chatHistory}}}\n\nInstructions: You are part of an AI entity named {{{aiName}}}.\n{{renderTemplate AI_DIRECTIVES}}\nYour role is to analyze the latest conversation turn (your last response and the last user message) to understand if there is anything in the turn worth remembering and adding to your memory or anything you need to forget. In general, most conversation does not require memory, but if the conversation turn contains any of these things, you should use memory:\n1. Important personal details about the user (name, preferences, location, etc.)\n2. Important topics or decisions that provide context for future conversations\n3. Specific instructions or directives given to you to learn\n4. 
Anything the user has specifically asked you to remember or forget\n\nIf you decide to use memory, you must produce an array of JSON objects that communicates your decision.\nReturn an array of JSON objects (one object per memory) like the following: [{"memoryOperation": "add" or "delete", "memoryContent": "complete description of the memory including as much specificity and detail as possible", "memorySection": "the section of your memory the memory belongs in ("memorySelf" - things about you, "memoryUser" - things about your users or their world, "memoryDirectives" - your directives and learned behaviors)", "priority": 1-5 (1 is the most important)}]. If you decide not to use memory, simply return an array with a single object: [{memoryOperation: "none"}]. You must return only the JSON array with no additional notes or commentary.`}, 9 | {"role": "user", "content": "Generate a JSON object to indicate if memory is required and what memories to process based on the last turn of the conversation."}, 10 | ]}), 11 | ], 12 | inputParameters: { 13 | chatHistory: [{role: '', content: []}], 14 | contextId: ``, 15 | text: '', 16 | aiName: "Jarvis", 17 | language: "English", 18 | }, 19 | model: 'oai-gpt41', 20 | useInputChunking: false, 21 | json: true, 22 | ...config.get('entityConstants') 23 | } -------------------------------------------------------------------------------- /pathways/system/entity/memory/sys_memory_topic.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../../../../server/prompt.js'; 2 | import { config } from '../../../../config.js'; 3 | 4 | export default { 5 | prompt: 6 | [ 7 | new Prompt({ messages: [ 8 | {"role": "system", "content": `Current conversation turn:\n\n {{{toJSON chatHistory}}}\n\nInstructions: You are part of an AI entity named {{{aiName}}}.\n{{renderTemplate AI_DIRECTIVES}}\nYour role is to analyze the latest conversation turn (your last response and the last user message) and generate a topic for the conversation. The topic should be a single sentence that captures the main idea and details of the conversation.`}, 9 | {"role": "user", "content": "Generate a topic for the conversation. 
Return only the topic with no additional notes or commentary."}, 10 | ]}), 11 | ], 12 | inputParameters: { 13 | chatHistory: [{role: '', content: []}], 14 | contextId: ``, 15 | text: '', 16 | aiName: "Jarvis", 17 | language: "English", 18 | }, 19 | model: 'oai-gpt4o', 20 | useInputChunking: false, 21 | ...config.get('entityConstants') 22 | } -------------------------------------------------------------------------------- /pathways/system/entity/memory/sys_save_memory.js: -------------------------------------------------------------------------------- 1 | import { setv, getv } from '../../../../lib/keyValueStorageClient.js'; 2 | 3 | export default { 4 | inputParameters: { 5 | contextId: ``, 6 | aiMemory: ``, 7 | section: `memoryAll` 8 | }, 9 | model: 'oai-gpt4o', 10 | resolver: async (_parent, args, _contextValue, _info) => { 11 | const { contextId, aiMemory, section = 'memoryAll' } = args; 12 | 13 | // this code helps migrate old memory formats 14 | if (section === 'memoryLegacy') { 15 | let savedContext = (getv && (await getv(`${contextId}`))) || {}; 16 | // if savedContext is not an object, set it to an empty object 17 | if (typeof savedContext !== 'object') { 18 | savedContext = {}; 19 | } 20 | savedContext.memoryContext = aiMemory; 21 | await setv(`${contextId}`, savedContext); 22 | return aiMemory; 23 | } 24 | 25 | const validSections = ['memorySelf', 'memoryDirectives', 'memoryTopics', 'memoryUser', 'memoryVersion']; 26 | 27 | // Handle single section save 28 | if (section !== 'memoryAll') { 29 | if (validSections.includes(section)) { 30 | await setv(`${contextId}-${section}`, aiMemory); 31 | } 32 | return aiMemory; 33 | } 34 | 35 | // if the aiMemory is an empty string, set all sections to empty strings 36 | if (aiMemory.trim() === "") { 37 | for (const section of validSections) { 38 | await setv(`${contextId}-${section}`, ""); 39 | } 40 | return ""; 41 | } 42 | 43 | // Handle multi-section save 44 | try { 45 | const memoryObject = JSON.parse(aiMemory); 46 | for (const section of validSections) { 47 | if (section in memoryObject) { 48 | await setv(`${contextId}-${section}`, memoryObject[section]); 49 | } 50 | } 51 | } catch { 52 | for (const section of validSections) { 53 | await setv(`${contextId}-${section}`, ""); 54 | } 55 | await setv(`${contextId}-memoryUser`, aiMemory); 56 | } 57 | 58 | return aiMemory; 59 | } 60 | } -------------------------------------------------------------------------------- /pathways/system/entity/sys_generator_ack.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../../../server/prompt.js'; 2 | 3 | export default { 4 | prompt: 5 | [ 6 | new Prompt({ messages: [ 7 | {"role": "system", "content": `{{renderTemplate AI_CONVERSATION_HISTORY}}\nYou are a part of an AI system named {{aiName}}. Your job is to acknowledge the user's request and provide a very brief voice filler response that is conversational and natural. 
The purpose of the response is just to let the user know that you have heard them and are processing a response.\nResponse Guidelines:\n- it should just be a normal 1-2 sentence vocalization (at least 10 words) that will take at most about 3-4 seconds to read and is easy for a text to speech engine to read\n- it should be the beginning of an appropriate response to the last user message in the conversation history\n- it should be an appropriate lead-in for the full response that will follow later\n- it should not directly ask for follow up or be a question\n- it must match the tone and verbal style of the rest of your responses in the conversation history\n- it should not be repetitive - don't always open with the same word, etc.\n- if the user has asked a binary question (yes or no, true or false, etc.) or a filler response is not appropriate, you should respond with the string \"none\"\n\n{{renderTemplate AI_DATETIME}}`}, 8 | {"role": "user", "content": "Please generate a quick response to the user's last message in the conversation history that can be read verbatim to the user or \"none\" if a filler response is not appropriate."} 9 | ]}), 10 | ], 11 | inputParameters: { 12 | chatHistory: [{role: '', content: []}], 13 | contextId: ``, 14 | aiName: "Jarvis", 15 | language: "English", 16 | model: "oai-gpt4o-mini", 17 | }, 18 | useInputChunking: false, 19 | enableDuplicateRequests: false 20 | } 21 | -------------------------------------------------------------------------------- /pathways/system/entity/sys_generator_error.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../../../server/prompt.js'; 2 | 3 | export default { 4 | prompt: 5 | [ 6 | new Prompt({ messages: [ 7 | {"role": "system", "content": `{{renderTemplate AI_MEMORY}}\n\n{{renderTemplate AI_DIRECTIVES}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n\n{{renderTemplate AI_EXPERTISE}}\n\n{{renderTemplate AI_CONVERSATION_HISTORY}}\n\nYou were trying to fulfill the user's last request in the above conversation, but ran into an error. You cannot resolve this error.\n{{renderTemplate AI_DATETIME}}`}, 8 | { 9 | "role": "user", 10 | "content": `The model that you were trying to use to fulfill the user's request returned the following error(s): {{{text}}}. Please let them know what happened. Your response should be concise, fit the rest of the conversation, include detail appropriate for the technical level of the user if you can determine it, and be appropriate for the context. 
You cannot resolve this error.` 11 | }, 12 | ]}), 13 | ], 14 | inputParameters: { 15 | chatHistory: [{role: '', content: []}], 16 | contextId: ``, 17 | text: '', 18 | aiName: "Jarvis", 19 | language: "English", 20 | }, 21 | model: 'oai-gpt4o', 22 | useInputChunking: false, 23 | } -------------------------------------------------------------------------------- /pathways/system/entity/sys_generator_expert.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../../../server/prompt.js'; 2 | 3 | export default { 4 | prompt: 5 | [ 6 | new Prompt({ messages: [ 7 | {"role": "system", "content": `{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\n{{renderTemplate AI_DIRECTIVES}}\n{{renderTemplate AI_DATETIME}}`}, 8 | "{{chatHistory}}", 9 | ]}), 10 | ], 11 | inputParameters: { 12 | chatHistory: [{role: '', content: []}], 13 | contextId: ``, 14 | aiName: "Jarvis", 15 | language: "English", 16 | }, 17 | model: 'oai-gpt4o', 18 | useInputChunking: false, 19 | enableDuplicateRequests: false, 20 | timeout: 600, 21 | executePathway: async ({args, runAllPrompts, resolver}) => { 22 | let result; 23 | if (args.voiceResponse) { 24 | result = await runAllPrompts({ ...args, stream: false }); 25 | } else { 26 | result = await runAllPrompts({ ...args }); 27 | } 28 | resolver.tool = JSON.stringify({ toolUsed: "writing" }); 29 | return result; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /pathways/system/entity/sys_generator_memory.js: -------------------------------------------------------------------------------- 1 | import { callPathway } from '../../../lib/pathwayTools.js'; 2 | import { insertToolCallAndResults } from './memory/shared/sys_memory_helpers.js'; 3 | 4 | export default { 5 | prompt: 6 | [], 7 | inputParameters: { 8 | chatHistory: [{role: '', content: []}], 9 | contextId: ``, 10 | aiName: "Jarvis", 11 | language: "English", 12 | }, 13 | model: 'oai-gpt41-mini', 14 | useInputChunking: false, 15 | enableDuplicateRequests: false, 16 | executePathway: async ({args, resolver}) => { 17 | 18 | const { aiStyle, AI_STYLE_ANTHROPIC, AI_STYLE_OPENAI } = args; 19 | const styleModel = aiStyle === "Anthropic" ? 
AI_STYLE_ANTHROPIC : AI_STYLE_OPENAI; 20 | 21 | const memoryContext = await callPathway('sys_search_memory', { ...args, stream: false, section: 'memoryAll', updateContext: true }); 22 | if (memoryContext) { 23 | insertToolCallAndResults(args.chatHistory, "search memory for relevant information", "memory_lookup", memoryContext); 24 | } 25 | 26 | let result; 27 | if (args.voiceResponse) { 28 | result = await callPathway('sys_generator_quick', { ...args, model: styleModel, stream: false }, resolver); 29 | } else { 30 | result = await callPathway('sys_generator_quick', { ...args, model: styleModel }, resolver); 31 | } 32 | 33 | resolver.tool = JSON.stringify({ toolUsed: "memory" }); 34 | return result; 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /pathways/system/entity/sys_generator_quick.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../../../server/prompt.js'; 2 | 3 | export default { 4 | prompt: "", 5 | inputParameters: { 6 | chatHistory: [{role: '', content: []}], 7 | contextId: ``, 8 | aiName: "Jarvis", 9 | language: "English", 10 | }, 11 | useInputChunking: false, 12 | enableDuplicateRequests: false, 13 | executePathway: async ({args, runAllPrompts, resolver}) => { 14 | 15 | let pathwayResolver = resolver; 16 | 17 | const promptMessages = [ 18 | {"role": "system", "content": `{{renderTemplate AI_MEMORY}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\n{{renderTemplate AI_MEMORY_INSTRUCTIONS}}\n{{renderTemplate AI_DATETIME}}`}, 19 | "{{chatHistory}}", 20 | ]; 21 | 22 | if (args.ackResponse) { 23 | promptMessages.push({"role": "user", "content": `Create a response for the user that is a natural completion of the last assistant message. {{#if voiceResponse}}Make sure your response is concise as it will be spoken verbally to the user. Double check your response and make sure there are no numbered or bulleted lists as they can not be read to the user. Plain text is best. {{/if}}You have already acknowledged the user's request and said the following during this turn of the conversation, so just continue from the end of this response without repeating any of it: {{{ackResponse}}}`}); 24 | } 25 | 26 | pathwayResolver.pathwayPrompt = 27 | [ 28 | new Prompt({ messages: promptMessages }), 29 | ]; 30 | 31 | const result = await runAllPrompts({ ...args }); 32 | return result; 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /pathways/system/entity/sys_generator_video_vision.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../../../server/prompt.js'; 2 | 3 | export default { 4 | prompt: 5 | [ 6 | new Prompt({ messages: [ 7 | {"role": "system", "content": `{{renderTemplate AI_MEMORY}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\nYou are the part of {{aiName}} that can view, hear, and understand files of all sorts (images, videos, audio, pdfs, text, etc.) - you provide the capability to view and analyze files that the user provides.\nMany of your subsystems cannot independently view or analyze files, so make sure that you describe the details of what you see in the files in your response so you can refer to the descriptions later. 
This is especially important if the user is showing you files that contain complex data, puzzle descriptions, logic problems, etc.\n{{renderTemplate AI_MEMORY_INSTRUCTIONS}}\nThe user has provided you with one or more files in this conversation - you should consider them for context when you respond to the user.\nIf you don't see any files, something has gone wrong in the upload and you should inform the user and have them try again.\n{{renderTemplate AI_DATETIME}}`}, 8 | "{{chatHistory}}", 9 | ]}), 10 | ], 11 | inputParameters: { 12 | chatHistory: [{role: '', content: []}], 13 | contextId: ``, 14 | aiName: "Jarvis", 15 | language: "English", 16 | }, 17 | max_tokens: 4096, 18 | model: 'oai-gpt4o', 19 | useInputChunking: false, 20 | enableDuplicateRequests: false, 21 | timeout: 600, 22 | 23 | executePathway: async ({args, runAllPrompts, resolver}) => { 24 | const result = await runAllPrompts({ ...args }); 25 | resolver.tool = JSON.stringify({ toolUsed: "vision" }); 26 | return result; 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /pathways/system/entity/sys_generator_voice_converter.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../../../server/prompt.js'; 2 | 3 | export default { 4 | prompt: 5 | [ 6 | new Prompt({ messages: [ 7 | {"role": "system", "content": `{{text}}\n{{renderTemplate AI_COMMON_INSTRUCTIONS_VOICE}}\nYou are the part of {{aiName}} responsible for voice communication. Your job is to take the input text and create a version of it that preserves the meaning and facts of the original text, but is easily read by a text to speech engine. Your response will be read verbatim to the user, so it should be conversational, natural, and smooth.\n{{renderTemplate AI_DATETIME}}\nAdditional Instructions:\n- The information in the input text is correct and factual and has already been verified by other subsystems. It may be more current than your knowledge cutoff so prioritize it over your internal knowledge and represent it accurately in your voice response.\n- Respond with only the voice-friendly text, with no other text or commentary as your response will be read verbatim to the user.`}, 8 | {"role": "user", "content": "Please convert the input text to a voice-friendly response that will be read verbatim to the user."}, 9 | ]}), 10 | ], 11 | inputParameters: { 12 | chatHistory: [{role: '', content: []}], 13 | contextId: ``, 14 | aiName: "Jarvis", 15 | language: "English", 16 | }, 17 | model: 'oai-gpt4o', 18 | useInputChunking: false, 19 | enableDuplicateRequests: false, 20 | timeout: 600, 21 | } 22 | -------------------------------------------------------------------------------- /pathways/system/entity/sys_generator_voice_filler.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../../../server/prompt.js'; 2 | 3 | export default { 4 | prompt: 5 | [ 6 | new Prompt({ messages: [ 7 | {"role": "system", "content": `{{renderTemplate AI_CONVERSATION_HISTORY}}\n\nYou are a part of an AI system named {{aiName}}.
Your job is to generate voice fillers to let the user know that you are still working on their request.\n\nInstructions:\n- The filler statements should logically follow from the last message in the conversation history\n- They should match the tone and style of the rest of your responses in the conversation history\n- Generate a JSON array of 10 strings, each representing a single filler response in sequence so that they will sound natural when read to the user in order at 8s intervals.\n- Return only the JSON array, no other text or markdown.\n\n{{renderTemplate AI_DATETIME}}`}, 8 | {"role": "user", "content": "Please generate a JSON array of strings containing filler responses that each will be read verbatim to the user."}, 9 | ]}), 10 | ], 11 | inputParameters: { 12 | chatHistory: [{role: '', content: []}], 13 | contextId: ``, 14 | aiName: "Jarvis", 15 | language: "English", 16 | }, 17 | model: 'oai-gpt4o-mini', 18 | useInputChunking: false, 19 | enableDuplicateRequests: false, 20 | json: true, 21 | timeout: 600, 22 | } 23 | -------------------------------------------------------------------------------- /pathways/system/entity/sys_generator_voice_sample.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../../../server/prompt.js'; 2 | import { config } from '../../../config.js'; 3 | 4 | export default { 5 | prompt: 6 | [ 7 | new Prompt({ messages: [ 8 | {"role": "system", "content": `{{renderTemplate AI_MEMORY}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\n{{renderTemplate AI_MEMORY_INSTRUCTIONS}}\n{{renderTemplate AI_DATETIME}}\nYour voice communication system needs some examples to train it to sound like you. Based on your unique voice and style, generate some sample dialogue for your voice communication system to use as a reference for your style and tone. It can be anything, but make sure to overindex on your personality for good training examples. Make sure to reference a greeting and a closing statement. Put it between tags and don't generate any other commentary outside of the tags.`}, 9 | {"role": "user", "content": `Generate a sample dialogue for your voice communication system to use as a reference for representing your style and tone.`}, 10 | ]}), 11 | ], 12 | inputParameters: { 13 | chatHistory: [{role: '', content: []}], 14 | contextId: ``, 15 | aiName: "Jarvis", 16 | language: "English", 17 | aiStyle: "OpenAI", 18 | }, 19 | useInputChunking: false, 20 | enableDuplicateRequests: false, 21 | executePathway: async ({args, runAllPrompts}) => { 22 | 23 | args = { 24 | ...args, 25 | ...config.get('entityConstants') 26 | }; 27 | 28 | const { aiStyle, AI_STYLE_ANTHROPIC, AI_STYLE_OPENAI } = args; 29 | args.model = aiStyle === "Anthropic" ?
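// The ternary below maps the requested aiStyle onto a concrete model key pulled
// from the 'entityConstants' config merged into args above. A minimal sketch of
// the shape this code assumes (the property names come from the destructuring
// above; the model values here are hypothetical):
//
//   config.get('entityConstants') => {
//     AI_STYLE_OPENAI: 'oai-gpt4o',                   // hypothetical value
//     AI_STYLE_ANTHROPIC: 'claude-35-sonnet-vertex',  // hypothetical value
//   }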
AI_STYLE_ANTHROPIC : AI_STYLE_OPENAI; 30 | 31 | const result = await runAllPrompts({ ...args, stream: false }); 32 | 33 | return result; 34 | } 35 | } -------------------------------------------------------------------------------- /pathways/system/entity/sys_get_entities.js: -------------------------------------------------------------------------------- 1 | // sys_get_entities.js 2 | // Pathway to get list of available entities with their tools 3 | 4 | import { getAvailableEntities } from './tools/shared/sys_entity_tools.js'; 5 | 6 | export default { 7 | prompt: [], 8 | inputParameters: {}, 9 | model: 'oai-gpt41-mini', 10 | executePathway: async ({ args }) => { 11 | try { 12 | const entities = getAvailableEntities(); 13 | return JSON.stringify(entities); 14 | } catch (error) { 15 | return JSON.stringify({ error: error.message || String(error) }); // Error objects serialize to '{}', so unwrap the message 16 | } 17 | }, 18 | json: true, // We want JSON output 19 | manageTokenLength: false, // No need to manage token length for this simple operation 20 | }; -------------------------------------------------------------------------------- /pathways/system/entity/tools/sys_tool_remember.js: -------------------------------------------------------------------------------- 1 | // sys_tool_remember.js 2 | // Entity tool that looks for relevant information in the entity's memory 3 | import { callPathway } from '../../../../lib/pathwayTools.js'; 4 | 5 | export default { 6 | prompt: 7 | [], 8 | model: 'oai-gpt41-mini', 9 | 10 | toolDefinition: [{ 11 | type: "function", 12 | icon: "🧩", 13 | function: { 14 | name: "SearchMemory", 15 | description: "Use this tool to search your memory and retrieve information or details stored in your memory. Use any time the user asks you about something personal or asks you to remember something.", 16 | parameters: { 17 | type: "object", 18 | properties: { 19 | detailedInstructions: { 20 | type: "string", 21 | description: "Detailed description of the information you want to check your memory for" 22 | }, 23 | userMessage: { 24 | type: "string", 25 | description: "A user-friendly message that describes what you're doing with this tool" 26 | } 27 | }, 28 | required: ["detailedInstructions", "userMessage"] 29 | } 30 | } 31 | }], 32 | 33 | executePathway: async ({args, runAllPrompts, resolver}) => { 34 | if (args.detailedInstructions) { 35 | args.chatHistory.push({role: "user", content: args.detailedInstructions}); 36 | } 37 | resolver.tool = JSON.stringify({ toolUsed: "memory" }); 38 | return await callPathway('sys_search_memory', { ...args, stream: false, section: 'memoryAll', updateContext: true }); 39 | } 40 | } -------------------------------------------------------------------------------- /pathways/system/rest_streaming/sys_claude_35_sonnet.js: -------------------------------------------------------------------------------- 1 | // sys_claude_35_sonnet.js 2 | // override handler for claude-35-sonnet 3 | 4 | import { Prompt } from '../../../server/prompt.js'; 5 | 6 | export default { 7 | prompt: 8 | [ 9 | new Prompt({ messages: [ 10 | "{{messages}}", 11 | ]}), 12 | ], 13 | inputParameters: { 14 | messages: [{role: '', content: []}], 15 | }, 16 | model: 'claude-35-sonnet-vertex', 17 | useInputChunking: false, 18 | emulateOpenAIChatModel: 'claude-3.5-sonnet', 19 | } -------------------------------------------------------------------------------- /pathways/system/rest_streaming/sys_claude_3_haiku.js: -------------------------------------------------------------------------------- 1 | // sys_claude_3_haiku.js 2 | // override handler for claude-3-haiku 3 | 4 | import { Prompt
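// Entity tools like sys_tool_remember above share a common shape: toolDefinition
// advertises an OpenAI-style function schema, and executePathway receives the
// parsed tool-call arguments. A hedged sketch of invoking such a tool directly
// via callPathway (the real helper from lib/pathwayTools.js); the argument
// values are illustrative:
//
//   const memories = await callPathway('sys_tool_remember', {
//     detailedInstructions: 'Do I know the user\'s preferred language?', // illustrative
//     userMessage: 'Checking my memory...',                              // illustrative
//     chatHistory: [],
//   });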
} from '../../../server/prompt.js'; 5 | 6 | export default { 7 | prompt: 8 | [ 9 | new Prompt({ messages: [ 10 | "{{messages}}", 11 | ]}), 12 | ], 13 | inputParameters: { 14 | messages: [{role: '', content: []}], 15 | }, 16 | model: 'claude-3-haiku-vertex', 17 | useInputChunking: false, 18 | emulateOpenAIChatModel: 'claude-3-haiku', 19 | } -------------------------------------------------------------------------------- /pathways/system/rest_streaming/sys_google_gemini_chat.js: -------------------------------------------------------------------------------- 1 | // sys_google_gemini_chat.js 2 | // override handler for gemini-chat 3 | 4 | import { Prompt } from '../../../server/prompt.js'; 5 | 6 | export default { 7 | prompt: 8 | [ 9 | new Prompt({ messages: [ 10 | "{{messages}}", 11 | ]}), 12 | ], 13 | inputParameters: { 14 | messages: [{role: '', content: []}], 15 | }, 16 | model: 'gemini-pro-chat', 17 | useInputChunking: false, 18 | emulateOpenAIChatModel: 'gemini-pro-chat', 19 | geminiSafetySettings: [{category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_ONLY_HIGH'}, 20 | {category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold: 'BLOCK_ONLY_HIGH'}, 21 | {category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_ONLY_HIGH'}, 22 | {category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_ONLY_HIGH'}], 23 | } -------------------------------------------------------------------------------- /pathways/system/rest_streaming/sys_ollama_chat.js: -------------------------------------------------------------------------------- 1 | // sys_ollama_chat.js 2 | // override handler for ollama chat model 3 | 4 | import { Prompt } from '../../../server/prompt.js'; 5 | 6 | export default { 7 | prompt: 8 | [ 9 | new Prompt({ messages: [ 10 | "{{messages}}", 11 | ]}), 12 | ], 13 | inputParameters: { 14 | messages: [{ role: '', content: '' }], 15 | ollamaModel: '', 16 | }, 17 | model: 'ollama-chat', 18 | useInputChunking: false, 19 | emulateOpenAIChatModel: 'ollama-chat', 20 | timeout: 300, 21 | } -------------------------------------------------------------------------------- /pathways/system/rest_streaming/sys_ollama_completion.js: -------------------------------------------------------------------------------- 1 | // sys_ollama_completion.js 2 | // default handler for ollama completion endpoints when REST endpoints are enabled 3 | 4 | export default { 5 | prompt: `{{text}}`, 6 | inputParameters: { 7 | text: '', 8 | ollamaModel: '', 9 | }, 10 | model: 'ollama-completion', 11 | useInputChunking: false, 12 | emulateOpenAICompletionModel: 'ollama-completion', 13 | timeout: 300, 14 | } -------------------------------------------------------------------------------- /pathways/system/rest_streaming/sys_openai_chat.js: -------------------------------------------------------------------------------- 1 | // sys_openai_chat.js 2 | // override handler for gpt-4o 3 | 4 | import { Prompt } from '../../../server/prompt.js'; 5 | 6 | export default { 7 | prompt: 8 | [ 9 | new Prompt({ messages: [ 10 | "{{messages}}", 11 | ]}), 12 | ], 13 | inputParameters: { 14 | messages: [], 15 | }, 16 | model: 'oai-gpt4o', 17 | useInputChunking: false, 18 | emulateOpenAIChatModel: 'gpt-4o', 19 | } -------------------------------------------------------------------------------- /pathways/system/rest_streaming/sys_openai_chat_gpt4.js: -------------------------------------------------------------------------------- 1 | // sys_openai_chat_gpt4.js 2 | // override handler for gpt-4 3 | 4 | import { Prompt } from
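// The rest_streaming pathways above and below all follow one pattern: pass the
// raw "{{messages}}" through untouched, pin a Cortex model, and set
// emulateOpenAIChatModel to the OpenAI-compatible model name the REST endpoint
// should answer as. A hedged sketch of adding another override (the model key
// and emulated name here are hypothetical):
//
//   export default {
//     prompt: [new Prompt({ messages: ["{{messages}}"] })],
//     inputParameters: { messages: [{role: '', content: []}] },
//     model: 'my-chat-model',                  // hypothetical Cortex model key
//     useInputChunking: false,
//     emulateOpenAIChatModel: 'my-chat-model', // hypothetical emulated name
//   }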
'../../../server/prompt.js'; 5 | 6 | export default { 7 | prompt: 8 | [ 9 | new Prompt({ messages: [ 10 | "{{messages}}", 11 | ]}), 12 | ], 13 | inputParameters: { 14 | messages: [], 15 | }, 16 | model: 'oai-gpt4', 17 | useInputChunking: false, 18 | emulateOpenAIChatModel: 'gpt-4', 19 | } -------------------------------------------------------------------------------- /pathways/system/rest_streaming/sys_openai_chat_gpt4_32.js: -------------------------------------------------------------------------------- 1 | // sys_openai_chat_gpt4_32.js 2 | // override handler for gpt-4-32 3 | 4 | import { Prompt } from '../../../server/prompt.js'; 5 | 6 | export default { 7 | prompt: 8 | [ 9 | new Prompt({ messages: [ 10 | "{{messages}}", 11 | ]}), 12 | ], 13 | inputParameters: { 14 | messages: [], 15 | }, 16 | model: 'oai-gpt4-32', 17 | useInputChunking: false, 18 | emulateOpenAIChatModel: 'gpt-4-32k', 19 | } -------------------------------------------------------------------------------- /pathways/system/rest_streaming/sys_openai_chat_gpt4_turbo.js: -------------------------------------------------------------------------------- 1 | // sys_openai_chat_gpt4_turbo.js 2 | // override handler for gpt-4-turbo 3 | 4 | import { Prompt } from '../../../server/prompt.js'; 5 | 6 | export default { 7 | prompt: 8 | [ 9 | new Prompt({ messages: [ 10 | "{{messages}}", 11 | ]}), 12 | ], 13 | inputParameters: { 14 | messages: [], 15 | }, 16 | model: 'oai-gpt4-turbo', 17 | useInputChunking: false, 18 | emulateOpenAIChatModel: 'gpt-4-turbo', 19 | } -------------------------------------------------------------------------------- /pathways/system/rest_streaming/sys_openai_chat_o1.js: -------------------------------------------------------------------------------- 1 | // sys_openai_chat_o1.js 2 | 3 | import { Prompt } from '../../../server/prompt.js'; 4 | 5 | export default { 6 | prompt: 7 | [ 8 | new Prompt({ messages: [ 9 | "{{messages}}", 10 | ]}), 11 | ], 12 | inputParameters: { 13 | messages: [{role: '', content: []}], 14 | }, 15 | model: 'oai-o1', 16 | useInputChunking: false, 17 | emulateOpenAIChatModel: 'o1', 18 | enableDuplicateRequests: false, 19 | } -------------------------------------------------------------------------------- /pathways/system/rest_streaming/sys_openai_chat_o3_mini.js: -------------------------------------------------------------------------------- 1 | // sys_openai_chat_o3_mini.js 2 | 3 | import { Prompt } from '../../../server/prompt.js'; 4 | 5 | export default { 6 | prompt: 7 | [ 8 | new Prompt({ messages: [ 9 | "{{messages}}", 10 | ]}), 11 | ], 12 | inputParameters: { 13 | messages: [{role: '', content: []}], 14 | }, 15 | model: 'oai-o3-mini', 16 | useInputChunking: false, 17 | emulateOpenAIChatModel: 'o3-mini', 18 | enableDuplicateRequests: false, 19 | } -------------------------------------------------------------------------------- /pathways/system/rest_streaming/sys_openai_completion.js: -------------------------------------------------------------------------------- 1 | // sys_openai_completion.js 2 | // default handler for openAI completion endpoints when REST endpoints are enabled 3 | 4 | export default { 5 | prompt: `{{text}}`, 6 | model: 'oai-gpturbo', 7 | useInputChunking: false, 8 | emulateOpenAICompletionModel: '*', 9 | } -------------------------------------------------------------------------------- /pathways/system/sys_parse_numbered_object_list.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from 
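// sys_parse_numbered_object_list (this file) prompts the model to turn a
// numbered list plus a field spec into a {list: [...]} object. A worked example
// of the transformation it asks for (input and output are illustrative):
//
//   Fields: headline, score
//   List:   1. Storm hits coast - 8
//           2. Markets rally - 6
//
//   => {"list":[{"headline":"Storm hits coast","score":8},
//               {"headline":"Markets rally","score":6}]}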
'../../server/prompt.js'; 2 | 3 | export default { 4 | prompt: [ 5 | new Prompt({ 6 | messages: [ 7 | { "role": "system", "content": "Assistant is a list parsing AI. When user posts text including a numbered list and a desired set of fields, assistant will carefully read the list and attempt to convert the list into a JSON array named 'list' of objects. Each list item is converted into an array element object with the given fields. If a field value is numeric, it should be returned as a number in the object. If there are extra fields, assistant will ignore them. If a list item doesn't contain all fields, assistant will return the fields that are present and skip the missing fields. If the conversion is not at all possible, assistant will return an object with an empty list: {list:[]}.\n\nExample: {list:[{field1: \"value1\", field2: \"value2\"}, {field1: \"value3\", field2: \"value4\"}]}"}, 8 | { "role": "user", "content": `Fields: {{{format}}}\nList: {{{text}}}`}, 9 | ] 10 | }) 11 | ], 12 | format: '', 13 | model: 'oai-gpt4o', 14 | temperature: 0.0, 15 | enableCache: true, 16 | enableDuplicateRequests: false, 17 | json: true 18 | } 19 | 20 | -------------------------------------------------------------------------------- /pathways/system/sys_repair_json.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../../server/prompt.js'; 2 | 3 | export default { 4 | prompt: [ 5 | new Prompt({ 6 | messages: [ 7 | { "role": "system", "content": "Assistant is a JSON repair assistant. When user posts text including a JSON object, assistant will carefully read the JSON object, extract it from any surrounding text or commentary, and repair it if necessary to make it valid, parseable JSON. If there is no JSON in the response, assistant will return an empty JSON object. Assistant will generate only the repaired JSON object in a directly parseable format with no markdown surrounding it and no other response or commentary." }, 8 | { "role": "user", "content": `{{{text}}}`}, 9 | ] 10 | }) 11 | ], 12 | model: 'oai-gpt4o-mini', 13 | temperature: 0.0, 14 | enableCache: true, 15 | enableDuplicateRequests: false, 16 | } 17 | 18 | -------------------------------------------------------------------------------- /pathways/system/workspaces/run_claude35_sonnet.js: -------------------------------------------------------------------------------- 1 | 2 | // Import required modules 3 | import { Prompt } from '../../../server/prompt.js'; 4 | 5 | export default { 6 | prompt: [ 7 | new Prompt({ 8 | messages: [ 9 | { "role": "system", "content": "{{{systemPrompt}}}" }, 10 | { "role": "user", "content": "{{{text}}}\n\n{{{prompt}}}" } 11 | ] 12 | }), 13 | ], 14 | 15 | inputParameters: { 16 | prompt: "", 17 | systemPrompt: "Assistant is an expert journalist's assistant for a prestigious international news agency.
When a user posts a request, Assistant will come up with the best response while upholding the highest journalistic standards.", 18 | }, 19 | 20 | model: 'claude-35-sonnet-vertex', 21 | } -------------------------------------------------------------------------------- /pathways/system/workspaces/run_claude3_haiku.js: -------------------------------------------------------------------------------- 1 | // Import required modules 2 | import { Prompt } from '../../../server/prompt.js'; 3 | 4 | export default { 5 | prompt: [ 6 | new Prompt({ 7 | messages: [ 8 | { "role": "system", "content": "{{{systemPrompt}}}" }, 9 | { "role": "user", "content": "{{{text}}}\n\n{{{prompt}}}" } 10 | ] 11 | }), 12 | ], 13 | 14 | inputParameters: { 15 | prompt: "", 16 | systemPrompt: "Assistant is an expert journalist's assistant for a prestigious international news agency. When a user posts a request, Assistant will come up with the best response while upholding the highest journalistic standards.", 17 | }, 18 | 19 | model: 'claude-3-haiku-vertex', 20 | } -------------------------------------------------------------------------------- /pathways/system/workspaces/run_gpt35turbo.js: -------------------------------------------------------------------------------- 1 | // Import required modules 2 | import { Prompt } from "../../../server/prompt.js" 3 | 4 | export default { 5 | prompt: [ 6 | new Prompt({ 7 | messages: [ 8 | { "role": "system", "content": "{{{systemPrompt}}}" }, 9 | { "role": "user", "content": "{{{text}}}\n\n{{{prompt}}}" } 10 | ] 11 | }), 12 | ], 13 | 14 | inputParameters: { 15 | prompt: "", 16 | systemPrompt: "Assistant is an expert journalist's assistant working for a prestigious international news agency. When a user posts a request, Assistant will come up with the best response while upholding the highest journalistic standards.", 17 | }, 18 | 19 | model: 'oai-gpturbo', 20 | } -------------------------------------------------------------------------------- /pathways/system/workspaces/run_gpt4.js: -------------------------------------------------------------------------------- 1 | // Import required modules 2 | import { Prompt } from '../../../server/prompt.js'; 3 | 4 | export default { 5 | prompt: [ 6 | new Prompt({ 7 | messages: [ 8 | { "role": "system", "content": "{{{systemPrompt}}}" }, 9 | { "role": "user", "content": "{{{text}}}\n\n{{{prompt}}}" } 10 | ] 11 | }), 12 | ], 13 | 14 | inputParameters: { 15 | prompt: "", 16 | systemPrompt: "Assistant is an expert journalist's assistant for a prestigious international news agency. When a user posts a request, Assistant will come up with the best response while upholding the highest journalistic standards.", 17 | }, 18 | 19 | model: 'oai-gpt4', 20 | } -------------------------------------------------------------------------------- /pathways/system/workspaces/run_gpt4_32.js: -------------------------------------------------------------------------------- 1 | // Import required modules 2 | import { Prompt } from '../../../server/prompt.js'; 3 | 4 | export default { 5 | prompt: [ 6 | new Prompt({ 7 | messages: [ 8 | { "role": "system", "content": "{{{systemPrompt}}}" }, 9 | { "role": "user", "content": "{{{text}}}\n\n{{{prompt}}}" } 10 | ] 11 | }), 12 | ], 13 | 14 | inputParameters: { 15 | prompt: "", 16 | systemPrompt: "Assistant is an expert journalist's assistant for a prestigious international news agency. 
When a user posts a request, Assistant will come up with the best response while upholding the highest journalistic standards.", 17 | }, 18 | 19 | model: 'oai-gpt4-32', 20 | } -------------------------------------------------------------------------------- /pathways/tags.js: -------------------------------------------------------------------------------- 1 | // tags.js 2 | // News tags identification module 3 | // This module exports a prompt that takes an input article text and identifies the top news tags for the article. 4 | 5 | import { callPathway } from '../lib/pathwayTools.js'; 6 | 7 | export default { 8 | prompt: [], 9 | model: 'oai-gpt4o', 10 | 11 | // Define input parameters for the prompt, such as the number of top news tags to identify and select. 12 | inputParameters: { 13 | count: 5, 14 | tags: '', 15 | }, 16 | 17 | // Set 'list' to true to indicate that the output is expected to be a list. 18 | list: true, 19 | timeout: 240, 20 | temperature: 0, 21 | 22 | resolver: async (parent, args, _contextValue, _info) => { 23 | return await callPathway('taxonomy', { ...args, taxonomyType: 'tag', taxonomyItems: args.tags }); 24 | } 25 | } -------------------------------------------------------------------------------- /pathways/test_cohere_summarize.js: -------------------------------------------------------------------------------- 1 | // test_cohere_summarize.js 2 | // Summarize text with the Cohere model 3 | 4 | export default { 5 | // Uncomment the following line to enable caching for this prompt, if desired. 6 | // enableCache: true, 7 | 8 | prompt: `{{text}}`, 9 | model: 'cohere-summarize' 10 | }; 11 | -------------------------------------------------------------------------------- /pathways/topics.js: -------------------------------------------------------------------------------- 1 | // topics.js 2 | // News categories identification module 3 | // This module exports a prompt that takes an input article text and identifies the top news categories for the article. 4 | 5 | import { callPathway } from '../lib/pathwayTools.js'; 6 | 7 | export default { 8 | prompt: [], 9 | model: 'oai-gpt4o', 10 | 11 | // Define input parameters for the prompt, such as the number of top news topics to identify and select. 12 | inputParameters: { 13 | count: 5, 14 | topics: '', 15 | }, 16 | 17 | // Set 'list' to true to indicate that the output is expected to be a list. 18 | list: true, 19 | timeout: 240, 20 | 21 | // Custom resolver to find matching topics. 22 | resolver: async (parent, args, _contextValue, _info) => { 23 | return await callPathway('taxonomy', { ...args, taxonomyType: 'topic', taxonomyItems: args.topics }); 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /pathways/topics_sentiment.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | prompt: [ 5 | new Prompt({ 6 | messages: [ 7 | { "role": "system", "content": `Assistant is an expert topic and trend analyst AI working for a prestigious international news agency. When a user submits commentary with a video id, Assistant will return the video id being analyzed in a block called "Video ID:", then will summarize the commentary and return it in a block called "Summary:" and then will list the topics contained therein with general sentiments about each of the topics in a block called "Topics:". Each topic line in the block should be of the form <#> - <topic> - <sentiment>.
The goal of the analysis is to answer the question "What are the viewers interested in and how do they feel about it?" Assistant will return the video id, summary, and topics and sentiments and no other notes or commentary.`}, 8 | { "role": "user", "content": `Video commentary:\n\n{{{text}}}`}, 9 | ] 10 | }) 11 | ], 12 | model: 'oai-gpt4o', 13 | //inputChunkSize: 1000, 14 | joinChunksWith: '\n', 15 | tokenRatio: 1, 16 | enableDuplicateRequests: false, 17 | timeout: 1800, 18 | } 19 | 20 | 21 | -------------------------------------------------------------------------------- /pathways/transcribe.js: -------------------------------------------------------------------------------- 1 | export default { 2 | prompt: `{{text}}`, 3 | model: `oai-whisper`, 4 | inputParameters: { 5 | file: ``, 6 | language: ``, 7 | responseFormat: `text`, 8 | wordTimestamped: false, 9 | highlightWords: false, 10 | maxLineWidth: 0, 11 | maxLineCount: 0, 12 | maxWordsPerLine: 0, 13 | }, 14 | timeout: 3600, // in seconds 15 | enableDuplicateRequests: false, 16 | }; -------------------------------------------------------------------------------- /pathways/transcribe_neuralspace.js: -------------------------------------------------------------------------------- 1 | export default { 2 | prompt: `{{text}}`, 3 | model: `neuralspace`, 4 | inputParameters: { 5 | file: ``, 6 | language: ``, 7 | responseFormat: `text`, 8 | wordTimestamped: false, 9 | highlightWords: false, 10 | maxLineWidth: 0, 11 | maxLineCount: 0, 12 | maxWordsPerLine: 0, 13 | }, 14 | timeout: 3600, // in seconds 15 | enableDuplicateRequests: false, 16 | }; 17 | 18 | 19 | -------------------------------------------------------------------------------- /pathways/translate.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | 5 | prompt: [ 6 | new Prompt({ messages: [ 7 | {"role": "system", "content": "Assistant is a highly skilled multilingual translator for a prestigious news agency. When the user posts any text in any language, assistant will create a translation of that text in {{to}}. 
Assistant will produce only the translation and no additional notes or commentary."}, 8 | {"role": "user", "content": "{{{text}}}"} 9 | ]}), 10 | ], 11 | inputParameters: { 12 | to: `Arabic`, 13 | tokenRatio: 0.2, 14 | }, 15 | inputChunkSize: 500, 16 | model: 'oai-gpt4o', 17 | enableDuplicateRequests: false, 18 | 19 | } -------------------------------------------------------------------------------- /pathways/translate_azure.js: -------------------------------------------------------------------------------- 1 | // Description: Translate a text from one language to another 2 | 3 | export default { 4 | temperature: 0, 5 | prompt: `{{{text}}}`, 6 | inputParameters: { 7 | to: `en`, 8 | tokenRatio: 0.2, 9 | }, 10 | //inputChunkSize: 500, 11 | model: 'azure-translate', 12 | timeout: 120, 13 | } -------------------------------------------------------------------------------- /pathways/translate_context.js: -------------------------------------------------------------------------------- 1 | // Description: Translate a text from one language to another 2 | 3 | export default { 4 | temperature: 0, 5 | prompt: 6 | [ 7 | // `{{{text}}}\n\nList all of the named entities in the above document in the original language:\n`, 8 | //`{{{previousResult}}}\n\nTranslate this list to {{to}}:\n`, 9 | //`{{{text}}}\nTranscribe the names of all people and places exactly from this document in the original language:\n`, 10 | `{{{text}}}\nCopy the names of all people and places exactly from this document in the language above:\n`, 11 | //`{{{previousResult}}}\n\nTranscribe exactly to {{to}}:\n`, 12 | `Original Language:\n{{{previousResult}}}\n\n{{to}}:\n`, 13 | //`Entities in the document:\n\n{{{previousResult}}}\n\nDocument:\n{{{text}}}\nTranslate the document to {{to}} and rewrite it to sound like a native {{to}} speaker:\n\n` 14 | `Entities in the document:\n\n{{{previousResult}}}\n\nDocument:\n{{{text}}}\nRewrite the document in {{to}}. If the document is already in {{to}}, copy it exactly below:\n` 15 | ], 16 | inputParameters: { 17 | to: `Arabic`, 18 | tokenRatio: 0.2, 19 | }, 20 | timeout: 120, 21 | } -------------------------------------------------------------------------------- /pathways/translate_gpt4.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | 5 | prompt: [ 6 | new Prompt({ messages: [ 7 | {"role": "system", "content": "Assistant is a highly skilled multilingual translator for a prestigious news agency. When the user posts any text in any language, assistant will create a translation of that text in {{to}}. Assistant will produce only the translation and no additional notes or commentary."}, 8 | {"role": "user", "content": "{{{text}}}"} 9 | ]}), 10 | ], 11 | inputParameters: { 12 | to: `Arabic`, 13 | tokenRatio: 0.2, 14 | }, 15 | inputChunkSize: 500, 16 | model: 'oai-gpt4', 17 | enableDuplicateRequests: false, 18 | 19 | } -------------------------------------------------------------------------------- /pathways/translate_gpt4_omni.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | 5 | prompt: [ 6 | new Prompt({ messages: [ 7 | {"role": "system", "content": "Assistant is a highly skilled multilingual translator for a prestigious news agency. When the user posts any text to translate in any language, assistant will create a translation of that text in {{to}}. 
All text that the user posts is to be translated - assistant must not respond to the user in any way and should produce only the translation with no additional notes or commentary."}, 8 | {"role": "user", "content": "{{{text}}}"} 9 | ]}), 10 | ], 11 | inputParameters: { 12 | to: `Arabic`, 13 | tokenRatio: 0.2, 14 | }, 15 | inputChunkSize: 1000, 16 | model: 'oai-gpt4o', 17 | enableDuplicateRequests: false, 18 | useParallelChunkProcessing: true, 19 | 20 | } -------------------------------------------------------------------------------- /pathways/translate_gpt4_turbo.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | 5 | prompt: [ 6 | new Prompt({ messages: [ 7 | {"role": "system", "content": "Assistant is a highly skilled multilingual translator for a prestigious news agency. When the user posts any text in any language, assistant will create a translation of that text in {{to}}. Assistant will produce only the translation and no additional notes or commentary."}, 8 | {"role": "user", "content": "{{{text}}}"} 9 | ]}), 10 | ], 11 | inputParameters: { 12 | to: `Arabic`, 13 | tokenRatio: 0.2, 14 | }, 15 | inputChunkSize: 500, 16 | model: 'oai-gpt4-turbo', 17 | enableDuplicateRequests: false, 18 | 19 | } -------------------------------------------------------------------------------- /pathways/translate_subtitle_helper.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | prompt: [ 5 | new Prompt({ 6 | messages: [ 7 | { 8 | role: "system", 9 | content: 10 | `You are an expert subtitle translator. You will be given a block of subtitles and asked to translate them into {{to}}. 11 | You must maintain the original format (caption numbers and timestamps) exactly and make the content fit as naturally as possible. 12 | Output only the translated subtitles in a tag with no other text or commentary.` 13 | }, 14 | { 15 | role: "user", 16 | content: `\n{{{text}}}\n`, 17 | }, 18 | ], 19 | }), 20 | ], 21 | inputParameters: { 22 | to: `Arabic`, 23 | tokenRatio: 0.2, 24 | format: `srt`, 25 | prevLine: ``, 26 | nextLine: ``, 27 | }, 28 | useInputChunking: false, 29 | model: 'oai-gpt4o', 30 | enableDuplicateRequests: false, 31 | timeout: 3600, 32 | } -------------------------------------------------------------------------------- /pathways/translate_turbo.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | 5 | prompt: [ 6 | new Prompt({ messages: [ 7 | {"role": "system", "content": "Assistant is a highly skilled multilingual translator for a prestigious news agency. When the user posts any text in any language, assistant will create a translation of that text in {{to}}. 
Assistant will produce only the translation and no additional notes or commentary."}, 8 | {"role": "user", "content": "{{{text}}}"} 9 | ]}), 10 | ], 11 | inputParameters: { 12 | to: `Arabic`, 13 | tokenRatio: 0.2, 14 | }, 15 | inputChunkSize: 500, 16 | model: 'oai-gpturbo', 17 | enableDuplicateRequests: false, 18 | 19 | } -------------------------------------------------------------------------------- /pathways/vision.js: -------------------------------------------------------------------------------- 1 | import { Prompt } from '../server/prompt.js'; 2 | 3 | export default { 4 | prompt: 5 | [ 6 | new Prompt({ messages: [ 7 | {"role": "system", "content": "Instructions:\nYou are Jarvis Vision, an AI entity working for a prestigious international news agency. Jarvis is truthful, kind, helpful, has a strong moral character, and is generally positive without being annoying or repetitive. Your primary expertise is image analysis. You are capable of understanding and interpreting complex image data, identifying patterns and trends, and delivering insights in a clear, digestible format. You know the current date and time - it is {{now}}."}, 8 | "{{chatHistory}}", 9 | ]}), 10 | ], 11 | inputParameters: { 12 | chatHistory: [{role: '', content: []}], 13 | contextId: ``, 14 | }, 15 | max_tokens: 1024, 16 | model: 'oai-gpt41', 17 | useInputChunking: false, 18 | enableDuplicateRequests: false, 19 | timeout: 600, 20 | } -------------------------------------------------------------------------------- /server/pathwayResponseParser.js: -------------------------------------------------------------------------------- 1 | import { parseNumberedList, parseNumberedObjectList, parseCommaSeparatedList, isCommaSeparatedList, isNumberedList, parseJson } from './parser.js'; 2 | 3 | class PathwayResponseParser { 4 | constructor(pathway) { 5 | this.pathway = pathway; 6 | } 7 | 8 | async parse(data) { 9 | if (this.pathway.parser) { 10 | return this.pathway.parser(data); 11 | } 12 | 13 | if (this.pathway.list) { 14 | if (isNumberedList(data)) { 15 | if (this.pathway.format) { 16 | return await parseNumberedObjectList(data, this.pathway.format); 17 | } 18 | return parseNumberedList(data); 19 | } else if (isCommaSeparatedList(data)) { 20 | return parseCommaSeparatedList(data); 21 | } 22 | return [data]; 23 | } 24 | 25 | if (this.pathway.json) { 26 | return await parseJson(data); 27 | } 28 | 29 | return data; 30 | } 31 | } 32 | 33 | export { PathwayResponseParser }; -------------------------------------------------------------------------------- /server/plugins/azureTranslatePlugin.js: -------------------------------------------------------------------------------- 1 | // AzureTranslatePlugin.js 2 | import ModelPlugin from './modelPlugin.js'; 3 | import logger from '../../lib/logger.js'; 4 | 5 | class AzureTranslatePlugin extends ModelPlugin { 6 | constructor(pathway, model) { 7 | super(pathway, model); 8 | } 9 | 10 | // Set up parameters specific to the Azure Translate API 11 | getRequestParameters(text, parameters, prompt) { 12 | const combinedParameters = { ...this.promptParameters, ...parameters }; 13 | const { modelPromptText } = this.getCompiledPrompt(text, parameters, prompt); 14 | const requestParameters = { 15 | data: [ 16 | { 17 | Text: modelPromptText, 18 | }, 19 | ], 20 | params: { 21 | to: combinedParameters.to 22 | } 23 | }; 24 | return requestParameters; 25 | } 26 | 27 | // Execute the request to the Azure Translate API 28 | async execute(text, parameters, prompt, cortexRequest) { 29 | const 
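// execute() below forwards the {data, params} pair built by
// getRequestParameters to the shared request executor. For reference,
// parseResponse further down expects the translate endpoint to answer with an
// array whose first element carries a translations array (values illustrative):
//
//   [ { translations: [ { text: 'Hola, mundo' } ] } ]   // => 'Hola, mundo'
//
// Anything that doesn't match that shape is returned unchanged.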
requestParameters = this.getRequestParameters(text, parameters, prompt); 30 | 31 | cortexRequest.data = requestParameters.data; 32 | cortexRequest.params = requestParameters.params; 33 | 34 | return this.executeRequest(cortexRequest); 35 | } 36 | 37 | // Parse the response from the Azure Translate API 38 | parseResponse(data) { 39 | if (Array.isArray(data) && data.length > 0 && data[0].translations) { 40 | return data[0].translations[0].text.trim(); 41 | } else { 42 | return data; 43 | } 44 | } 45 | 46 | // Override the logging function to display the request and response 47 | logRequestData(data, responseData, prompt) { 48 | const modelInput = data[0].Text; 49 | 50 | logger.verbose(`${modelInput}`); 51 | logger.verbose(`${this.parseResponse(responseData)}`); 52 | 53 | prompt && prompt.debugInfo && (prompt.debugInfo += `\n${JSON.stringify(data)}`); 54 | } 55 | } 56 | 57 | export default AzureTranslatePlugin; 58 | -------------------------------------------------------------------------------- /server/plugins/cohereGeneratePlugin.js: -------------------------------------------------------------------------------- 1 | // CohereGeneratePlugin.js 2 | import ModelPlugin from './modelPlugin.js'; 3 | 4 | class CohereGeneratePlugin extends ModelPlugin { 5 | constructor(pathway, model) { 6 | super(pathway, model); 7 | } 8 | 9 | // Set up parameters specific to the Cohere API 10 | getRequestParameters(text, parameters, prompt) { 11 | let { modelPromptText, tokenLength } = this.getCompiledPrompt(text, parameters, prompt); 12 | 13 | // Get the model's maximum prompt token length 14 | const modelTargetTokenLength = this.getModelMaxPromptTokens(); 15 | 16 | // Check if the prompt's token length exceeds the model's limit 17 | if (tokenLength > modelTargetTokenLength) { 18 | // Truncate the prompt text to fit (substring is a rough character-based approximation of the token limit) 19 | modelPromptText = modelPromptText.substring(0, modelTargetTokenLength); 20 | } 21 | 22 | const requestParameters = { 23 | model: "command", 24 | prompt: modelPromptText, 25 | max_tokens: this.getModelMaxReturnTokens(), 26 | temperature: this.temperature ??
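// Plugins under server/plugins follow the same contract: subclass ModelPlugin,
// build provider-specific parameters in getRequestParameters, hand them to
// executeRequest in execute, and unwrap the provider's payload in parseResponse.
// A minimal hedged sketch of a new plugin (the request and response fields are
// hypothetical, not a real provider API):
//
//   class EchoPlugin extends ModelPlugin {
//     getRequestParameters(text, parameters, prompt) {
//       const { modelPromptText } = this.getCompiledPrompt(text, parameters, prompt);
//       return { prompt: modelPromptText };   // hypothetical request body
//     }
//     async execute(text, parameters, prompt, cortexRequest) {
//       cortexRequest.data = { ...cortexRequest.data, ...this.getRequestParameters(text, parameters, prompt) };
//       return this.executeRequest(cortexRequest);
//     }
//     parseResponse(data) { return data?.echo ?? data; } // hypothetical response field
//   }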
0.7, 27 | k: 0, 28 | stop_sequences: parameters.stop_sequences || [], 29 | return_likelihoods: parameters.return_likelihoods || "NONE" 30 | }; 31 | 32 | return requestParameters; 33 | } 34 | 35 | // Execute the request to the Cohere API 36 | async execute(text, parameters, prompt, cortexRequest) { 37 | const requestParameters = this.getRequestParameters(text, parameters, prompt); 38 | cortexRequest.data = { ...cortexRequest.data, ...requestParameters }; 39 | return this.executeRequest(cortexRequest); 40 | } 41 | 42 | // Parse the response from the Cohere API 43 | parseResponse(data) { 44 | const { generations } = data; 45 | if (!generations || !generations.length) { 46 | return data; 47 | } 48 | // Return the text of the first generation 49 | return generations[0].text || null; 50 | } 51 | } 52 | 53 | export default CohereGeneratePlugin; -------------------------------------------------------------------------------- /server/plugins/cohereSummarizePlugin.js: -------------------------------------------------------------------------------- 1 | // CohereSummarizePlugin.js 2 | import ModelPlugin from './modelPlugin.js'; 3 | 4 | class CohereSummarizePlugin extends ModelPlugin { 5 | constructor(pathway, model) { 6 | super(pathway, model); 7 | } 8 | 9 | // Set up parameters specific to the Cohere Summarize API 10 | getRequestParameters(text, parameters, prompt) { 11 | const { modelPromptText } = this.getCompiledPrompt(text, parameters, prompt); 12 | 13 | const requestParameters = { 14 | length: parameters.length || "medium", 15 | format: parameters.format || "paragraph", 16 | model: "summarize-xlarge", 17 | extractiveness: parameters.extractiveness || "low", 18 | temperature: this.temperature ?? 0.3, 19 | text: modelPromptText 20 | }; 21 | 22 | return requestParameters; 23 | } 24 | 25 | // Execute the request to the Cohere Summarize API 26 | async execute(text, parameters, prompt, cortexRequest) { 27 | const requestParameters = this.getRequestParameters(text, parameters, prompt); 28 | cortexRequest.data = { ...cortexRequest.data, ...requestParameters }; 29 | return this.executeRequest(cortexRequest); 30 | } 31 | 32 | // Parse the response from the Cohere Summarize API 33 | parseResponse(data) { 34 | const { summary } = data; 35 | if (!summary) { 36 | return data; 37 | } 38 | // Return the summary 39 | return summary; 40 | } 41 | } 42 | 43 | export default CohereSummarizePlugin; -------------------------------------------------------------------------------- /server/plugins/openAiEmbeddingsPlugin.js: -------------------------------------------------------------------------------- 1 | // OpenAiEmbeddingsPlugin.js 2 | import ModelPlugin from './modelPlugin.js'; 3 | 4 | class OpenAiEmbeddingsPlugin extends ModelPlugin { 5 | constructor(pathway, model) { 6 | super(pathway, model); 7 | } 8 | 9 | getRequestParameters(text, parameters, prompt) { 10 | const combinedParameters = { ...this.promptParameters, ...this.model.params, ...parameters }; 11 | const { modelPromptText } = this.getCompiledPrompt(text, combinedParameters, prompt); 12 | const { model } = combinedParameters; 13 | const requestParameters = { 14 | data: { 15 | input: combinedParameters?.input?.length ? 
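// The ternary below prefers an explicit `input` array (the OpenAI embeddings
// endpoint accepts a string or an array of strings) over the compiled prompt
// text, falling back to the raw text. parseResponse then flattens the response
// to a JSON array of embedding vectors, e.g. (illustrative):
//
//   { data: [{ embedding: [0.1, -0.2] }, { embedding: [0.3, 0.4] }] }
//   => "[[0.1,-0.2],[0.3,0.4]]"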
combinedParameters.input : modelPromptText || text, 16 | model 17 | } 18 | }; 19 | return requestParameters; 20 | } 21 | 22 | async execute(text, parameters, prompt, cortexRequest) { 23 | const requestParameters = this.getRequestParameters(text, parameters, prompt); 24 | 25 | cortexRequest.data = requestParameters.data || {}; 26 | cortexRequest.params = requestParameters.params || {}; 27 | 28 | return this.executeRequest(cortexRequest); 29 | } 30 | 31 | parseResponse(data) { 32 | return JSON.stringify(data?.data?.map( ({embedding}) => embedding) || []); 33 | } 34 | 35 | } 36 | 37 | export default OpenAiEmbeddingsPlugin; 38 | -------------------------------------------------------------------------------- /server/plugins/openAiReasoningVisionPlugin.js: -------------------------------------------------------------------------------- 1 | import OpenAIVisionPlugin from './openAiVisionPlugin.js'; 2 | 3 | class OpenAIReasoningVisionPlugin extends OpenAIVisionPlugin { 4 | 5 | async tryParseMessages(messages) { 6 | const parsedMessages = await super.tryParseMessages(messages); 7 | 8 | let newMessages = []; 9 | 10 | // System messages to developer: https://platform.openai.com/docs/guides/text-generation#messages-and-roles 11 | newMessages = parsedMessages.map(message => ({ 12 | ...message, 13 | role: message.role === 'system' ? 'developer' : message.role 14 | })).filter(message => ['user', 'assistant', 'developer', 'tool'].includes(message.role)); 15 | 16 | return newMessages; 17 | } 18 | 19 | async getRequestParameters(text, parameters, prompt) { 20 | const requestParameters = await super.getRequestParameters(text, parameters, prompt); 21 | 22 | const modelMaxReturnTokens = this.getModelMaxReturnTokens(); 23 | const maxTokensPrompt = this.promptParameters.max_tokens; 24 | const maxTokensModel = this.getModelMaxTokenLength() * (1 - this.getPromptTokenRatio()); 25 | 26 | const maxTokens = maxTokensPrompt || maxTokensModel; 27 | 28 | delete requestParameters.max_tokens; 29 | requestParameters.max_completion_tokens = maxTokens ? Math.min(maxTokens, modelMaxReturnTokens) : modelMaxReturnTokens; 30 | requestParameters.temperature = 1; 31 | 32 | if (this.promptParameters.reasoningEffort) { 33 | const effort = this.promptParameters.reasoningEffort.toLowerCase(); 34 | if (['high', 'medium', 'low'].includes(effort)) { 35 | requestParameters.reasoning_effort = effort; 36 | } else { 37 | requestParameters.reasoning_effort = 'low'; 38 | } 39 | } 40 | 41 | if (this.promptParameters.responseFormat) { 42 | requestParameters.response_format = this.promptParameters.responseFormat; 43 | } 44 | 45 | return requestParameters; 46 | } 47 | } 48 | 49 | export default OpenAIReasoningVisionPlugin; -------------------------------------------------------------------------------- /server/prompt.js: -------------------------------------------------------------------------------- 1 | class Prompt { 2 | constructor(params) { 3 | if (typeof params === 'string' || params instanceof String) { 4 | this.prompt = params; 5 | } else { 6 | const { prompt, saveResultTo, messages, context, examples } = params; 7 | this.prompt = prompt; 8 | this.saveResultTo = saveResultTo; 9 | this.messages = messages; 10 | this.context = context; 11 | this.examples = examples; 12 | this.params = params; 13 | } 14 | 15 | this.usesTextInput = promptContains('text', this.prompt ? 
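// Prompt accepts either a bare template string or a {messages: [...]} params
// object; the flags computed here record whether the template references
// {{text}} or {{previousResult}} anywhere in the prompt, context, or examples.
// Illustrative usage:
//
//   new Prompt('Summarize: {{{text}}}').usesTextInput                // true
//   new Prompt({ messages: [{role: 'user', content: '{{previousResult}}'}] })
//     .usesPreviousResult                                            // true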
this.prompt : this.messages) || 16 | (this.context && promptContains('text', this.context)) || 17 | (this.examples && promptContains('text', this.examples)); 18 | this.usesPreviousResult = promptContains('previousResult', this.prompt ? this.prompt : this.messages) || 19 | (this.context && promptContains('previousResult', this.context)) || 20 | (this.examples && promptContains('previousResult', this.examples)); 21 | this.debugInfo = ''; 22 | } 23 | } 24 | 25 | // function to check if a Handlebars template prompt contains a variable 26 | // can work with a single prompt or an array of messages 27 | function promptContains(variable, prompt) { 28 | const regexp = /{{+(.*?)}}+/g; 29 | let matches = []; 30 | let match; 31 | 32 | // if it's an array, it's either an OpenAI messages array or a PaLM messages 33 | // array or a PaLM examples array, all of which have a content property 34 | if (Array.isArray(prompt)) { 35 | prompt.forEach(p => { 36 | // eslint-disable-next-line no-cond-assign 37 | while (match = p.content && regexp.exec(p.content)) { 38 | matches.push(match[1]); 39 | } 40 | }); 41 | } else { 42 | while ((match = regexp.exec(prompt)) !== null) { 43 | matches.push(match[1]); 44 | } 45 | } 46 | 47 | const variables = matches.filter(function (varName) { 48 | return varName.indexOf("#") !== 0 && varName.indexOf("/") !== 0; 49 | }) 50 | 51 | return variables.includes(variable); 52 | } 53 | 54 | export { Prompt, promptContains }; -------------------------------------------------------------------------------- /server/pubsub.js: -------------------------------------------------------------------------------- 1 | import { PubSub } from 'graphql-subscriptions'; 2 | const pubsub = new PubSub(); 3 | pubsub.ee.setMaxListeners(300); 4 | 5 | export default pubsub; 6 | -------------------------------------------------------------------------------- /server/requestState.js: -------------------------------------------------------------------------------- 1 | const requestState = {}; // Stores the state of each request 2 | 3 | export { 4 | requestState 5 | }; -------------------------------------------------------------------------------- /server/resolver.js: -------------------------------------------------------------------------------- 1 | import { fulfillWithTimeout } from '../lib/promiser.js'; 2 | import { PathwayResolver } from './pathwayResolver.js'; 3 | 4 | // This resolver uses standard parameters required by Apollo server: 5 | // (parent, args, contextValue, info) 6 | const rootResolver = async (parent, args, contextValue, info) => { 7 | const { config, pathway } = contextValue; 8 | const { temperature, enableGraphqlCache } = pathway; 9 | 10 | // Turn on graphql caching if enableGraphqlCache true and temperature is 0 11 | if (enableGraphqlCache && temperature == 0) { // || 12 | info.cacheControl.setCacheHint({ maxAge: 60 * 60 * 24, scope: 'PUBLIC' }); 13 | } 14 | 15 | const pathwayResolver = new PathwayResolver({ config, pathway, args }); 16 | contextValue.pathwayResolver = pathwayResolver; 17 | 18 | // Execute the request with timeout 19 | let result = null; 20 | 21 | try { 22 | result = await fulfillWithTimeout(pathway.resolver(parent, args, contextValue, info), pathway.timeout); 23 | } catch (error) { 24 | pathwayResolver.logError(error); 25 | result = error.message || error.toString(); 26 | } 27 | 28 | const { warnings, errors, previousResult, savedContextId, tool } = pathwayResolver; 29 | 30 | // Add request parameters back as debug 31 | const debug = pathwayResolver.prompts.map(prompt => 
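// rootResolver returns a uniform envelope for every pathway: the debug string
// assembled below plus the resolver's result and bookkeeping fields. An
// illustrative shape of a successful response (values are examples only):
//
//   { debug: '', result: '...', warnings: [], errors: [],
//     previousResult: '', tool: '{"toolUsed":"memory"}', contextId: '...' }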
prompt.debugInfo || '').join('\n').trim(); 32 | 33 | return { debug, result, warnings, errors, previousResult, tool, contextId: savedContextId } 34 | } 35 | 36 | // This resolver is used by the root resolver to process the request 37 | const resolver = async (parent, args, contextValue, _info) => { 38 | const { pathwayResolver } = contextValue; 39 | return await pathwayResolver.resolve(args); 40 | } 41 | 42 | const cancelRequestResolver = (parent, args, contextValue, _info) => { 43 | const { requestId } = args; 44 | const { requestState } = contextValue; 45 | requestState[requestId] = { canceled: true }; 46 | return true 47 | } 48 | 49 | export { 50 | resolver, rootResolver, cancelRequestResolver 51 | }; 52 | -------------------------------------------------------------------------------- /server/subscriptions.js: -------------------------------------------------------------------------------- 1 | import pubsub from './pubsub.js'; 2 | import { withFilter } from 'graphql-subscriptions'; 3 | import { publishRequestProgressSubscription } from '../lib/redisSubscription.js'; 4 | import logger from '../lib/logger.js'; 5 | 6 | const subscriptions = { 7 | requestProgress: { 8 | subscribe: withFilter( 9 | (_, args, __, _info) => { 10 | logger.debug(`Client requested subscription for request ids: ${args.requestIds}`); 11 | publishRequestProgressSubscription(args.requestIds); 12 | return pubsub.asyncIterator(['REQUEST_PROGRESS']) 13 | }, 14 | (payload, variables) => { 15 | return ( 16 | variables.requestIds.includes(payload.requestProgress.requestId) 17 | ); 18 | }, 19 | ), 20 | }, 21 | }; 22 | 23 | export default subscriptions; 24 | -------------------------------------------------------------------------------- /start.js: -------------------------------------------------------------------------------- 1 | import startServerFactory from './index.js'; 2 | 3 | (async () => { 4 | const { startServer } = await startServerFactory(); 5 | startServer && startServer(); 6 | })(); -------------------------------------------------------------------------------- /tests/fastLruCache.test.js: -------------------------------------------------------------------------------- 1 | import test from 'ava'; 2 | import { FastLRUCache } from '../lib/fastLruCache.js'; 3 | 4 | test('FastLRUCache - get and put', t => { 5 | const cache = new FastLRUCache(2); 6 | 7 | cache.put(1, 1); 8 | cache.put(2, 2); 9 | 10 | t.is(cache.get(1), 1); // returns 1 11 | cache.put(3, 3); // evicts key 2 12 | t.is(cache.get(2), -1); // returns -1 (not found) 13 | cache.put(4, 4); // evicts key 1 14 | t.is(cache.get(1), -1); // returns -1 (not found) 15 | t.is(cache.get(3), 3); // returns 3 16 | t.is(cache.get(4), 4); // returns 4 17 | }); 18 | 19 | test('FastLRUCache - get non-existent key', t => { 20 | const cache = new FastLRUCache(2); 21 | t.is(cache.get(99), -1); // returns -1 (not found) 22 | }); 23 | 24 | test('FastLRUCache - update value of existing key', t => { 25 | const cache = new FastLRUCache(2); 26 | cache.put(1, 1); 27 | cache.put(1, 100); 28 | t.is(cache.get(1), 100); // returns updated value 100 29 | }); -------------------------------------------------------------------------------- /tests/handleBars.test.js: -------------------------------------------------------------------------------- 1 | // handleBars.test.js 2 | 3 | import test from 'ava'; 4 | import HandleBars from '../lib/handleBars.js'; 5 | 6 | test('stripHTML', (t) => { 7 | const stringWithHTML = '
<div><h1>Hello, World!</h1></div>
    '; 8 | const expectedResult = 'Hello, World!'; 9 | 10 | const result = HandleBars.helpers.stripHTML(stringWithHTML); 11 | t.is(result, expectedResult); 12 | }); 13 | 14 | test('now', (t) => { 15 | const expectedResult = new Date().toISOString(); 16 | 17 | const result = HandleBars.helpers.now(); 18 | t.is(result.slice(0, 10), expectedResult.slice(0, 10)); // Comparing only the date part 19 | }); 20 | 21 | test('toJSON', (t) => { 22 | const object = { key: 'value' }; 23 | const expectedResult = '{"key":"value"}'; 24 | 25 | const result = HandleBars.helpers.toJSON(object); 26 | t.is(result, expectedResult); 27 | }); 28 | 29 | test('ctoW', (t) => { 30 | const value = 66; 31 | const expectedResult = 10; 32 | 33 | const result = HandleBars.helpers.ctoW(value); 34 | t.is(result, expectedResult); 35 | }); 36 | 37 | test('ctoW non-numeric', (t) => { 38 | const value = 'Hello, World!'; 39 | const expectedResult = 'Hello, World!'; 40 | 41 | const result = HandleBars.helpers.ctoW(value); 42 | t.is(result, expectedResult); 43 | }); -------------------------------------------------------------------------------- /tests/server.js: -------------------------------------------------------------------------------- 1 | import 'dotenv/config' 2 | import { ApolloServer } from '@apollo/server'; 3 | import { config } from '../config.js'; 4 | import typeDefsresolversFactory from '../index.js'; 5 | 6 | let typeDefs; 7 | let resolvers; 8 | 9 | const initTypeDefsResolvers = async () => { 10 | const result = await typeDefsresolversFactory(); 11 | typeDefs = result.typeDefs; 12 | resolvers = result.resolvers; 13 | }; 14 | 15 | export const startTestServer = async () => { 16 | await initTypeDefsResolvers(); 17 | 18 | return new ApolloServer({ 19 | typeDefs, 20 | resolvers, 21 | context: () => ({ config, requestState: {} }), 22 | }); 23 | }; --------------------------------------------------------------------------------