├── samples
│   ├── content_generator_ui
│   │   ├── styles.css
│   │   ├── README.md
│   │   ├── app.js
│   │   ├── index.html
│   │   └── front.js
│   ├── frontend
│   │   ├── screenshot.png
│   │   └── README.md
│   ├── lambda_llama_sagemaker
│   │   ├── llama-endpoint-v1.zip
│   │   └── README.md
│   └── command_sample
│       ├── env_example
│       ├── package.json
│       ├── test_chatbot_gemini.js
│       ├── test_speech_models.js
│       ├── test_semantic_search.js
│       ├── test_azure_chatbot.js
│       ├── test_image_models.js
│       ├── test_language_models.js
│       ├── test_chatbot_proxy.js
│       ├── test_hugging_face.js
│       ├── test_semantic_search_pagination.js
│       ├── test_chat_context.js
│       ├── shiba_image_generator.js
│       ├── test_finetuning.js
│       ├── test_chatbot.js
│       ├── test_text_analyzer.js
│       ├── test_chatbot_nvidia.js
│       ├── test_llm_evaluation.test.js
│       ├── test_chatbot_cohere.js
│       ├── README.md
│       ├── test_llama_chatbot.js
│       ├── automate_s3_bucket.js
│       └── ecommerce_tool.js
├── .gitattributes
├── IntelliNode
│   ├── .npmignore
│   ├── resource
│   │   └── templates
│   │       ├── summary_prompt.in
│   │       ├── prompt_example.in
│   │       ├── sentiment_prompt.in
│   │       ├── augmented_chatbot.in
│   │       ├── html_page_prompt.in
│   │       ├── instruct_update.in
│   │       └── graph_dashboard_prompt.in
│   ├── utils
│   │   ├── ModelEvaluation.js
│   │   ├── FileHelper.js
│   │   ├── Config2.js
│   │   ├── AudioHelper.js
│   │   ├── MatchHelpers.js
│   │   ├── Prompt.js
│   │   ├── SystemHelper.js
│   │   ├── ConnHelper.js
│   │   ├── FetchClient.js
│   │   ├── ChatContext.js
│   │   └── MCPClient.js
│   ├── env_example
│   ├── model
│   │   └── input
│   │       ├── FineTuneInput.js
│   │       ├── FunctionModelInput.js
│   │       ├── EmbedInput.js
│   │       ├── LanguageModelInput.js
│   │       ├── Text2SpeechInput.js
│   │       └── ImageModelInput.js
│   ├── test
│   │   ├── unit
│   │   │   ├── Prompt.test.js
│   │   │   ├── testRunner.js
│   │   │   ├── StabilityAIWrapper.test.js
│   │   │   ├── HuggingWrapper.test.js
│   │   │   ├── CohereAIWrapper.test.js
│   │   │   ├── OpenAIWrapper.test.js
│   │   │   └── GoogleAIWrapper.test.js
│   │   └── integration
│   │       ├── LanguageModelInput.test.js
│   │       ├── GoogleAIWrapper.test.js
│   │       ├── RemoteEmbedModelVLLM.test.js
│   │       ├── AnthropicWrapper.test.js
│   │       ├── Prompt.test.js
│   │       ├── AzureRemoteEmbedModel.test.js
│   │       ├── AWSLLamav2.test.js
│   │       ├── HuggingWrapper.test.js
│   │       ├── MistralAIWrapper.test.js
│   │       ├── StabilityAIWrapper.test.js
│   │       ├── RemoteFineTune.test.js
│   │       ├── SemanticSearch.test.js
│   │       ├── SemanticSearchPaging.test.js
│   │       ├── AzureChatContext.test.js
│   │       ├── RemoteLanguageModel.test.js
│   │       ├── intellicloudWrapper.test.js
│   │       ├── ChatbotNvidia.test.js
│   │       ├── CustomGen.test.js
│   │       ├── ChatbotVLLM.test.js
│   │       ├── AzureOpenAIWrapper.test.js
│   │       ├── RemoteSpeechModel.test.js
│   │       ├── TextAnalyzer.test.js
│   │       ├── RemoteEmbedModel.test.js
│   │       ├── ChatbotCohere.test.js
│   │       ├── NvidiaWrapper.test.js
│   │       ├── GeminiAIWrapper.test.js
│   │       ├── GenNvidia.test.js
│   │       ├── NvidiaNimWrapper.test.js
│   │       ├── StabilityAIWrapperStyle.js
│   │       ├── CohereAIWrapper.test.js
│   │       ├── ModelEvaluation.test.js
│   │       └── VLLMWrapper.test.js
│   ├── wrappers
│   │   ├── AWSEndpointWrapper.js
│   │   ├── MistralAIWrapper.js
│   │   ├── AnthropicWrapper.js
│   │   ├── ReplicateWrapper.js
│   │   ├── VLLMWrapper.js
│   │   ├── IntellicloudWrapper.js
│   │   ├── HuggingWrapper.js
│   │   ├── CohereAIWrapper.js
│   │   ├── GoogleAIWrapper.js
│   │   ├── GeminiAIWrapper.js
│   │   └── NvidiaWrapper.js
│   ├── reference_scripts.txt
│   ├── package.json
│   ├── function
│   │   ├── SemanticSearchPaging.js
│   │   ├── TextAnalyzer.js
│   │   └── SemanticSearch.js
│   └── controller
│       ├── RemoteSpeechModel.js
│       ├── RemoteLanguageModel.js
│       ├── RemoteFineTuneModel.js
│       └── RemoteImageModel.js
├── images
│   ├── multimodel-banner.png
│   ├── intellinode_new_header.png
│   ├── llama_sagemaker
│   │   ├── s5_gateway.png
│   │   ├── s1_sagemaker.png
│   │   ├── s2_jumpstart.png
│   │   ├── s3_endpoint.png
│   │   ├── step_domain.png
│   │   └── s4_lambda_trigger.png
│   └── model_output
│       ├── gaming-chair.png
│       ├── register-page.png
│       ├── gaming-chair-xl.png
│       └── graphs_example.png
└── .gitignore
/samples/content_generator_ui/styles.css:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/IntelliNode/.npmignore:
--------------------------------------------------------------------------------
1 | .env
2 | node_modules
3 | .idea
4 | dist
5 | test/integration/
6 |
--------------------------------------------------------------------------------
/IntelliNode/resource/templates/summary_prompt.in:
--------------------------------------------------------------------------------
1 | Provide a short summary of the following text:\n\n${text}\n\nSummary:
--------------------------------------------------------------------------------
/images/multimodel-banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intelligentnode/IntelliNode/HEAD/images/multimodel-banner.png
--------------------------------------------------------------------------------
/samples/frontend/screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intelligentnode/IntelliNode/HEAD/samples/frontend/screenshot.png
--------------------------------------------------------------------------------
/images/intellinode_new_header.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intelligentnode/IntelliNode/HEAD/images/intellinode_new_header.png
--------------------------------------------------------------------------------
/images/llama_sagemaker/s5_gateway.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intelligentnode/IntelliNode/HEAD/images/llama_sagemaker/s5_gateway.png
--------------------------------------------------------------------------------
/images/model_output/gaming-chair.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intelligentnode/IntelliNode/HEAD/images/model_output/gaming-chair.png
--------------------------------------------------------------------------------
/images/model_output/register-page.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intelligentnode/IntelliNode/HEAD/images/model_output/register-page.png
--------------------------------------------------------------------------------
/images/llama_sagemaker/s1_sagemaker.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intelligentnode/IntelliNode/HEAD/images/llama_sagemaker/s1_sagemaker.png
--------------------------------------------------------------------------------
/images/llama_sagemaker/s2_jumpstart.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intelligentnode/IntelliNode/HEAD/images/llama_sagemaker/s2_jumpstart.png
--------------------------------------------------------------------------------
/images/llama_sagemaker/s3_endpoint.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intelligentnode/IntelliNode/HEAD/images/llama_sagemaker/s3_endpoint.png
--------------------------------------------------------------------------------
/images/llama_sagemaker/step_domain.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intelligentnode/IntelliNode/HEAD/images/llama_sagemaker/step_domain.png
--------------------------------------------------------------------------------
/images/model_output/gaming-chair-xl.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intelligentnode/IntelliNode/HEAD/images/model_output/gaming-chair-xl.png
--------------------------------------------------------------------------------
/images/model_output/graphs_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intelligentnode/IntelliNode/HEAD/images/model_output/graphs_example.png
--------------------------------------------------------------------------------
/IntelliNode/utils/ModelEvaluation.js:
--------------------------------------------------------------------------------
1 | class ModelEvaluation {
2 |
3 | constructor() {}
4 | }
5 |
6 | module.exports = {
7 | ModelEvaluation
8 | };
--------------------------------------------------------------------------------
/images/llama_sagemaker/s4_lambda_trigger.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intelligentnode/IntelliNode/HEAD/images/llama_sagemaker/s4_lambda_trigger.png
--------------------------------------------------------------------------------
/samples/lambda_llama_sagemaker/llama-endpoint-v1.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intelligentnode/IntelliNode/HEAD/samples/lambda_llama_sagemaker/llama-endpoint-v1.zip
--------------------------------------------------------------------------------
/samples/command_sample/env_example:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY=
2 | COHERE_API_KEY=
3 | GOOGLE_API_KEY=
4 | STABILITY_API_KEY=
5 | HUGGING_API_KEY=
6 | AZURE_OPENAI_API_KEY=
7 | AWS_ACCESS_KEY_ID=
8 | AWS_SECRET_ACCESS_KEY=
9 | REPLICATE_API_KEY=
10 | AWS_API_URL=
11 |
--------------------------------------------------------------------------------
/IntelliNode/resource/templates/prompt_example.in:
--------------------------------------------------------------------------------
1 | Example of good prompt engineering response:
2 |
3 | User: Create a prompt: to {query} from {context}.
4 |
5 | Assistant: Given the following context:
6 |
7 | context:
8 | ---------
9 | ${context}
10 |
11 | Extract the specific information denoted by the query: ${query} from the provided context.
--------------------------------------------------------------------------------
/IntelliNode/resource/templates/sentiment_prompt.in:
--------------------------------------------------------------------------------
1 | Output sentiment analysis results in a standardized JSON format with sentiment values 'positive', 'negative', or 'neutral'. The output should follow JSON format, and you can add multiple sentiments if needed. The output format should always be {"results": {"positive": 0 or 1, "negative": 0 or 1, "neutral": 0 or 1}}
--------------------------------------------------------------------------------
/IntelliNode/env_example:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY=
2 | COHERE_API_KEY=
3 | GOOGLE_API_KEY=
4 | STABILITY_API_KEY=
5 | HUGGING_API_KEY=
6 | AZURE_OPENAI_API_KEY=
7 | AWS_ACCESS_KEY_ID=
8 | AWS_SECRET_ACCESS_KEY=
9 | REPLICATE_API_KEY=
10 | MISTRAL_API_KEY=
11 | AWS_API_URL=
12 | INTELLI_API_BASE=
13 | GEMINI_API_KEY=
14 | # VLLM
15 | VLLM_EMBED_URL=
16 | DEEPSEEK_VLLM_URL=
17 | GEMMA_VLLM_URL=
--------------------------------------------------------------------------------
/IntelliNode/resource/templates/augmented_chatbot.in:
--------------------------------------------------------------------------------
1 | Using the provided context, craft a cohesive response that directly addresses the user's query. If the context lacks relevance or is absent, focus on generating a knowledgeable and accurate answer based on the user's question alone. Aim for clarity and conciseness in your reply.
2 | Context:
3 | ${semantic_search}
4 | ---------------------------------
5 | User's Question:
6 | ${user_query}
--------------------------------------------------------------------------------
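
The `${semantic_search}` and `${user_query}` placeholders are filled at runtime. `SystemHelper.js` and `Prompt.js` aren't expanded in this snapshot, but the unit test `Prompt.test.js` below loads the `html_page` template the same way; assuming the loader key for this template is `augmented_chatbot`, a minimal usage sketch:

```
const SystemHelper = require('./utils/SystemHelper');
const Prompt = require('./utils/Prompt');

// load the augmented chatbot template (key name assumed) and fill both placeholders
const template = new SystemHelper().loadPrompt('augmented_chatbot');
const prompt = new Prompt(template);

const filled = prompt.format({
  'semantic_search': 'IntelliNode supports OpenAI, Cohere, Gemini, and more.',
  'user_query': 'Which providers does IntelliNode support?'
});

console.log(filled);
```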
/samples/content_generator_ui/README.md:
--------------------------------------------------------------------------------
1 | # IntelliNode / UI Sample
2 |
3 | Generate marketing content for various products including text description, images, and audio.
4 |
5 | ## Setup Steps
6 | 1. Initialize the project:
7 | ```
8 | npm init -y
9 | npm i intellinode
10 | npm i express
11 | ```
12 | 2. Replace the API keys inside the `app.js` file.
13 |
14 | 3. Run the app:
15 | ```
16 | node app.js
17 | ```
18 |
19 | 4. Open your browser and navigate to `http://localhost:3000`.
20 |
--------------------------------------------------------------------------------
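
`app.js` itself isn't included in this snapshot; below is a minimal sketch of the shape such a server could take, assuming an OpenAI key and a hypothetical `/generate` route (the route name and prompt are illustrative, not the actual sample code):

```
// app.js (sketch) - serve the static UI and expose a simple generation endpoint
const express = require('express');
const { Chatbot, ChatGPTInput } = require('intellinode');

const OPENAI_API_KEY = 'your-openai-key'; // replace with your key

const app = express();
app.use(express.json());
app.use(express.static(__dirname)); // serves index.html, styles.css, front.js

// hypothetical endpoint: generate a product description from the posted name
app.post('/generate', async (req, res) => {
  const input = new ChatGPTInput('You are a marketing content writer.');
  input.addUserMessage(`Write a short product description for: ${req.body.product}`);
  const responses = await new Chatbot(OPENAI_API_KEY, 'openai').chat(input);
  res.json({ description: responses[0] });
});

app.listen(3000, () => console.log('open http://localhost:3000'));
```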
/IntelliNode/utils/FileHelper.js:
--------------------------------------------------------------------------------
1 | const fs = require('fs');
2 |
3 |
4 | class FileHelper {
5 |
6 | static writeDataToFile(filePath, data) {
7 | fs.writeFileSync(filePath, data);
8 | }
9 |
10 | static readData(filePath, fileFormat) {
11 | return fs.readFileSync(filePath, fileFormat)
12 | }
13 |
14 | static createReadStream(filePath) {
15 | return fs.createReadStream(filePath)
16 | }
17 |
18 | }
19 |
20 | module.exports = FileHelper
21 |
--------------------------------------------------------------------------------
/IntelliNode/model/input/FineTuneInput.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | */
8 | class FineTuneInput {
9 | constructor({ training_file, model }) {
10 | this.training_file = training_file
11 | this.model = model
12 | }
13 |
14 | getOpenAIInput() {
15 | const params = {
16 | training_file: this.training_file,
17 | model: this.model,
18 | };
19 | return params;
20 | }
21 | }
22 |
23 | module.exports = FineTuneInput;
24 |
--------------------------------------------------------------------------------
/IntelliNode/test/unit/Prompt.test.js:
--------------------------------------------------------------------------------
1 | const assert = require('assert');
2 | const SystemHelper = require("../../utils/SystemHelper");
3 | const Prompt = require("../../utils/Prompt");
4 |
5 |
6 | function testPrompt() {
7 | const text = 'sample text';
8 | const template = new SystemHelper().loadPrompt("html_page");
9 | const promptTemp = new Prompt(template);
10 |
11 | const formattedText = promptTemp.format({'text': text});
12 |
13 | assert(formattedText.includes(text), 'Formatted text does not contain the sample text');
14 |
15 | }
16 |
17 | module.exports = testPrompt;
--------------------------------------------------------------------------------
/IntelliNode/test/integration/LanguageModelInput.test.js:
--------------------------------------------------------------------------------
1 | const LanguageModelInput = require("../../model/input/LanguageModelInput");
2 |
3 | function testOpenAIInputs() {
4 | const input = new LanguageModelInput({
5 | prompt: "Once upon a time",
6 | });
7 |
8 | input.setDefaultValues("openai");
9 |
10 | const openAIInputs = input.getOpenAIInputs();
11 |
12 | if (openAIInputs.prompt !== "Once upon a time") {
13 | console.error("Error: Prompt value is incorrect.");
14 | } else {
15 | console.log("OpenAI inputs:", openAIInputs);
16 | }
17 | }
18 |
19 | testOpenAIInputs();
--------------------------------------------------------------------------------
/IntelliNode/resource/templates/html_page_prompt.in:
--------------------------------------------------------------------------------
1 | Generate a website, javascript, and css in one page based on the user request.
2 |
3 | Output format:
4 | {"html": "
[generated head content][generated body content]", "message"":"the page ready for render"}
5 |
6 | Ensure the page is compatible with different screen sizes and use ready-made Bootstrap components when needed.
7 |
8 | If an image is generated, add a clear image description in the alt attribute to use for image generation:
9 |
10 |
11 | user request: ${text}
12 |
13 | output:
--------------------------------------------------------------------------------
/samples/lambda_llama_sagemaker/README.md:
--------------------------------------------------------------------------------
1 | # IntelliNode - Llama Connector
2 |
3 | The Llama v2 chatbot is available as part of Amazon SageMaker JumpStart. Once you have deployed your Llama model, you'll need to create a Lambda function to connect to the model, then trigger the Lambda from the API gateway.
4 |
5 | This folder contains a Lambda export which establishes a connection to your SageMaker Llama deployment.
6 |
7 | ### Prerequisites
8 | - Create a SageMaker domain.
9 | - Deploy the Llama model using SageMaker JumpStart.
10 | - Copy the endpoint name.
11 | - Create a Node.js Lambda function.
12 | - Create an environment variable `llama_endpoint` with the SageMaker endpoint value.
13 | - Import the zip file into your Lambda function.
14 |
--------------------------------------------------------------------------------
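
The contents of `llama-endpoint-v1.zip` aren't expanded in this snapshot; below is a minimal sketch of the kind of handler it would contain, assuming the AWS SDK v3 SageMaker runtime client and the `llama_endpoint` environment variable described above:

```
// Lambda handler (sketch) - forward the request body to the SageMaker endpoint
const {
  SageMakerRuntimeClient,
  InvokeEndpointCommand,
} = require('@aws-sdk/client-sagemaker-runtime');

const client = new SageMakerRuntimeClient({});

exports.handler = async (event) => {
  const payload = JSON.parse(event.body); // e.g. { inputs: [...], parameters: {...} }

  const response = await client.send(new InvokeEndpointCommand({
    EndpointName: process.env.llama_endpoint,
    ContentType: 'application/json',
    Body: JSON.stringify(payload),
    CustomAttributes: 'accept_eula=true', // Llama JumpStart models require the EULA flag
  }));

  // response.Body is a byte array; decode it back to JSON
  const result = JSON.parse(Buffer.from(response.Body).toString('utf8'));
  return { statusCode: 200, body: JSON.stringify(result) };
};
```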
/IntelliNode/test/integration/GoogleAIWrapper.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const GoogleAIWrapper = require('../../wrappers/GoogleAIWrapper');
3 |
4 | const googleAI = new GoogleAIWrapper(process.env.GOOGLE_API_KEY);
5 |
6 | async function testGenerateSpeech() {
7 | try {
8 | const params = {
9 | text: 'Welcome to IntelliNode',
10 | languageCode: 'en-US',
11 | name: 'en-US-Wavenet-A',
12 | ssmlGender: 'MALE',
13 | };
14 |
15 | const result = await googleAI.generateSpeech(params);
16 | console.log('Generate Speech Result:', result['audioContent']);
17 | } catch (error) {
18 | console.error('Generate Speech Error:', error);
19 | }
20 | }
21 |
22 | (async () => {
23 | await testGenerateSpeech();
24 | })();
--------------------------------------------------------------------------------
/samples/command_sample/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "command_sample",
3 | "version": "0.0.1",
4 | "description": "access any AI model using IntelliNode.",
5 | "main": "index.js",
6 | "scripts": {
7 | "test": "echo \"Error: no test specified\" && exit 1"
8 | },
9 | "repository": {
10 | "type": "git",
11 | "url": "git+https://github.com/Barqawiz/IntelliNode.git"
12 | },
13 | "keywords": [
14 | "ai"
15 | ],
16 | "author": "Ahmad Albarqawi",
17 | "license": "ISC",
18 | "bugs": {
19 | "url": "https://github.com/Barqawiz/IntelliNode/issues"
20 | },
21 | "homepage": "https://github.com/Barqawiz/IntelliNode#readme",
22 | "dependencies": {
23 | "@aws-sdk/client-s3": "^3.363.0",
24 | "dotenv": "^16.0.3",
25 | "intellinode": "^2.2.9"
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/IntelliNode/resource/templates/instruct_update.in:
--------------------------------------------------------------------------------
1 | Update the model output and make sure to maintain the format. Don't use the example content with the user message.
2 | Return all the model generated after applying the instructions.
3 |
4 | Example:
5 | the model generated json html output: ###{"html": "Title1"}###
6 | the user update instructions: ###change to Title2###
7 | output: {"html": "Title2"}
8 | ----
9 | the model generated text output: ###Text1 example bla bla###
10 | the user update instructions: ###change to text2###
11 | output: Text2 example bla bla
12 | ===
13 | User message:
14 | the model generated ${type} output: ###${model_output}###
15 | the user update instructions: ###${user_instruction}###
16 | output:
--------------------------------------------------------------------------------
/IntelliNode/utils/Config2.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | */
8 | const FileHelper = require('./FileHelper')
9 | const path = require('path');
10 |
11 | // WARNING: This file is deprecated
12 | class Config2 {
13 | constructor() {
14 | const configPath = path.join(__dirname, '..', 'config.json');
15 | this.config = JSON.parse(FileHelper.readData(configPath, 'utf-8'));
16 | }
17 |
18 | getProperty(key) {
19 | return key.split('.').reduce((obj, k) => (obj && obj[k] !== null && obj[k] !== 'undefined') ? obj[k] : null, this.config);
20 | }
21 |
22 | static getInstance() {
23 | if (!Config2.instance) {
24 | Config2.instance = new Config2();
25 | }
26 | return Config2.instance;
27 | }
28 | }
29 |
30 | module.exports = Config2;
--------------------------------------------------------------------------------
/IntelliNode/wrappers/AWSEndpointWrapper.js:
--------------------------------------------------------------------------------
1 | const FetchClient = require('../utils/FetchClient');
2 |
3 | class AWSEndpointWrapper {
4 | constructor(apiUrl, apiKey = null) {
5 | this.API_BASE_URL = apiUrl;
6 |
7 | let headers = {
8 | 'Content-Type': 'application/json',
9 | };
10 |
11 | if (apiKey) {
12 | headers['Authorization'] = `Bearer ${apiKey}`;
13 | }
14 |
15 | // Create our FetchClient with the base url + default headers
16 | this.client = new FetchClient({
17 | baseURL: this.API_BASE_URL,
18 | headers: headers,
19 | });
20 | }
21 |
22 | async predict(inputData) {
23 | try {
24 | return await this.client.post('', inputData);
25 | } catch (error) {
26 | throw error; // You can wrap this in a custom error message if you wish
27 | }
28 | }
29 | }
30 |
31 | module.exports = AWSEndpointWrapper;
32 |
--------------------------------------------------------------------------------
/samples/command_sample/test_chatbot_gemini.js:
--------------------------------------------------------------------------------
1 | const { Chatbot, GeminiInput, SupportedChatModels } = require('intellinode');
2 | // load the API keys from the .env file
3 | const dotenv = require('dotenv');
4 | dotenv.config();
5 |
6 | async function callChatbot(apiKey, provider) {
7 | const chatbot = new Chatbot(apiKey, provider);
8 |
9 | const system = 'You are a helpful assistant.';
10 | const input = new GeminiInput(system);
11 | input.addUserMessage('What is the distance between the Earth and the Moon?');
12 | // console.log(input.messages);
13 |
14 | const responses = await chatbot.chat(input);
15 |
16 | console.log(`Chatbot responses (${provider}):`);
17 | responses.forEach(response => console.log('- ', response));
18 | }
19 |
20 | (async () => {
21 | // Test chatbot using Gemini
22 | console.log('test the chat function')
23 | await callChatbot(process.env.GEMINI_API_KEY, SupportedChatModels.GEMINI);
24 | })();
25 |
--------------------------------------------------------------------------------
/IntelliNode/test/integration/RemoteEmbedModelVLLM.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const { RemoteEmbedModel, SupportedEmbedModels } = require('../../controller/RemoteEmbedModel');
4 | const EmbedInput = require('../../model/input/EmbedInput');
5 |
6 | const embedUrl = process.env.VLLM_EMBED_URL;
7 | const embedModel = new RemoteEmbedModel(null, SupportedEmbedModels.VLLM, { baseUrl: embedUrl });
8 |
9 | async function testVLLMEmbed() {
10 | const input = new EmbedInput({ texts: ["Hello world"] });
11 | const result = await embedModel.getEmbeddings(input);
12 |
13 | console.log('Embedding result:', result);
14 |
15 | assert.strictEqual(result.length, 1, 'Should return exactly one embedding.');
16 | assert(result[0].embedding.length > 0, 'Embedding should not be empty.');
17 | console.log('Embedding length:', result[0].embedding.length);
18 | }
19 |
20 | (async () => {
21 | await testVLLMEmbed();
22 | })();
23 |
--------------------------------------------------------------------------------
/IntelliNode/utils/AudioHelper.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | */
8 | const FileHelper = require('./FileHelper')
9 |
10 | class AudioHelper {
11 | constructor() {
12 | this.isLog = true;
13 | }
14 |
15 | decode(audioContent) {
16 | const buff = Buffer.from(audioContent, 'base64');
17 | return buff;
18 | }
19 |
20 | saveAudio(decodedAudio, directory, fileName) {
21 | if (!fileName.endsWith('.mp3') && !fileName.endsWith('.wav')) {
22 | if (this.isLog) console.error('Unsupported audio format: send mp3 or wav');
23 | return false;
24 | }
25 |
26 | try {
27 | const filePath = `${directory}/${fileName}`;
28 | FileHelper.writeDataToFile(filePath, decodedAudio);
29 | return true;
30 | } catch (error) {
31 | if (this.isLog) console.error(error);
32 | return false;
33 | }
34 | }
35 | }
36 |
37 | module.exports = AudioHelper;
38 |
--------------------------------------------------------------------------------
/samples/command_sample/test_speech_models.js:
--------------------------------------------------------------------------------
1 | const { RemoteSpeechModel, Text2SpeechInput, AudioHelper } = require('intellinode');
2 | // load the API keys from the .env file
3 | const dotenv = require('dotenv');
4 | dotenv.config();
5 |
6 | const audioHelper = new AudioHelper();
7 |
8 | async function generateSpeech(apiKey, text, language) {
9 | const speechModel = new RemoteSpeechModel(apiKey);
10 | const input = new Text2SpeechInput({ text: text, language: language });
11 | const audioContent = await speechModel.generateSpeech(input);
12 |
13 | const decodedAudio = audioHelper.decode(audioContent);
14 |
15 | const saved = audioHelper.saveAudio(decodedAudio, './temp', 'temp.mp3');
16 | console.log(`Audio file saved: ${saved}`);
17 | console.log('check the temp folder')
18 |
19 | }
20 |
21 | (async () => {
22 | // Generate speech
23 | const apiKey = process.env.GOOGLE_API_KEY;
24 | const text = 'Welcome to Intelligent Node';
25 | const language = 'en-gb';
26 |
27 | await generateSpeech(apiKey, text, language);
28 | })();
29 |
--------------------------------------------------------------------------------
/IntelliNode/test/integration/AnthropicWrapper.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const AnthropicWrapper = require('../../wrappers/AnthropicWrapper');
4 |
5 | // initiate anthropic object
6 | const anthropic = new AnthropicWrapper(process.env.ANTHROPIC_API_KEY);
7 |
8 | async function testAnthropicGenerate() {
9 | try {
10 | const params = {
11 | "model": "claude-3-sonnet-20240229",
12 | "messages": [
13 | {
14 | "role": "user",
15 | "content": "Who is the most renowned French painter? Provide a single direct short answer."
16 | }
17 | ],
18 | "max_tokens": 256
19 | };
20 |
21 | const result = await anthropic.generateText(params);
22 | console.log('Anthropic Language Model Result:', result.content[0].text);
23 | } catch (error) {
24 | console.error('Anthropic Language Model Error:', error);
25 | }
26 | }
27 |
28 | (async () => {
29 | await testAnthropicGenerate();
30 | })();
--------------------------------------------------------------------------------
/IntelliNode/test/unit/testRunner.js:
--------------------------------------------------------------------------------
1 | const testCohereAIWrapper = require('./CohereAIWrapper.test');
2 | const testGoogleAIWrapper = require('./GoogleAIWrapper.test');
3 | const testHuggingWrapper = require('./HuggingWrapper.test');
4 | const {testOpenAIWrapper, testOpenAIOrganization} = require('./OpenAIWrapper.test');
5 | const testStabilityAIWrapper = require('./StabilityAIWrapper.test');
6 | const testPrompt = require('./Prompt.test');
7 | const IntelliNode = require('../../index');
8 |
9 | console.log('Sanity Check...');
10 | console.log(Object.keys(IntelliNode));
11 |
12 | console.log('Running Prompt unit tests...');
13 | testPrompt();
14 |
15 | console.log('Running CohereAIWrapper unit tests...');
16 | testCohereAIWrapper();
17 |
18 | console.log('Running GoogleAIWrapper unit tests...');
19 | testGoogleAIWrapper();
20 |
21 | console.log('Running HuggingWrapper unit tests...');
22 | testHuggingWrapper();
23 |
24 | console.log('Running OpenAIWrapper unit tests...');
25 | testOpenAIWrapper();
26 | testOpenAIOrganization();
27 |
28 | console.log('Running Stability unit tests...');
29 | testStabilityAIWrapper()
30 |
--------------------------------------------------------------------------------
/samples/command_sample/test_semantic_search.js:
--------------------------------------------------------------------------------
1 | const { SemanticSearch } = require('intellinode');
2 | // load the API keys from the .env file
3 | const dotenv = require('dotenv');
4 | dotenv.config();
5 |
6 | async function callSemanticSearch(apiKey, provider) {
7 | const pivotItem = 'Hello from OpenAI!';
8 | const searchArray = ['Greetings from OpenAI!', 'Bonjour de OpenAI!', 'Hola desde OpenAI!'];
9 | const numberOfMatches = 2;
10 |
11 | const search = new SemanticSearch(apiKey, provider);
12 |
13 | const results = await search.getTopMatches(pivotItem, searchArray, numberOfMatches);
14 | console.log('OpenAI Semantic Search Results:', results);
15 | console.log('top matches:', search.filterTopMatches(results, searchArray));
16 | }
17 |
18 | (async () => {
19 |
20 | // Test the search using openAI
21 | console.log('### Openai semantic search ###')
22 | await callSemanticSearch(process.env.OPENAI_API_KEY, 'openai');
23 |
24 | // Test the search using cohere
25 | console.log('\n### Cohere semantic search ###')
26 | await callSemanticSearch(process.env.COHERE_API_KEY, 'cohere');
27 |
28 | })();
29 |
--------------------------------------------------------------------------------
/IntelliNode/test/integration/Prompt.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require("assert");
3 | const Prompt = require("../../utils/Prompt");
4 |
5 | const openaiApiKey = process.env.OPENAI_API_KEY;
6 |
7 | async function testGeneratedPrompt1() {
8 | const promptTemp = await Prompt.fromChatGPT("generate a fantasy image with ninja jumping across buildings", openaiApiKey);
9 |
10 | const promptString = promptTemp.getInput();
11 |
12 | console.log("the generated prompt: ", promptString);
13 |
14 | assert(promptString.length > 0, "Test failed");
15 | }
16 |
17 | async function testGeneratedPrompt2() {
18 | const promptTemp = await Prompt.fromChatGPT("information retrieval about {reference} text with ${query} user input", openaiApiKey);
19 |
20 | const promptString = promptTemp.getInput();
21 |
22 | console.log("the generated prompt: ", promptString);
23 |
24 | assert(promptString.length > 0, "Test failed");
25 | }
26 |
27 |
28 | (async () => {
29 | console.log('test prompt 1:');
30 | await testGeneratedPrompt1();
31 |
32 | console.log('test prompt 2:');
33 | await testGeneratedPrompt2();
34 | })();
--------------------------------------------------------------------------------
/IntelliNode/test/unit/StabilityAIWrapper.test.js:
--------------------------------------------------------------------------------
1 | const assert = require('assert');
2 | const StabilityAIWrapper = require('../../wrappers/StabilityAIWrapper');
3 | const config = require('../../config.json');
4 |
5 | function testStabilityAIWrapper() {
6 | const apiKey = 'your-api-key';
7 | const stabilityAIWrapper = new StabilityAIWrapper(apiKey);
8 |
9 | assert.strictEqual(
10 | stabilityAIWrapper.API_KEY,
11 | apiKey,
12 | 'API key should be set'
13 | );
14 | assert.ok(
15 | stabilityAIWrapper.client,
16 | 'httpClient should be created'
17 | );
18 |
19 | // Test httpClient configuration
20 | const expectedBaseURL = config.url.stability.base;
21 | const expectedAuthHeader = `Bearer ${apiKey}`;
22 |
23 | assert.strictEqual(
24 | stabilityAIWrapper.client.baseURL,
25 | expectedBaseURL,
26 | 'httpClient baseURL should be set correctly'
27 | );
28 | assert.strictEqual(
29 | stabilityAIWrapper.client.defaultHeaders['Authorization'],
30 | expectedAuthHeader,
31 | 'httpClient Authorization header should be set correctly'
32 | );
33 | }
34 |
35 | module.exports = testStabilityAIWrapper;
36 |
--------------------------------------------------------------------------------
/samples/command_sample/test_azure_chatbot.js:
--------------------------------------------------------------------------------
1 | const { Chatbot, ChatGPTInput, ChatGPTMessage } = require('intellinode');
2 | const { ProxyHelper } = require('intellinode');
3 |
4 | // load the API keys from the .env file
5 | const dotenv = require('dotenv');
6 | dotenv.config();
7 |
8 | async function callChatbot(apiKey, provider, modelName) {
9 | const chatbot = new Chatbot(apiKey, provider);
10 |
11 | const system = 'You are a helpful assistant.';
12 | const input = new ChatGPTInput(system);
13 | input.addUserMessage('What is the distance between the Earth and the Moon?');
14 | input.numberOfOutputs = 1;
15 | input.model = modelName
16 |
17 | const responses = await chatbot.chat(input);
18 |
19 | console.log(`Chatbot responses (${provider}):`);
20 | responses.forEach(response => console.log('- ', response));
21 | }
22 |
23 | (async () => {
24 |
25 | const args = process.argv.slice(2);
26 | const resourceName = args[0];
27 | const modelName = args[1];
28 | ProxyHelper.getInstance().setAzureOpenai(resourceName);
29 | // Test chatbot using Azure OpenAI
30 | await callChatbot(process.env.AZURE_OPENAI_API_KEY, 'openai', modelName);
31 | })();
32 |
--------------------------------------------------------------------------------
/IntelliNode/resource/templates/graph_dashboard_prompt.in:
--------------------------------------------------------------------------------
1 | Generate an HTML dashboard using chart.js with ${count} graphs about the ${topic} topic from the provided data. Each graph should showcase the relationships between selected columns, ensuring the graphs are relevant to the topic.
2 |
3 | Output example:
4 | [{
5 | "html": "[Insert required styles and scripts for Chart.js][Include code for all the ${count} graphs]",
6 | "message": "the page ready to render"
7 | }]
8 |
9 | Follow these instructions:
10 | ---
11 | 1. Return a single JSON response in the style shown in the output example.
12 | 2. Use Chart.js for generating the graphs wherever possible.
13 | 3. Use \" before any generated double quotation marks to ensure a valid JSON response.
14 | 4. Design elegant, modern dashboard charts based on the provided data.
15 | 5. Make sure the response is a valid JSON containing the complete HTML content.
16 | 6. Ensure the response will not truncate in any circumstance.
17 | 7. Select a sample of the data based on the user instructions, ensuring it fits on one page.
18 | 8. Reply only with generated code.
19 |
20 | User data: ###${text}###
--------------------------------------------------------------------------------
/IntelliNode/reference_scripts.txt:
--------------------------------------------------------------------------------
1 | ## Test wrapper functions ##
2 |
3 | 1- create a .env file in the root directory with keys:
4 | OPENAI_API_KEY=
5 | COHERE_API_KEY=
6 | GOOGLE_API_KEY=
7 | STABILITY_API_KEY=
8 |
9 |
10 | 2- run the OpenAI wrapper integration test cases:
11 | node test/integration/OpenAIWrapper.test.js
12 |
13 |
14 | 3- run the Cohere wrapper integration test cases:
15 | node test/integration/CohereAIWrapper.test.js
16 |
17 |
18 | 4- run the Google wrapper integration test cases:
19 | node test/integration/GoogleAIWrapper.test.js
20 |
21 |
22 | ## Input objects ##
23 |
24 | 1- Language model input test cases:
25 | node test/integration/LanguageModelInput.test.js
26 |
27 |
28 |
29 | ## Remote models ##
30 | 1- run the remote language models test cases:
31 | node test/integration/RemoteLanguageModel.test.js
32 |
33 |
34 | 2- run the remote image models test cases:
35 | node test/integration/RemoteImageModel.test.js
36 |
37 |
38 | 3- run the remote speech models test cases:
39 | node test/integration/RemoteSpeechModel.test.js
40 |
41 |
42 | 4- run the chatbot test cases:
43 | node test/integration/Chatbot.test.js
44 |
45 | # publish command
46 | # npm publish
--------------------------------------------------------------------------------
/IntelliNode/model/input/FunctionModelInput.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | */
8 | class FunctionModelInput {
9 | /**
10 | * Function input constructor.
11 | * @param {string} name - The name of the function.
12 | * @param {string} [description] - The description of the function. (Optional)
13 | * @param {object} [parameters] - The parameters of the function. (Optional)
14 | * @param {string} [parameters.type] - The data type of the parameters.
15 | * @param {object} [parameters.properties] - The properties or fields of the parameters.
16 | * @param {string[]} [parameters.required] - The required properties. (Optional)
17 | */
18 | constructor(name, description, parameters) {
19 | this.name = name;
20 | this.description = description || '';
21 | this.parameters = parameters || {type: 'object', properties: {}};
22 | }
23 |
24 | getFunctionModelInput() {
25 | return {
26 | name: this.name,
27 | description: this.description,
28 | parameters: this.parameters,
29 | };
30 | }
31 | }
32 |
33 | module.exports = FunctionModelInput ;
34 |
--------------------------------------------------------------------------------
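
A short usage sketch for the class above; the `get_weather` schema is a made-up example, not part of the library:

```
const FunctionModelInput = require('./model/input/FunctionModelInput');

// describe a hypothetical get_weather function for function-calling models
const weatherFn = new FunctionModelInput(
  'get_weather',
  'Get the current weather for a city',
  {
    type: 'object',
    properties: { city: { type: 'string', description: 'City name' } },
    required: ['city'],
  }
);

// produces the { name, description, parameters } object expected by the API
console.log(weatherFn.getFunctionModelInput());
```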
/IntelliNode/wrappers/MistralAIWrapper.js:
--------------------------------------------------------------------------------
1 | /*Apache License
2 | Copyright 2023 Github.com/Barqawiz/IntelliNode*/
3 | const config = require('../config.json');
4 | const connHelper = require('../utils/ConnHelper');
5 | const FetchClient = require('../utils/FetchClient');
6 |
7 | class MistralAIWrapper {
8 | constructor(apiKey) {
9 | this.API_BASE_URL = config.url.mistral.base;
10 |
11 | this.client = new FetchClient({
12 | baseURL: this.API_BASE_URL,
13 | headers: {
14 | 'Content-Type': 'application/json',
15 | Accept: 'application/json',
16 | Authorization: `Bearer ${apiKey}`
17 | }
18 | });
19 | }
20 |
21 | async generateText(params) {
22 | const endpoint = config.url.mistral.completions;
23 | try {
24 | return await this.client.post(endpoint, params);
25 | } catch (error) {
26 | throw new Error(connHelper.getErrorMessage(error));
27 | }
28 | }
29 |
30 | async getEmbeddings(params) {
31 | const endpoint = config.url.mistral.embed;
32 | try {
33 | return await this.client.post(endpoint, params);
34 | } catch (error) {
35 | throw new Error(connHelper.getErrorMessage(error));
36 | }
37 | }
38 | }
39 |
40 | module.exports = MistralAIWrapper;
41 |
--------------------------------------------------------------------------------
/IntelliNode/wrappers/AnthropicWrapper.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | */
8 | const config = require('../config.json');
9 | const connHelper = require('../utils/ConnHelper');
10 | const FetchClient = require('../utils/FetchClient');
11 |
12 | class AnthropicWrapper {
13 | constructor(apiKey) {
14 | this.API_BASE_URL = config.url.anthropic.base;
15 | this.API_VERSION = config.url.anthropic.version;
16 |
17 | // Create our FetchClient instance
18 | this.client = new FetchClient({
19 | baseURL: this.API_BASE_URL,
20 | headers: {
21 | 'Content-Type': 'application/json',
22 | Accept: 'application/json',
23 | 'x-api-key': apiKey,
24 | 'anthropic-version': this.API_VERSION,
25 | },
26 | });
27 | }
28 |
29 | async generateText(params) {
30 | const endpoint = config.url.anthropic.messages;
31 |
32 | try {
33 | // Use the client’s post method
34 | return await this.client.post(endpoint, params);
35 | } catch (error) {
36 | throw new Error(connHelper.getErrorMessage(error));
37 | }
38 | }
39 | }
40 |
41 | module.exports = AnthropicWrapper;
42 |
--------------------------------------------------------------------------------
/IntelliNode/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "intellinode",
3 | "version": "2.3.0",
4 | "description": "Create AI agents using the latest models, including ChatGPT, Llama, Diffusion, Cohere, Gemini, and Hugging Face.",
5 | "main": "index.js",
6 | "keywords": [
7 | "ai",
8 | "ChatGPT",
9 | "stable diffusion",
10 | "openai",
11 | "huggingface",
12 | "Llama",
13 | "image generation",
14 | "speech synthesis",
15 | "prompt",
16 | "automation",
17 | "mistral",
18 | "gemini",
19 | "deepseek",
20 | "framework",
21 | "mcp"
22 | ],
23 | "author": "IntelliNode",
24 | "license": "Apache",
25 | "repository": {
26 | "type": "git",
27 | "url": "https://github.com/intelligentnode/IntelliNode.git"
28 | },
29 | "scripts": {
30 | "build": "browserify index.js --standalone IntelliNode -o front/intellinode.js && uglifyjs front/intellinode.js -o front/intellinode.min.js",
31 | "test": "node test/unit/testRunner"
32 | },
33 | "homepage": "https://docs.intellinode.ai",
34 | "devDependencies": {
35 | "browserify": "^17.0.0",
36 | "uglify-js": "^3.17.0"
37 | },
38 | "dependencies": {
39 | "cross-fetch": "^4.1.0",
40 | "dotenv": "^16.4.7",
41 | "form-data": "^4.0.1"
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/samples/command_sample/test_image_models.js:
--------------------------------------------------------------------------------
1 | const { RemoteImageModel, SupportedImageModels, ImageModelInput } = require('intellinode');
2 | // load the API keys from the .env file
3 | const dotenv = require('dotenv');
4 | dotenv.config();
5 |
6 | async function generateImages(apiKey, provider, imageInput) {
7 | const imgModel = new RemoteImageModel(apiKey, provider);
8 | const images = await imgModel.generateImages(imageInput);
9 | console.log(`Generated images (${provider}):`);
10 | images.forEach(image => console.log('- ', image));
11 | }
12 |
13 | (async () => {
14 | // Generate image using OpenAI
15 | const openaiKey = process.env.OPENAI_API_KEY;
16 | const prompt = 'teddy writing a blog in times square';
17 | const openaiImageInput = new ImageModelInput({
18 | prompt: prompt,
19 | numberOfImages: 3
20 | });
21 |
22 | await generateImages(openaiKey, SupportedImageModels.OPENAI, openaiImageInput);
23 |
24 | // Generate image using Stability
25 | const stabilityKey = process.env.STABILITY_API_KEY;
26 | const stabilityImageInput = new ImageModelInput({
27 | prompt: prompt,
28 | numberOfImages: 1,
29 | width: 512,
30 | height: 512
31 | });
32 |
33 | await generateImages(stabilityKey, SupportedImageModels.STABILITY, stabilityImageInput);
34 | })();
--------------------------------------------------------------------------------
/samples/command_sample/test_language_models.js:
--------------------------------------------------------------------------------
1 | const { RemoteLanguageModel, SupportedLangModels, LanguageModelInput } = require('intellinode');
2 | // load the API keys from the .env file
3 | const dotenv = require('dotenv');
4 | dotenv.config();
5 |
6 | async function generateText(apiKey, provider, model, prompt, temperature) {
7 | const langModel = new RemoteLanguageModel(apiKey, provider);
8 | const results = await langModel.generateText(new LanguageModelInput({
9 | prompt: prompt,
10 | model: model,
11 | temperature: temperature,
12 | maxTokens: 200
13 | }));
14 | console.log(`- Generated ${provider} text:`, results[0]);
15 | }
16 |
17 | (async () => {
18 | // Generate text using OpenAI
19 | const openaiKey = process.env.OPENAI_API_KEY;
20 | const openaiModel = 'gpt-3.5-turbo-instruct';
21 | const prompt = 'Write a product description for smart plug that works with voice assistant.';
22 | const temperature = 0.7;
23 |
24 | await generateText(openaiKey, SupportedLangModels.OPENAI, openaiModel, prompt, temperature);
25 |
26 | // Generate text using Cohere
27 | const cohereKey = process.env.COHERE_API_KEY;
28 | const cohereModel = 'command';
29 |
30 | await generateText(cohereKey, SupportedLangModels.COHERE, cohereModel, prompt, temperature);
31 | })();
--------------------------------------------------------------------------------
/IntelliNode/wrappers/ReplicateWrapper.js:
--------------------------------------------------------------------------------
1 | /*Apache License
2 | Copyright 2023 Github.com/Barqawiz/IntelliNode*/
3 | const config = require('../config.json');
4 | const connHelper = require('../utils/ConnHelper');
5 | const FetchClient = require('../utils/FetchClient');
6 |
7 | class ReplicateWrapper {
8 | constructor(apiKey) {
9 | this.API_BASE_URL = config.url.replicate.base;
10 | this.API_KEY = apiKey;
11 |
12 | this.client = new FetchClient({
13 | baseURL: this.API_BASE_URL,
14 | headers: {
15 | 'Content-Type': 'application/json',
16 | Authorization: `Token ${this.API_KEY}`
17 | }
18 | });
19 | }
20 |
21 | async predict(modelTag, inputData) {
22 | const endpoint = config.url.replicate.predictions;
23 | try {
24 | return await this.client.post(endpoint, inputData);
25 | } catch (error) {
26 | throw new Error(connHelper.getErrorMessage(error));
27 | }
28 | }
29 |
30 | async getPredictionStatus(predictionId) {
31 | const endpoint = `/v1/predictions/${predictionId}`;
32 | try {
33 | // GET request
34 | return await this.client.get(endpoint);
35 | } catch (error) {
36 | throw new Error(connHelper.getErrorMessage(error));
37 | }
38 | }
39 | }
40 |
41 | module.exports = ReplicateWrapper;
42 |
--------------------------------------------------------------------------------
/IntelliNode/test/integration/AzureRemoteEmbedModel.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const { RemoteEmbedModel, SupportedEmbedModels } = require('../../controller/RemoteEmbedModel');
4 | const EmbedInput = require('../../model/input/EmbedInput');
5 | const ProxyHelper = require('../../utils/ProxyHelper');
6 |
7 | const openaiApiKey = process.env.AZURE_OPENAI_API_KEY;
8 |
9 |
10 | async function testOpenAIEmbeddings(proxyHelper, modelName) {
11 | console.log('start testOpenAIEmbeddings');
12 |
13 | const openaiEmbedModel = new RemoteEmbedModel(openaiApiKey, SupportedEmbedModels.OPENAI, proxyHelper);
14 |
15 | const embedInput = new EmbedInput({
16 | texts: ['Hello from OpenAI!', '您好,来自 OpenAI!'],
17 | model: modelName,
18 | });
19 |
20 | const results = await openaiEmbedModel.getEmbeddings(embedInput);
21 | console.log('OpenAI Embeddings:', results);
22 | assert(results.length > 0, 'Test failed: no embeddings returned');
23 | }
24 |
25 | (async () => {
26 |
27 | const args = process.argv.slice(2);
28 | const resourceName = args[0];
29 | const modelName = args[1];
30 |
31 | // set azure openai parameters
32 | const proxyHelper = new ProxyHelper();
33 | proxyHelper.setAzureOpenai(resourceName);
34 |
35 | await testOpenAIEmbeddings(proxyHelper, modelName);
36 | })();
--------------------------------------------------------------------------------
/IntelliNode/function/SemanticSearchPaging.js:
--------------------------------------------------------------------------------
1 | const { SemanticSearch } = require('./SemanticSearch'); // assuming path
2 |
3 | class SemanticSearchPaging extends SemanticSearch {
4 | constructor(keyValue, provider, pivotItem, numberOfMatches) {
5 | super(keyValue, provider);
6 | this.pivotItem = pivotItem;
7 | this.numberOfMatches = numberOfMatches;
8 | this.textAndMatches = []; // To store { text: '...', similarity: 0.9 } results
9 | this.topMatches = [];
10 | }
11 |
12 | async addNewData(newSearchItems) {
13 | // get the best matches for new items
14 | const newMatches = await super.getTopMatches(this.pivotItem, newSearchItems, newSearchItems.length);
15 |
16 | // map the matches format
17 | const newMatchesWithText = newMatches.map(match => ({
18 | text: newSearchItems[match.index],
19 | score: match.similarity,
20 | }));
21 |
22 | // combine with old top matches and sort
23 | this.topMatches = [...this.topMatches, ...newMatchesWithText]
24 | .sort((a, b) => b.score - a.score)
25 | .slice(0, this.numberOfMatches);
26 | }
27 |
28 | getCurrentTopMatches() {
29 | return this.topMatches;
30 | }
31 |
32 | clean() {
33 | this.topMatches = [];
34 | }
35 | }
36 |
37 | module.exports = { SemanticSearchPaging };
--------------------------------------------------------------------------------
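
A usage sketch for the paging class above, assuming an OpenAI key; it keeps only the top matches in memory while data arrives in pages:

```
require('dotenv').config();
const { SemanticSearchPaging } = require('./function/SemanticSearchPaging');

(async () => {
  // keep the top 2 matches for the pivot sentence across pages of data
  const search = new SemanticSearchPaging(
    process.env.OPENAI_API_KEY, 'openai', 'Hello from OpenAI!', 2);

  await search.addNewData(['Greetings from OpenAI!', 'Weather update for today']);
  await search.addNewData(['Bonjour de OpenAI!', 'Stock prices are down']);

  console.log(search.getCurrentTopMatches()); // [{ text, score }, ...]
})();
```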
/IntelliNode/utils/MatchHelpers.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | */
8 | class MatchHelpers {
9 |
10 | static cosineSimilarity(a, b) {
11 | if (a.length !== b.length) {
12 | throw new Error('Vectors must have the same dimensions');
13 | }
14 | const dotProduct = a.reduce((sum, ai, i) => sum + ai * b[i], 0);
15 | const magnitudeA = Math.sqrt(a.reduce((sum, ai) => sum + ai * ai, 0));
16 | const magnitudeB = Math.sqrt(b.reduce((sum, bi) => sum + bi * bi, 0));
17 |
18 | return dotProduct / (magnitudeA * magnitudeB);
19 | }
20 |
21 | static euclideanDistance(a, b) {
22 | if (a.length !== b.length) {
23 | throw new Error('Vectors must have the same dimensions');
24 | }
25 |
26 | const distance = Math.sqrt(
27 | a.reduce((sum, ai, i) => sum + (ai - b[i]) ** 2, 0)
28 | );
29 |
30 | return distance;
31 | }
32 |
33 | static manhattanDistance(a, b) {
34 | if (a.length !== b.length) {
35 | throw new Error('Vectors must have the same dimensions');
36 | }
37 |
38 | const distance = a.reduce((sum, ai, i) => sum + Math.abs(ai - b[i]), 0);
39 |
40 | return distance;
41 | }
42 |
43 | }
44 |
45 | module.exports = MatchHelpers;
--------------------------------------------------------------------------------
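
A quick sanity check of the three metrics above on toy vectors:

```
const MatchHelpers = require('./utils/MatchHelpers');

const a = [0.1, 0.2, 0.3];
const b = [0.2, 0.1, 0.4];

console.log(MatchHelpers.cosineSimilarity(a, b));  // closer to 1 = more similar
console.log(MatchHelpers.euclideanDistance(a, b)); // closer to 0 = more similar
console.log(MatchHelpers.manhattanDistance(a, b)); // closer to 0 = more similar
```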
/IntelliNode/test/unit/HuggingWrapper.test.js:
--------------------------------------------------------------------------------
1 | const assert = require('assert');
2 | const HuggingWrapper = require('../../wrappers/HuggingWrapper');
3 | const config = require('../../config.json');
4 |
5 | function testHuggingWrapper() {
6 | const apiKey = 'your-api-key';
7 | const huggingWrapper = new HuggingWrapper(apiKey);
8 |
9 | assert.strictEqual(
10 | huggingWrapper.API_KEY,
11 | apiKey,
12 | 'API key should be set'
13 | );
14 | assert.ok(
15 | huggingWrapper.client,
16 | 'httpClient should be created'
17 | );
18 |
19 | // Test httpClient configuration
20 | const expectedBaseURL = config.url.huggingface.base;
21 | const expectedContentType = 'application/json';
22 | const expectedAuthHeader = `Bearer ${apiKey}`;
23 |
24 | assert.strictEqual(
25 | huggingWrapper.client.baseURL,
26 | expectedBaseURL,
27 | 'httpClient baseURL should be set correctly'
28 | );
29 | assert.strictEqual(
30 | huggingWrapper.client.defaultHeaders['Content-Type'],
31 | expectedContentType,
32 | 'httpClient Content-Type header should be set correctly'
33 | );
34 | assert.strictEqual(
35 | huggingWrapper.client.defaultHeaders['Authorization'],
36 | expectedAuthHeader,
37 | 'httpClient Authorization header should be set correctly'
38 | );
39 | }
40 |
41 | module.exports = testHuggingWrapper;
42 |
--------------------------------------------------------------------------------
/samples/command_sample/test_chatbot_proxy.js:
--------------------------------------------------------------------------------
1 | const { Chatbot, ChatGPTInput, ChatGPTMessage, ProxyHelper } = require('intellinode');
2 | // load the API keys from the .env file
3 | const dotenv = require('dotenv');
4 | dotenv.config();
5 |
6 | // important note: when using proxies, it is your responsibility to ensure the provider is safe
7 | const openaiProxyJson = {
8 | "url":"https://chimeragpt.adventblocks.cc",
9 | "completions":"/v1/completions",
10 | "chatgpt":"/v1/chat/completions",
11 | "imagegenerate":"/v1/images/generations",
12 | "embeddings": "/v1/embeddings",
13 | "audiotranscriptions": "/v1/audio/transcriptions"
14 | }
15 |
16 | const proxyHelper = new ProxyHelper();
17 | proxyHelper.setOpenaiProxyValues(openaiProxyJson)
18 |
19 | async function callChatbot(apiKey, provider) {
20 | const chatbot = new Chatbot(apiKey, provider, proxyHelper);
21 |
22 | const system = 'You are a helpful assistant.';
23 | const input = new ChatGPTInput(system);
24 | input.addUserMessage('为什么答案是42?');
25 | input.numberOfOutputs = 1;
26 |
27 | const responses = await chatbot.chat(input);
28 |
29 | console.log(`Chatbot responses (${provider}):`);
30 | responses.forEach(response => console.log('- ', response));
31 | }
32 |
33 | (async () => {
34 | // Test chatbot using OpenAI
35 | await callChatbot(process.env.OPENAI_API_KEY, 'openai');
36 | })();
37 |
--------------------------------------------------------------------------------
/IntelliNode/test/integration/AWSLLamav2.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const AWSEndpointWrapper = require('../../wrappers/AWSEndpointWrapper');
3 |
4 | const awsWrapper = new AWSEndpointWrapper(process.env.AWS_API_URL);
5 |
6 | async function testAWSEndpointWrapper() {
7 | try {
8 | const inputData = {
9 | "inputs": [
10 | [
11 | {
12 | "role": "system",
13 | "content": "You are helpful assistant"
14 | },
15 | {
16 | "role": "user",
17 | "content": "Explain the plot of the Inception movie in one line."
18 | }
19 | ]
20 | ],
21 | "parameters": {
22 | "max_new_tokens": 200,
23 | "temperature": 0.7
24 | }
25 | };
26 |
27 | const prediction = await awsWrapper.predict(inputData);
28 |
29 | console.log('AWS Predict Result:', prediction);
30 | /*
31 | * response example:
32 | * [
33 | * {
34 | * generation: {
35 | * role: 'assistant',
36 | * content: " ...."
37 | * }
38 | * }
39 | * ]
40 | */
41 |
42 | } catch (error) {
43 | console.error('AWS Error:', error);
44 | }
45 | }
46 |
47 | (async () => {
48 | await testAWSEndpointWrapper();
49 | })();
--------------------------------------------------------------------------------
/IntelliNode/wrappers/VLLMWrapper.js:
--------------------------------------------------------------------------------
1 | const FetchClient = require('../utils/FetchClient');
2 | const connHelper = require('../utils/ConnHelper');
3 |
4 | class VLLMWrapper {
5 | constructor(apiBaseUrl) {
6 | this.client = new FetchClient({
7 | baseURL: apiBaseUrl,
8 | headers: {
9 | 'Content-Type': 'application/json'
10 | }
11 | });
12 | }
13 |
14 | async generateText(params) {
15 | const endpoint = '/v1/completions';
16 | try {
17 | const extraConfig = params.stream ? { responseType: 'stream' } : {};
18 | return await this.client.post(endpoint, params, extraConfig);
19 | } catch (error) {
20 | throw new Error(connHelper.getErrorMessage(error));
21 | }
22 | }
23 |
24 | async generateChatText(params) {
25 | const endpoint = '/v1/chat/completions';
26 | try {
27 | const extraConfig = params.stream ? { responseType: 'stream' } : {};
28 | return await this.client.post(endpoint, params, extraConfig);
29 | } catch (error) {
30 | throw new Error(connHelper.getErrorMessage(error));
31 | }
32 | }
33 |
34 | async getEmbeddings(texts) {
35 | const endpoint = '/embed';
36 | try {
37 | return await this.client.post(endpoint, { texts });
38 | } catch (error) {
39 | throw new Error(connHelper.getErrorMessage(error));
40 | }
41 | }
42 | }
43 |
44 | module.exports = VLLMWrapper;
--------------------------------------------------------------------------------
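
A usage sketch for the wrapper above; the base URL and model name are placeholders for whatever your vLLM server exposes (vLLM's chat endpoint follows the OpenAI response shape):

```
const VLLMWrapper = require('./wrappers/VLLMWrapper');

(async () => {
  const vllm = new VLLMWrapper('http://localhost:8000'); // your vLLM server

  const result = await vllm.generateChatText({
    model: 'meta-llama/Llama-3.1-8B-Instruct', // whatever model the server loaded
    messages: [{ role: 'user', content: 'Say hello in one sentence.' }],
    max_tokens: 64,
  });

  console.log(result.choices[0].message.content);
})();
```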
/samples/command_sample/test_hugging_face.js:
--------------------------------------------------------------------------------
1 | const { HuggingWrapper } = require('intellinode');
2 | // load the API keys from the .env file
3 | const dotenv = require('dotenv');
4 | dotenv.config();
5 |
6 | // common object
7 | const huggingWrapper = new HuggingWrapper(process.env.HUGGING_API_KEY);
8 |
9 | async function testSummarizationTask() {
10 | const inputData = { inputs: 'The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building...' };
11 | // facebook/bart-large-cnn is the model id (JavaScript has no keyword arguments)
12 | const result = await huggingWrapper.generateText('facebook/bart-large-cnn', inputData);
13 | console.log('Summarization Task Result:', result);
14 | }
15 |
16 | async function testImageClassificationTask(imagePath) {
17 | const imageData = require('fs').readFileSync(imagePath);
18 |   const result = await huggingWrapper.processImage('google/vit-base-patch16-224', imageData);
19 | console.log('Image Classification Task Result:', result);
20 | }
21 |
22 | (async () => {
23 | // test text
24 | await testSummarizationTask();
25 |
26 | // test image
27 | const args = process.argv.slice(2);
28 | const imagePath = args[0];
29 |
30 | if (imagePath) {
31 | await testImageClassificationTask(imagePath);
32 | } else {
33 | console.log('Image file not provided. Skipping Image Classification Task.');
34 | }
35 |
36 | })();
37 |
--------------------------------------------------------------------------------
/IntelliNode/test/unit/CohereAIWrapper.test.js:
--------------------------------------------------------------------------------
1 | const assert = require('assert');
2 | const CohereAIWrapper = require('../../wrappers/CohereAIWrapper');
3 | const config = require('../../config.json');
4 |
5 | function testCohereAIWrapper() {
6 | const apiKey = 'your-api-key';
7 | const cohereAIWrapper = new CohereAIWrapper(apiKey);
8 |
9 | assert.strictEqual(
10 | cohereAIWrapper.API_KEY,
11 | apiKey,
12 | 'API key should be set'
13 | );
14 | assert.ok(
15 | cohereAIWrapper.client,
16 | 'client should be created'
17 | );
18 |
19 | // Test httpClient configuration
20 | const expectedBaseURL = config.url.cohere.base;
21 | const expectedCohereVersion = config.url.cohere.version;
22 | const expectedContentType = 'application/json';
23 | const expectedAuthHeader = `Bearer ${apiKey}`;
24 |
25 | assert.strictEqual(
26 | cohereAIWrapper.client.baseURL,
27 | expectedBaseURL,
28 | 'httpClient baseURL should be set correctly'
29 | );
30 | assert.strictEqual(
31 | cohereAIWrapper.client.defaultHeaders['Content-Type'],
32 | expectedContentType,
33 | 'httpClient Content-Type header should be set correctly'
34 | );
35 |   assert.strictEqual(
36 |     cohereAIWrapper.client.defaultHeaders['Authorization'],
37 |     expectedAuthHeader,
38 |     'httpClient Authorization header should be set correctly'
39 |   );
40 |   assert.strictEqual(
41 |     cohereAIWrapper.client.defaultHeaders['Cohere-Version'],
42 |     expectedCohereVersion,
43 |     'httpClient Cohere-Version header should be set correctly'
44 |   );
45 | }
46 | 
47 | module.exports = testCohereAIWrapper;
48 | 
--------------------------------------------------------------------------------
/IntelliNode/test/integration/HuggingWrapper.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const HuggingWrapper = require('../../wrappers/HuggingWrapper');
3 |
4 | const huggingWrapper = new HuggingWrapper(process.env.HUGGING_API_KEY);
5 |
6 | async function testSummarizationTask() {
7 | try {
8 | const modelId = 'facebook/bart-large-cnn';
9 | const inputData = { inputs: 'The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building...' };
10 | const result = await huggingWrapper.generateText(modelId, inputData);
11 | console.log('Summarization Task Result:', result);
12 | } catch (error) {
13 | console.error('Summarization Task Error:', error);
14 | }
15 | }
16 |
17 | async function testImageClassificationTask(imagePath) {
18 | try {
19 | const modelId = 'google/vit-base-patch16-224';
20 | const imageData = require('fs').readFileSync(imagePath);
21 | const result = await huggingWrapper.processImage(modelId, imageData);
22 | console.log('Image Classification Task Result:', result);
23 | } catch (error) {
24 | console.error('Image Classification Task Error:', error);
25 | }
26 | }
27 |
28 | (async () => {
29 | // test text
30 | await testSummarizationTask();
31 |   // test image
32 | const args = process.argv.slice(2);
33 | const imagePath = args[0];
34 |
35 | if (imagePath) {
36 | await testImageClassificationTask(imagePath);
37 | } else {
38 | console.log('Image file not provided. Skipping Image Classification Task.');
39 | }
40 | })();
--------------------------------------------------------------------------------
/samples/command_sample/test_semantic_search_pagination.js:
--------------------------------------------------------------------------------
1 | const { SemanticSearchPaging, SupportedEmbedModels } = require('intellinode');
2 | // load the API keys from the .env file
3 | const dotenv = require('dotenv');
4 | dotenv.config();
5 |
6 | async function callSemanticSearch(apiKey, provider) {
7 | const pivotItem = 'Hello from IntelliNode!';
8 |   const searchArray1 = ['Greetings from IntelliNode!', 'Bonjour de IntelliNode!', '来自 IntelliNode 的问候!'];
9 | const searchArray2 = ['Saudações do IntelliNode!', 'Hola desde IntelliNode!', 'Groeten van IntelliNode!'];
10 | const numberOfMatches = 2;
11 |
12 | const search = new SemanticSearchPaging(apiKey,
13 | provider,
14 | pivotItem,
15 | numberOfMatches);
16 |
17 | await search.addNewData(searchArray1);
18 | await search.addNewData(searchArray2);
19 |
20 |
21 | const results = await search.getCurrentTopMatches();
22 |
23 | console.log('Semantic Search Results:\n', results);
24 |
25 |
26 | }
27 |
28 | (async () => {
29 |
30 | // Test the search using openAI
31 | console.log('### Openai extended semantic search ###')
32 | await callSemanticSearch(process.env.OPENAI_API_KEY, SupportedEmbedModels.OPENAI);
33 |
34 | // Test the search using cohere
35 | console.log('\n### Cohere extended semantic search ###')
36 | await callSemanticSearch(process.env.COHERE_API_KEY, SupportedEmbedModels.COHERE);
37 |
38 | })();
39 |
--------------------------------------------------------------------------------
/IntelliNode/wrappers/IntellicloudWrapper.js:
--------------------------------------------------------------------------------
1 | /*Apache License
2 | Copyright 2023 Github.com/Barqawiz/IntelliNode*/
3 | const FormData = require('form-data');
4 | const config = require('../config.json');
5 | const connHelper = require('../utils/ConnHelper');
6 | const FetchClient = require('../utils/FetchClient');
7 |
8 | class IntellicloudWrapper {
9 | constructor(apiKey, apiBase = null) {
10 | this.ONE_KEY = apiKey;
11 | if (apiBase) {
12 | this.API_BASE_URL = apiBase;
13 | } else {
14 | this.API_BASE_URL = config.url.intellicloud.base;
15 | }
16 |
17 | this.client = new FetchClient({
18 | baseURL: this.API_BASE_URL
19 | // We'll add headers at runtime if needed
20 | });
21 | }
22 |
23 | async semanticSearch(queryText, k = 3, filters = {}) {
24 | if (!k || k === undefined) {
25 | k = 3;
26 | }
27 | const endpoint = config.url.intellicloud.semantic_search;
28 |
29 | const form = new FormData();
30 | form.append('one_key', this.ONE_KEY);
31 | form.append('query_text', queryText);
32 | form.append('k', k);
33 |
34 | if (filters && filters.document_name) {
35 | form.append('document_name', filters.document_name);
36 | }
37 |
38 | try {
39 | // Pass the FormData directly
40 | const response = await this.client.post(endpoint, form);
41 | return response.data; // The API returns { data: ... }
42 | } catch (error) {
43 | throw new Error(connHelper.getErrorMessage(error));
44 | }
45 | }
46 | }
47 |
48 | module.exports = IntellicloudWrapper;
49 |
--------------------------------------------------------------------------------
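Note: a minimal sketch of calling the IntellicloudWrapper above. The INTELLICLOUD_ONE_KEY environment variable name is a placeholder assumption; the one key comes from your intellicloud project.

```
const IntellicloudWrapper = require('./IntelliNode/wrappers/IntellicloudWrapper');

(async () => {
  // placeholder env var name for the intellicloud one key
  const intellicloud = new IntellicloudWrapper(process.env.INTELLICLOUD_ONE_KEY);

  // top-3 semantic matches; an optional document_name filter is also supported
  const matches = await intellicloud.semanticSearch('What is IntelliNode?', 3);
  console.log(matches);
})();
```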
/samples/command_sample/test_chat_context.js:
--------------------------------------------------------------------------------
1 | const { ChatContext } = require('intellinode');
2 | require("dotenv").config();
3 | const assert = require('assert');
4 |
5 | const apiKey = process.env.OPENAI_API_KEY;
6 |
7 | async function testGetSimpleContext() {
8 |
9 | const context = new ChatContext(apiKey);
10 | const userMessage = "Hello";
11 | const historyMessages = ["Good morning", "Dinner time", "How can I help you?", "Hello"];
12 | const n = 3;
13 |
14 | const resultContext = await context.getStringContext(userMessage, historyMessages, n);
15 |
16 | console.log('result: ', resultContext)
17 |
18 | assert.strictEqual(resultContext.length, n);
19 | }
20 |
21 | // Test for getRoleContext
22 | async function testGetRoleContext() {
23 |
24 | const context = new ChatContext(apiKey);
25 | const userMessage = "Hello";
26 | const historyMessages = [
27 | { role: 'user', content: 'Dinner time' },
28 | { role: 'user', content: 'Good Morning' },
29 | { role: 'assistant', content: 'How can I help you?' },
30 | { role: 'user', content: 'Hello' }
31 | ];
32 | const n = 3;
33 |
34 | const resultContext = await context.getRoleContext(userMessage, historyMessages, n);
35 |
36 | console.log('resultContext: ', resultContext)
37 |
38 | assert.strictEqual(resultContext.length, n);
39 |
40 | }
41 |
42 |
43 | (async () => {
44 | console.log('### execute the string context history ###')
45 | await testGetSimpleContext();
46 |
47 | console.log('### execute the role dictionary context history ###')
48 | await testGetRoleContext();
49 | })();
--------------------------------------------------------------------------------
/IntelliNode/test/integration/MistralAIWrapper.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const MistralAIWrapper = require('../../wrappers/MistralAIWrapper');
4 | const mistral = new MistralAIWrapper(process.env.MISTRAL_API_KEY);
5 |
6 | async function testMistralGenerateModel() {
7 | try {
8 | const params = {
9 | model: 'mistral-tiny',
10 | messages: [{"role": "user", "content": "Who is the most renowned French painter?"}]
11 | };
12 |
13 | const result = await mistral.generateText(params);
14 |
15 | console.log('Mistral Language Model Message:', result.choices[0]['message']['content']);
16 |
17 | } catch (error) {
18 | console.error('Mistral Language Model Error:', error);
19 | }
20 | }
21 |
22 | async function testMistralEmbeddings() {
23 | try {
24 | const params = {
25 | model: 'mistral-embed',
26 | input: ["Embed this sentence.", "As well as this one."]
27 | };
28 |
29 | const result = await mistral.getEmbeddings(params);
30 |
31 | console.log('result: ', result);
32 |
33 | const embeddings = result.data;
34 |
35 | console.log(
36 | 'Mistral Embeddings Result Sample:',
37 | embeddings[0]['embedding']
38 | );
39 |
40 | assert(
41 | embeddings.length > 0,
42 | 'testMistralEmbeddings response length should be greater than 0'
43 | );
44 |
45 | } catch (error) {
46 | console.error('Mistral Embeddings Error:', error);
47 | }
48 | }
49 |
50 | (async () => {
51 | await testMistralGenerateModel();
52 | await testMistralEmbeddings();
53 | })();
--------------------------------------------------------------------------------
/IntelliNode/wrappers/HuggingWrapper.js:
--------------------------------------------------------------------------------
1 | const config = require('../config.json');
2 | const connHelper = require('../utils/ConnHelper');
3 | const FetchClient = require('../utils/FetchClient');
4 |
5 | class HuggingWrapper {
6 | constructor(apiKey) {
7 | this.API_BASE_URL = config.url.huggingface.base;
8 | this.API_KEY = apiKey;
9 |
10 | this.client = new FetchClient({
11 | baseURL: this.API_BASE_URL,
12 | headers: {
13 | 'Content-Type': 'application/json',
14 | Authorization: `Bearer ${this.API_KEY}`
15 | }
16 | });
17 | }
18 |
19 | async generateText(modelId, data) {
20 | const endpoint = `/${modelId}`;
21 | try {
22 | return await this.client.post(endpoint, data);
23 | } catch (error) {
24 | throw new Error(connHelper.getErrorMessage(error));
25 | }
26 | }
27 |
28 | async generateImage(modelId, data) {
29 | const endpoint = `/${modelId}`;
30 | try {
31 | // We need arraybuffer to get raw image data
32 | return await this.client.post(endpoint, data, { responseType: 'arraybuffer' });
33 | } catch (error) {
34 | throw new Error(connHelper.getErrorMessage(error));
35 | }
36 | }
37 |
38 | async processImage(modelId, data) {
39 | const endpoint = `/${modelId}`;
40 | try {
41 | const arrayBuf = await this.client.post(endpoint, data, { responseType: 'arraybuffer' });
42 | return JSON.parse(Buffer.from(arrayBuf).toString());
43 | } catch (error) {
44 | throw new Error(connHelper.getErrorMessage(error));
45 | }
46 | }
47 | }
48 |
49 | module.exports = HuggingWrapper;
50 |
--------------------------------------------------------------------------------
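Note: the samples above cover generateText and processImage; the sketch below exercises generateImage, which requests raw image bytes. The model id is a placeholder assumption.

```
const fs = require('fs');
const HuggingWrapper = require('./IntelliNode/wrappers/HuggingWrapper');

(async () => {
  const hugging = new HuggingWrapper(process.env.HUGGING_API_KEY);

  // text-to-image; the model id below is an assumed example
  const arrayBuf = await hugging.generateImage(
    'stabilityai/stable-diffusion-2',
    { inputs: 'A watercolor fox in a forest' }
  );

  // generateImage uses responseType 'arraybuffer', so write the raw bytes
  fs.writeFileSync('fox.png', Buffer.from(arrayBuf));
  console.log('image saved to fox.png');
})();
```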
/samples/frontend/README.md:
--------------------------------------------------------------------------------
1 | # IntelliNode Frontend
2 |
3 | IntelliNode provides a frontend version that runs directly in your browser.
4 |
5 |
6 |
7 | ## Demos
8 |
9 | This folder contains three interactive examples:
10 |
11 | 1. **Chat Playground** (`index.html`) - Multi-provider chat interface supporting OpenAI, Cohere, and Mistral, with model selection and real-time responses.
12 |
13 | 2. **Stability Control Studio** (`style_studio.html`) - Image transformation tool with three control methods (Style, Sketch, Structure) and AI-powered prompt enhancement.
14 |
15 | 3. **HTML Generator** (`html_generator.html`) - AI-powered HTML page generator using OpenAI GPT-5 and Cohere Command-A models. Features live preview, template examples, and download functionality.
16 |
17 | ## Running Locally
18 |
19 | 1. **Start a local server**, for example with Python 3:
20 | `python -m http.server 8000`
21 |
22 | 2. **Browse** to `http://localhost:8000`.
23 |
24 | 3. **Enter your API Key**, select the provider, write your prompt, and click **Send Request**.
25 |
26 |
27 | ## Frontend JavaScript
28 |
29 | Include the following CDN script in your HTML:
30 | ```
31 |
32 | ```
33 | *Alternative mirror link:*
34 | ```
35 |
36 | ```
37 |
38 | Check the [docs](https://docs.intellinode.ai/docs/npm/frontend) for more details.
39 |
40 | ## License
41 | Apache License
42 |
43 | Copyright 2023 Github.com/Barqawiz/IntelliNode
44 |
--------------------------------------------------------------------------------
/IntelliNode/test/integration/StabilityAIWrapper.test.js:
--------------------------------------------------------------------------------
1 | require("dotenv").config();
2 | const StabilityAIWrapper = require("../../wrappers/StabilityAIWrapper");
3 | const ImageModelInput = require("../../model/input/ImageModelInput");
4 | const fs = require('fs');
5 | const stabilityAI = new StabilityAIWrapper(process.env.STABILITY_API_KEY);
6 |
7 | async function testGenerateTextToImage() {
8 | try {
9 | // The v1 endpoint expects a JSON body
10 | const params = {
11 | text_prompts: [{ text: "A black and red gaming chair...", weight: 1.0 }],
12 | cfg_scale: 7,
13 | samples: 1,
14 | steps: 30
15 | // etc.
16 | };
17 |
18 | const result = await stabilityAI.generateTextToImage(params);
19 |
20 | console.log("Text to Image Result:", result);
21 | fs.writeFileSync('test_output.png', result.artifacts[0].base64, { encoding: 'base64' });
22 | } catch (error) {
23 | console.error("Text to Image Error:", error);
24 | }
25 | }
26 |
27 | async function testV2BetaCore() {
28 | try {
29 | const response = await stabilityAI.generateStableImageV2Beta({
30 | model: 'core',
31 | prompt: "Teddy writing a blog in Times Square, photorealistic",
32 | output_format: "webp",
33 | width: 512,
34 | height: 512,
35 | accept: "application/json"
36 | });
37 | console.log("v2beta (Core) JSON response:", response);
38 | fs.writeFileSync('test_v2beta_core.webp', response.image, { encoding: 'base64' });
39 | } catch (error) {
40 | console.error("testV2BetaCore Error:", error);
41 | }
42 | }
43 |
44 |
45 | (async () => {
46 | await testV2BetaCore(); //v2
47 | })();
--------------------------------------------------------------------------------
/samples/command_sample/shiba_image_generator.js:
--------------------------------------------------------------------------------
1 | const fs = require('fs');
2 | const { RemoteImageModel, SupportedImageModels, ImageModelInput } = require('intellinode');
3 | const dotenv = require('dotenv');
4 | dotenv.config();
5 |
6 | async function generateImages(apiKey, provider, imageInput) {
7 | const imgModel = new RemoteImageModel(apiKey, provider);
8 | const images = await imgModel.generateImages(imageInput);
9 | console.log(`Generated images (${provider}):`);
10 | images.forEach((image, index) => {
11 | fs.writeFileSync(`./temp/doge_image${index + 1}.png`, image, { encoding: 'base64' });
12 | });
13 |   console.log('the images are saved in the temp folder');
14 | }
15 |
16 | (async () => {
17 |
18 | // example 1: A cartoon-style Shiba Inu dog with a playful expression, sitting on a golden coin with "Doge" written on it, surrounded by other golden coins.
19 | // example 2: A cartoon-style Shiba Inu dog with a playful expression, standing on a patterned background with various dog toys scattered around. The background is filled with colorful paw prints and bones.
20 |
21 |   const prompt = 'A cartoon-style Shiba Inu dog with a playful expression, standing on a patterned background with various dog toys scattered around. The background is filled with colorful paw prints and bones.';
22 |
23 |   // Generate image using OpenAI
24 | const myKey = process.env.OPENAI_API_KEY;
25 | const imageInput = new ImageModelInput({
26 | prompt: prompt,
27 | numberOfImages: 3,
28 | size: '512x512',
29 | responseFormat:'b64_json'
30 | });
31 |
32 | await generateImages(myKey, SupportedImageModels.OPENAI, imageInput);
33 | })();
34 |
--------------------------------------------------------------------------------
/samples/command_sample/test_finetuning.js:
--------------------------------------------------------------------------------
1 | const assert = require("assert");
2 | const FormData = require("form-data");
3 | const { RemoteFineTuneModel, SupportedFineTuneModels, FineTuneInput } = require("intellinode");
4 | const { createReadStream } = require("fs");
5 |
6 | require("dotenv").config();
7 | const openaiKey = process.env.OPENAI_API_KEY;
8 |
9 | async function testOpenAIFineTuneRemoteModel() {
10 | console.log('### Openai test case 1 ### \n');
11 | try {
12 | const tuner = new RemoteFineTuneModel(openaiKey, SupportedFineTuneModels.OPENAI);
13 |
14 | if (openaiKey === "") return;
15 |
16 | const filePath = '../../temp/training_data.jsonl'
17 |
18 | const filePayload = new FormData();
19 | filePayload.append('file', createReadStream(filePath));
20 | filePayload.append('purpose', 'fine-tune');
21 |
22 | const file = await tuner.uploadFile(filePayload)
23 |
24 | const input = new FineTuneInput({
25 | model: 'gpt-3.5-turbo',
26 | training_file: file.id
27 | })
28 |
29 | const result = await tuner.generateFineTune(input)
30 | const list = await tuner.listFineTune()
31 |
32 | const value = list.data.filter(b => b.id === result.id)
33 | console.log('Fine tuning Model Result:\n', value, '\n');
34 | assert(value.length > 0, 'testFineTuning response length should be greater than 0');
35 |
36 | } catch (error) {
37 | if (openaiKey === "") {
38 | console.log(
39 | "testOpenAIFineTuneRemoteModel: set the API key to run the test case."
40 | );
41 | } else {
42 | console.error("Test case failed with exception:", error);
43 | }
44 | }
45 | }
46 |
47 | (async () => {
48 | await testOpenAIFineTuneRemoteModel();
49 | })();
--------------------------------------------------------------------------------
/samples/command_sample/test_chatbot.js:
--------------------------------------------------------------------------------
1 | const { Chatbot, ChatGPTInput, ChatGPTMessage } = require('intellinode');
2 | // load the API keys from the .env file
3 | const dotenv = require('dotenv');
4 | dotenv.config();
5 |
6 | async function callChatbot(apiKey, provider) {
7 | const chatbot = new Chatbot(apiKey, provider);
8 |
9 | const system = 'You are a helpful assistant.';
10 |   const input = new ChatGPTInput(system, { model: 'o3-mini' });
11 |   input.addUserMessage('what is the story of Batman: The Dark Knight in less than 50 words');
12 | input.numberOfOutputs = 1;
13 |
14 | const responses = await chatbot.chat(input);
15 |
16 | console.log(`Chatbot responses (${provider}):`);
17 | responses.forEach(response => console.log('- ', response));
18 | }
19 |
20 | async function callChatbotStream(apiKey, provider) {
21 | const chatbot = new Chatbot(apiKey, provider);
22 |
23 | const system = 'You are a helpful assistant.';
24 |   const input = new ChatGPTInput(system, { model: 'o3-mini' });
25 |   input.addUserMessage('what is the story of Batman: The Dark Knight in less than 50 words');
26 | input.numberOfOutputs = 1;
27 |
28 | let response = '';
29 | for await (const contentText of chatbot.stream(input)) {
30 | response += contentText;
31 | console.log('Received chunk:', contentText);
32 | }
33 |
34 | console.log(`Chatbot responses (${provider}):`);
35 | console.log('the full response: ', response)
36 | }
37 |
38 | (async () => {
39 | // Test chatbot using OpenAI
40 | console.log('test the chat function')
41 | await callChatbot(process.env.OPENAI_API_KEY, 'openai');
42 |
43 | console.log('test the stream function')
44 | await callChatbotStream(process.env.OPENAI_API_KEY, 'openai');
45 | })();
46 |
--------------------------------------------------------------------------------
/IntelliNode/wrappers/CohereAIWrapper.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 | Copyright 2023 Github.com/Barqawiz/IntelliNode*/
4 | const config = require('../config.json');
5 | const connHelper = require('../utils/ConnHelper');
6 | const FetchClient = require('../utils/FetchClient');
7 |
8 | class CohereAIWrapper {
9 | constructor(apiKey) {
10 | this.API_BASE_URL = config.url.cohere.base;
11 | this.COHERE_VERSION = config.url.cohere.version;
12 | this.API_KEY = apiKey;
13 |
14 | this.client = new FetchClient({
15 | baseURL: this.API_BASE_URL,
16 | headers: {
17 | 'Content-Type': 'application/json',
18 | Authorization: `Bearer ${this.API_KEY}`,
19 | 'Cohere-Version': this.COHERE_VERSION
20 | }
21 | });
22 | }
23 |
24 | async generateText(params) {
25 | const endpoint = config.url.cohere.completions;
26 | try {
27 | return await this.client.post(endpoint, params);
28 | } catch (error) {
29 | throw new Error(connHelper.getErrorMessage(error));
30 | }
31 | }
32 |
33 | async generateChatText(params) {
34 | const endpoint = '/chat';
35 | try {
36 | // If stream is true, set responseType='stream'
37 | const extraConfig = params.stream ? { responseType: 'stream' } : {};
38 | return await this.client.post(endpoint, params, extraConfig);
39 | } catch (error) {
40 | throw new Error(connHelper.getErrorMessage(error));
41 | }
42 | }
43 |
44 | async getEmbeddings(params) {
45 | const endpoint = config.url.cohere.embed;
46 | try {
47 | return await this.client.post(endpoint, params);
48 | } catch (error) {
49 | throw new Error(connHelper.getErrorMessage(error));
50 | }
51 | }
52 | }
53 |
54 | module.exports = CohereAIWrapper;
55 |
56 |
--------------------------------------------------------------------------------
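Note: a minimal completion sketch against the CohereAIWrapper above, using the 'command' model that appears as the cohere default elsewhere in this repository.

```
const CohereAIWrapper = require('./IntelliNode/wrappers/CohereAIWrapper');

(async () => {
  const cohere = new CohereAIWrapper(process.env.COHERE_API_KEY);

  const result = await cohere.generateText({
    model: 'command',
    prompt: 'Write one sentence about semantic search.',
    max_tokens: 60
  });

  console.log(result);
})();
```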
/IntelliNode/test/integration/RemoteFineTune.test.js:
--------------------------------------------------------------------------------
1 | const assert = require("assert");
2 | const FormData = require("form-data");
3 | const { RemoteFineTuneModel, SupportedFineTuneModels } = require("../../controller/RemoteFineTuneModel");
4 | const { createReadStream } = require("fs");
5 | const FineTuneInput = require("../../model/input/FineTuneInput");
6 |
7 | require("dotenv").config();
8 | const openaiKey = process.env.OPENAI_API_KEY;
9 |
10 | async function testOpenAIFineTuneRemoteModel() {
11 | console.log('### Openai test case 1 ### \n');
12 | try {
13 | const tuner = new RemoteFineTuneModel(openaiKey, SupportedFineTuneModels.OPENAI);
14 |
15 | if (openaiKey === "") return;
16 |
17 | const filePath = '../temp/training_data.jsonl'
18 |
19 | const filePayload = new FormData();
20 | filePayload.append('file', createReadStream(filePath));
21 | filePayload.append('purpose', 'fine-tune');
22 |
23 | const file = await tuner.uploadFile(filePayload)
24 |
25 | const input = new FineTuneInput({
26 | model: 'gpt-4o',
27 | training_file: file.id
28 | })
29 |
30 | const result = await tuner.generateFineTune(input)
31 | const list = await tuner.listFineTune()
32 |
33 | const value = list.data.filter(b => b.id === result.id)
34 | console.log('Fine tuning Model Result:\n', value, '\n');
35 | assert(value.length > 0, 'testFineTuning response length should be greater than 0');
36 |
37 | } catch (error) {
38 | if (openaiKey === "") {
39 | console.log(
40 | "testOpenAIFineTuneRemoteModel: set the API key to run the test case."
41 | );
42 | } else {
43 | console.error("Test case failed with exception:", error);
44 | }
45 | }
46 | }
47 |
48 | (async () => {
49 | await testOpenAIFineTuneRemoteModel();
50 | })();
--------------------------------------------------------------------------------
/IntelliNode/wrappers/GoogleAIWrapper.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 | */
4 | const config = require('../config.json');
5 | const connHelper = require('../utils/ConnHelper');
6 | const FetchClient = require('../utils/FetchClient');
7 |
8 | class GoogleAIWrapper {
9 | constructor(apiKey) {
10 | this.API_SPEECH_URL = config.url.google.base.replace(
11 | '{1}',
12 | config.url.google.speech.prefix
13 | );
14 | this.API_KEY = apiKey;
15 |
16 | this.client = new FetchClient({
17 | baseURL: this.API_SPEECH_URL,
18 | headers: {
19 | 'Content-Type': 'application/json; charset=utf-8',
20 | 'X-Goog-Api-Key': this.API_KEY
21 | }
22 | });
23 | }
24 |
25 | async generateSpeech(params) {
26 |     // build the full synthesize URL from the speech base and the
27 |     // endpoint postfix configured for Google text-to-speech
28 |     const url =
29 |       this.API_SPEECH_URL + config.url.google.speech.synthesize.postfix;
30 |
31 | const json = this.getSynthesizeInput(params);
32 | try {
33 | return await this.client.post(url, JSON.parse(json));
34 | } catch (error) {
35 | throw new Error(connHelper.getErrorMessage(error));
36 | }
37 | }
38 |
39 | getSynthesizeInput(params) {
40 | const text = params.text;
41 | const languageCode = params.languageCode;
42 | const name = params.name;
43 | const ssmlGender = params.ssmlGender;
44 |
45 | const modelInput = {
46 | input: {
47 | text: text
48 | },
49 | voice: {
50 | languageCode: languageCode,
51 | name: name,
52 | ssmlGender: ssmlGender
53 | },
54 | audioConfig: {
55 | audioEncoding: 'MP3'
56 | }
57 | };
58 |
59 | return JSON.stringify(modelInput);
60 | }
61 | }
62 |
63 | module.exports = GoogleAIWrapper;
64 |
--------------------------------------------------------------------------------
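Note: a minimal speech sketch for the GoogleAIWrapper above. The parameter names follow getSynthesizeInput(), and the voice name matches the en-GB voices used in Text2SpeechInput; the GOOGLE_API_KEY env var name and the base64 audioContent field in the response are assumptions about the Google text-to-speech API.

```
const fs = require('fs');
const GoogleAIWrapper = require('./IntelliNode/wrappers/GoogleAIWrapper');

(async () => {
  // env var name is a placeholder assumption
  const google = new GoogleAIWrapper(process.env.GOOGLE_API_KEY);

  const result = await google.generateSpeech({
    text: 'Hello from IntelliNode!',
    languageCode: 'en-GB',
    name: 'en-GB-Standard-A',
    ssmlGender: 'FEMALE'
  });

  // Google text-to-speech is assumed to return base64 audio in audioContent
  fs.writeFileSync('speech.mp3', result.audioContent, { encoding: 'base64' });
})();
```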
/IntelliNode/test/integration/SemanticSearch.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const { SemanticSearch } = require('../../function/SemanticSearch');
4 | const { SupportedEmbedModels } = require('../../controller/RemoteEmbedModel');
5 |
6 | const openaiApiKey = process.env.OPENAI_API_KEY;
7 | const cohereApiKey = process.env.COHERE_API_KEY;
8 |
9 | const openaiSemanticSearch = new SemanticSearch(openaiApiKey, SupportedEmbedModels.OPENAI);
10 | const cohereSemanticSearch = new SemanticSearch(cohereApiKey, SupportedEmbedModels.COHERE);
11 |
12 | async function testOpenAISemanticSearch() {
13 | const pivotItem = 'Hello from OpenAI!';
14 | const searchArray = ['Greetings from OpenAI!', 'Bonjour de OpenAI!', 'Hola desde OpenAI!'];
15 | const numberOfMatches = 2;
16 |
17 | const results = await openaiSemanticSearch.getTopMatches(pivotItem, searchArray, numberOfMatches);
18 | console.log('OpenAI Semantic Search Results:', results);
19 | console.log('top matches:', openaiSemanticSearch.filterTopMatches(results, searchArray));
20 |   assert(results.length === numberOfMatches, 'results length should equal numberOfMatches');
21 | }
22 |
23 | async function testCohereSemanticSearch() {
24 | const pivotItem = 'Hello from Cohere!';
25 | const searchArray = ['Greetings from Cohere!', 'Bonjour de Cohere!', 'Hola desde Cohere!'];
26 | const numberOfMatches = 2;
27 |
28 | const results = await cohereSemanticSearch.getTopMatches(pivotItem, searchArray, numberOfMatches);
29 | console.log('Cohere Semantic Search Results:', results);
30 |   console.log('top matches:', cohereSemanticSearch.filterTopMatches(results, searchArray));
31 |   assert(results.length === numberOfMatches, 'results length should equal numberOfMatches');
32 | }
33 |
34 | (async () => {
35 | await testOpenAISemanticSearch();
36 | await testCohereSemanticSearch();
37 | })();
--------------------------------------------------------------------------------
/IntelliNode/utils/Prompt.js:
--------------------------------------------------------------------------------
1 | const FileHelper = require('./FileHelper')
2 | const { Chatbot, SupportedChatModels } = require("../function/Chatbot");
3 | const { ChatGPTInput, ChatGPTMessage } = require("../model/input/ChatModelInput");
4 | const SystemHelper = require("../utils/SystemHelper");
5 |
6 | class Prompt {
7 | constructor(template) {
8 | this.template = template;
9 | }
10 |
11 | getInput() {
12 | return this.template;
13 | }
14 |
15 | format(data) {
16 | const regex = /\$\{([^}]+)\}/g;
17 | let result = this.template;
18 | let match;
19 |
20 | while ((match = regex.exec(this.template)) !== null) {
21 | const key = match[1];
22 | const value = data.hasOwnProperty(key) ? data[key] : '';
23 |
24 | result = result.replace(match[0], value);
25 | }
26 |
27 | return result;
28 | }
29 |
30 | static fromText(template) {
31 | return new Prompt(template);
32 | }
33 |
34 | static fromFile(filePath) {
35 | const template = FileHelper.readData(filePath, 'utf-8');
36 | return new Prompt(template);
37 | }
38 |
39 | static async fromChatGPT(promptTopic, apiKey, customProxyHelper=null, model='gpt-4') {
40 |
41 | const chatbot = new Chatbot(apiKey, SupportedChatModels.OPENAI, customProxyHelper);
42 |
43 | const promptExample = new SystemHelper().loadPrompt("prompt_example");
44 |
45 | const input = new ChatGPTInput("generate a prompt text, following prompt engineering best practices",
46 | { maxTokens: 800, model: model, temperature: 0.7 });
47 | input.addUserMessage(promptExample);
48 | input.addUserMessage(`Create a prompt: ${promptTopic}`);
49 |
50 | const responses = await chatbot.chat(input);
51 |
52 | return new Prompt(responses[0].trim());
53 | }
54 | }
55 |
56 | module.exports = Prompt;
--------------------------------------------------------------------------------
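Note: a small illustration of the template substitution implemented by Prompt.format() above; every ${key} placeholder is replaced with the matching field from the data object.

```
const Prompt = require('./IntelliNode/utils/Prompt');

const prompt = Prompt.fromText('Summarize ${topic} in ${count} bullet points.');

// each ${key} is replaced; missing keys become empty strings
console.log(prompt.format({ topic: 'vector databases', count: 3 }));
// -> Summarize vector databases in 3 bullet points.
```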
/samples/command_sample/test_text_analyzer.js:
--------------------------------------------------------------------------------
1 | const { TextAnalyzer, SupportedLangModels } = require('intellinode');
2 | // load the API keys from the .env file
3 | const dotenv = require('dotenv');
4 | dotenv.config();
5 |
6 | async function testSummarize(apiKey, provider) {
7 | const analyzer = new TextAnalyzer(apiKey, provider);
8 |
9 |   const text = 'IntelliNode is a javascript library that integrates cutting-edge AI models into your project. With its intuitive functions, you can easily feed data to models like ChatGPT, WaveNet, and Stable diffusion and receive generated text, speech, or images. It also offers high-level functions such as semantic search and chatbot capabilities. What sets IntelliNode apart is its lightning-fast access to the latest deep learning models, allowing you to integrate them into your projects with just a few lines of code.';
10 | const summary = await analyzer.summarize(text);
11 | console.log(`${provider} Summary:`, summary);
12 |
13 | }
14 |
15 | async function testSentimentAnalysis(apiKey, provider) {
16 | const analyzer = new TextAnalyzer(apiKey, provider);
17 |
18 | const text = 'IntelliNode is an amazing AI library that makes it easy to integrate various AI models. I love using it!';
19 | const sentiment = await analyzer.sentimentAnalysis(text);
20 |
21 | console.log(`${provider} Sentiment Analysis: `, sentiment);
22 | }
23 |
24 |
25 | (async () => {
26 | // test the summary
27 |   console.log('*** summary ***');
28 |   await testSummarize(process.env.OPENAI_API_KEY, 'openai');
29 |   await testSummarize(process.env.COHERE_API_KEY, 'cohere');
30 |
31 | // test sentiment analysis
32 | console.log('*** sentiment analysis ***')
33 | await testSentimentAnalysis(process.env.OPENAI_API_KEY, 'openai');
34 | await testSentimentAnalysis(process.env.COHERE_API_KEY, 'cohere');
35 |
36 | })();
37 |
--------------------------------------------------------------------------------
/IntelliNode/test/integration/SemanticSearchPaging.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const { SemanticSearchPaging } = require('../../function/SemanticSearchPaging');
4 | const { SupportedEmbedModels } = require('../../controller/RemoteEmbedModel');
5 |
6 | const openaiApiKey = process.env.OPENAI_API_KEY;
7 | const cohereApiKey = process.env.COHERE_API_KEY;
8 |
9 | const pivotItem = 'Hello from IntelliNode';
10 |
11 | const openaiSemanticSearch = new SemanticSearchPaging(openaiApiKey,
12 | SupportedEmbedModels.OPENAI,
13 | pivotItem, 2);
14 | const cohereSemanticSearch = new SemanticSearchPaging(cohereApiKey,
15 | SupportedEmbedModels.COHERE,
16 | pivotItem, 2);
17 |
18 | async function addToSessionAndTest(semanticSearch, newSearchItems) {
19 | 
20 |   await semanticSearch.addNewData(newSearchItems);
21 |   const results = await semanticSearch.getCurrentTopMatches();
22 | 
23 |   console.log('Semantic Search Session Results:', results);
24 |   assert(results.length <= semanticSearch.numberOfMatches, 'results length should not exceed numberOfMatches');
25 | }
26 |
27 | (async () => {
28 |
29 | // semantic search with openai embedding
30 | await addToSessionAndTest(openaiSemanticSearch, ['Greetings from IntelliNode!', 'Saluti da IntelliNode!']);
31 | await addToSessionAndTest(openaiSemanticSearch, ['Hola desde IntelliNode!', 'Bonjour de IntelliNode!']);
32 |
33 | openaiSemanticSearch.clean();
34 |
35 | // semantic search with cohere embedding
36 | await addToSessionAndTest(cohereSemanticSearch, ['Greetings from IntelliNode!', 'Bonjour de IntelliNode!']);
37 | await addToSessionAndTest(cohereSemanticSearch, ['Hola desde IntelliNode!', 'Saluti da IntelliNode!']);
38 | })();
--------------------------------------------------------------------------------
/IntelliNode/test/integration/AzureChatContext.test.js:
--------------------------------------------------------------------------------
1 | const ChatContext = require('../../utils/ChatContext');
2 | require("dotenv").config();
3 | const assert = require('assert');
4 | const ProxyHelper = require('../../utils/ProxyHelper');
5 | const apiKey = process.env.AZURE_OPENAI_API_KEY;
6 |
7 | async function testGetSimpleContext(proxyHelper, modelName) {
8 | const context = new ChatContext(apiKey, 'openai', proxyHelper);
9 | const userMessage = "Hello";
10 | const historyMessages = ["Good morning", "Dinner time", "How can I help you?", "Hello"];
11 | const n = 3;
12 |
13 | const resultContext = await context.getStringContext(userMessage, historyMessages, n, modelName);
14 |
15 | console.log('result: ', resultContext)
16 |
17 | assert.strictEqual(resultContext.length, n);
18 | }
19 |
20 | // Test for getRoleContext
21 | async function testGetRoleContext(proxyHelper, modelName) {
22 |
23 | const context = new ChatContext(apiKey, 'openai', proxyHelper);
24 | const userMessage = "Hello";
25 | const historyMessages = [
26 | { role: 'user', content: 'Dinner time' },
27 | { role: 'user', content: 'Good Morning' },
28 | { role: 'assistant', content: 'How can I help you?' },
29 | { role: 'user', content: 'Hello' }
30 | ];
31 | const n = 3;
32 |
33 | const resultContext = await context.getRoleContext(userMessage, historyMessages, n, modelName);
34 |
35 | console.log('resultContext: ', resultContext)
36 |
37 | assert.strictEqual(resultContext.length, n);
38 |
39 | }
40 |
41 |
42 | (async () => {
43 |
44 | const args = process.argv.slice(2);
45 | const resourceName = args[0];
46 | const modelName = args[1];
47 |
48 | // set azure openai parameters
49 |   const proxyHelper = new ProxyHelper();
50 |   proxyHelper.setAzureOpenai(resourceName);
51 | 
52 |   await testGetSimpleContext(proxyHelper, modelName);
53 |   await testGetRoleContext(proxyHelper, modelName);
54 |
55 | })();
--------------------------------------------------------------------------------
/samples/command_sample/test_chatbot_nvidia.js:
--------------------------------------------------------------------------------
1 | const { Chatbot, NvidiaInput, SupportedChatModels } = require('intellinode');
2 |
3 | const dotenv = require('dotenv');
4 | dotenv.config();
5 |
6 | async function callChatbot(apiKey, provider) {
7 | const chatbot = new Chatbot(apiKey, provider);
8 |
9 | const system = 'You are a knowledgeable assistant about AI and technology.';
10 | const input = new NvidiaInput(system, {
11 | model: 'deepseek-ai/deepseek-r1',
12 | maxTokens: 512,
13 | temperature: 0.7
14 | });
15 |
16 | input.addUserMessage('What are the main differences between AI and Machine Learning? provide short answer');
17 |
18 | const responses = await chatbot.chat(input);
19 |
20 | console.log(`\nChatbot responses (${provider}):`);
21 | responses.forEach(response => console.log('- ', response));
22 | }
23 |
24 | async function callChatbotStream(apiKey, provider) {
25 | const chatbot = new Chatbot(apiKey, provider);
26 |
27 | const system = 'You are a poetic assistant.';
28 | const input = new NvidiaInput(system, {
29 | model: 'deepseek-ai/deepseek-r1',
30 | maxTokens: 512,
31 | temperature: 0.5,
32 | stream: true
33 | });
34 |
35 | input.addUserMessage('Write a haiku about artificial intelligence.');
36 |
37 | let response = '';
38 | console.log(`\n--- NVIDIA Streaming (${provider}) ---\n`);
39 |
40 | for await (const contentText of chatbot.stream(input)) {
41 | response += contentText;
42 | console.log('Received chunk:', contentText);
43 | }
44 |
45 | console.log(`\nChatbot responses (${provider}):`);
46 | console.log('The full response:', response);
47 | }
48 |
49 | (async () => {
50 | console.log('\n🔹 Testing NVIDIA DeepSeek Chat\n');
51 | await callChatbot(process.env.NVIDIA_API_KEY, SupportedChatModels.NVIDIA);
52 |
53 | console.log('\n🔹 Testing NVIDIA DeepSeek Streaming\n');
54 | await callChatbotStream(process.env.NVIDIA_API_KEY, SupportedChatModels.NVIDIA);
55 | })();
56 |
--------------------------------------------------------------------------------
/IntelliNode/test/unit/OpenAIWrapper.test.js:
--------------------------------------------------------------------------------
1 | const assert = require('assert');
2 | const OpenAIWrapper = require('../../wrappers/OpenAIWrapper');
3 | const ProxyHelper = require('../../utils/ProxyHelper');
4 |
5 | function testOpenAIWrapper() {
6 | const apiKey = 'your-api-key';
7 | const proxyHelper = ProxyHelper.getInstance();
8 | const openAIWrapper = new OpenAIWrapper(apiKey);
9 |
10 | assert.strictEqual(
11 | openAIWrapper.API_KEY,
12 | apiKey,
13 | 'API key should be set'
14 | );
15 | assert.ok(openAIWrapper.client, 'httpClient should be created');
16 |
17 | // Test httpClient configuration
18 | const expectedBaseURL = proxyHelper.getOpenaiURL();
19 | const expectedContentType = 'application/json';
20 | const expectedAuthHeader = `Bearer ${apiKey}`;
21 |
22 | assert.strictEqual(
23 | openAIWrapper.client.baseURL,
24 | expectedBaseURL,
25 | 'httpClient baseURL should be set correctly'
26 | );
27 | assert.strictEqual(
28 | openAIWrapper.client.defaultHeaders['Content-Type'],
29 | expectedContentType,
30 | 'httpClient Content-Type header should be set correctly'
31 | );
32 | assert.strictEqual(
33 | openAIWrapper.client.defaultHeaders['Authorization'],
34 | expectedAuthHeader,
35 | 'httpClient Authorization header should be set correctly'
36 | );
37 | }
38 |
39 | function testOpenAIOrganization() {
40 | const proxyHelper = ProxyHelper.getInstance();
41 |
42 | // test null organization
43 | let organization = proxyHelper.getOpenaiOrg();
44 |
45 | assert.strictEqual(
46 | organization,
47 | null,
48 | 'openai organization should be null'
49 | );
50 |
51 | // test organization with value
52 | proxyHelper.setOpenaiOrg('test');
53 | organization = proxyHelper.getOpenaiOrg();
54 | assert.strictEqual(
55 | organization,
56 | 'test',
57 | 'openai organization value not correct'
58 | );
59 | }
60 |
61 | module.exports = { testOpenAIWrapper, testOpenAIOrganization };
62 |
--------------------------------------------------------------------------------
/samples/command_sample/test_llm_evaluation.test.js:
--------------------------------------------------------------------------------
1 | require("dotenv").config();
2 | const assert = require('assert');
3 | const { LLMEvaluation, SupportedChatModels, SupportedLangModels } = require('intellinode');
4 | const openaiChatKey = process.env.OPENAI_API_KEY;
5 | const cohereCompletionKey = process.env.COHERE_API_KEY;
6 | const openaiChat = { apiKey: openaiChatKey, provider: SupportedChatModels.OPENAI,
7 | type: 'chat', model:'gpt-3.5-turbo', maxTokens: 50};
8 | const cohereCompletion = { apiKey: cohereCompletionKey, provider: SupportedLangModels.COHERE,
9 | type:'completion', model: 'command', maxTokens: 50};
10 |
11 | const llmEvaluation = new LLMEvaluation(openaiChatKey, 'openai');
12 |
13 | async function testLLMEvaluation() {
14 | const inputString = "Explain the process of photosynthesis in simple terms.";
15 | const targetAnswers = ["Photosynthesis is the process where green plants use sunlight to turn carbon dioxide and water into glucose and oxygen. The glucose provides food for the plant, and the oxygen gets released back into the air.",
16 | "Photosynthesis is how plants make their own food. They take in water and carbon dioxide, use the energy from sunlight to transform them into glucose (their food) and oxygen, which they release into the air.",
17 | "In simple terms, photosynthesis is like cooking for plants but instead of a stove, they use sunlight. They mix water and carbon dioxide with the sunlight to create glucose, which is their food, and also produce oxygen."];
18 | const providerSets = [openaiChat, cohereCompletion];
19 |
20 | const results = await llmEvaluation.compareModels(inputString, targetAnswers, providerSets);
21 |
22 | console.log('OpenAI Chat and Cohere Completion ModelEvaluation Results:', results);
23 |
24 |   assert(Object.keys(results).length === providerSets.length + 1, 'unexpected number of evaluation results');
25 | }
26 |
27 | (async () => {
28 | await testLLMEvaluation();
29 | })();
--------------------------------------------------------------------------------
/IntelliNode/model/input/EmbedInput.js:
--------------------------------------------------------------------------------
1 | const config = require('../../config.json');
2 |
3 | class EmbedInput {
4 | constructor({
5 | texts,
6 | model = null,
7 | }) {
8 | this.texts = texts;
9 | this.model = model;
10 | }
11 |
12 | getCohereInputs() {
13 | const inputs = {
14 | texts: this.texts,
15 | ...this.model && { model: this.model },
16 | };
17 |
18 | return inputs;
19 | }
20 |
21 | getOpenAIInputs() {
22 | const inputs = {
23 | input: this.texts,
24 | ...this.model && { model: this.model },
25 | };
26 |
27 | return inputs;
28 | }
29 |
30 | getLlamaReplicateInput() {
31 | return {
32 | version: this.model,
33 | input: {
34 | prompts: this.texts.join("\n\n"),
35 | prompt_separator: "\n\n",
36 | }
37 | };
38 | }
39 |
40 | getGeminiInputs() {
41 | return {
42 | model: this.model,
43 | content: {
44 | parts: this.texts.map(text => ({text}))
45 | }
46 | };
47 | }
48 |
49 | getNvidiaInputs(input_type="query") {
50 | return {
51 | input: this.texts,
52 | model: this.model,
53 | input_type: input_type,
54 | encoding_format: "float",
55 | truncate: "NONE"
56 | };
57 | }
58 |
59 | getVLLMInputs() {
60 | return {
61 | texts: this.texts,
62 | };
63 | }
64 |
65 | setDefaultValues(provider) {
66 | if (provider === "openai") {
67 | this.model = "text-embedding-3-small";
68 | } else if (provider === "cohere") {
69 | this.model = "embed-multilingual-v2.0";
70 | } else if (provider === "replicate") {
71 | this.model = config.models.replicate.llama['llama-2-13b-embeddings-version'];
72 | } else if (provider === "gemini") {
73 | this.model = "models/embedding-001";
74 | } else if (provider === "vllm") {
75 | this.model = null;
76 | } else {
77 | throw new Error("Invalid provider name");
78 | }
79 | }
80 | }
81 |
82 | module.exports = EmbedInput;
--------------------------------------------------------------------------------
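Note: a short illustration of the EmbedInput flow above: pick provider defaults, then build the provider-specific payload.

```
const EmbedInput = require('./IntelliNode/model/input/EmbedInput');

const input = new EmbedInput({ texts: ['Hello from IntelliNode!', 'Bonjour!'] });

// sets model to 'text-embedding-3-small' per the openai branch above
input.setDefaultValues('openai');

console.log(input.getOpenAIInputs());
// -> { input: [ ... ], model: 'text-embedding-3-small' }
```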
/IntelliNode/test/unit/GoogleAIWrapper.test.js:
--------------------------------------------------------------------------------
1 | const assert = require('assert');
2 | const GoogleAIWrapper = require('../../wrappers/GoogleAIWrapper');
3 | const config = require('../../config.json');
4 |
5 | function testGoogleAIWrapper() {
6 | const apiKey = 'your-api-key';
7 | const googleAIWrapper = new GoogleAIWrapper(apiKey);
8 |
9 | assert.strictEqual(
10 | googleAIWrapper.API_KEY,
11 | apiKey,
12 | 'API key should be set'
13 | );
14 | assert.ok(
15 | googleAIWrapper.client,
16 | 'client should be created'
17 | );
18 |
19 | // Test httpClient configuration
20 | const expectedBaseURL = config.url.google.base.replace(
21 | '{1}',
22 | config.url.google.speech.prefix
23 | );
24 | const expectedContentType = 'application/json; charset=utf-8';
25 |
26 | assert.strictEqual(
27 | googleAIWrapper.client.baseURL,
28 | expectedBaseURL,
29 | 'httpClient baseURL should be set correctly'
30 | );
31 | assert.strictEqual(
32 | googleAIWrapper.client.defaultHeaders['Content-Type'],
33 | expectedContentType,
34 | 'httpClient Content-Type header should be set correctly'
35 | );
36 | assert.strictEqual(
37 | googleAIWrapper.client.defaultHeaders['X-Goog-Api-Key'],
38 | apiKey,
39 | 'httpClient X-Goog-Api-Key header should be set correctly'
40 | );
41 |
42 | // Test getSynthesizeInput() method
43 | const params = {
44 | text: 'Hello world',
45 | languageCode: 'en-US',
46 | name: 'en-US-Wavenet-A',
47 | ssmlGender: 'MALE',
48 | };
49 | const expectedModelInput = JSON.stringify({
50 | input: {
51 | text: params.text,
52 | },
53 | voice: {
54 | languageCode: params.languageCode,
55 | name: params.name,
56 | ssmlGender: params.ssmlGender,
57 | },
58 | audioConfig: {
59 | audioEncoding: 'MP3',
60 | },
61 | });
62 |
63 | assert.strictEqual(
64 | googleAIWrapper.getSynthesizeInput(params),
65 | expectedModelInput,
66 | 'getSynthesizeInput() should return the correct model input as a JSON string'
67 | );
68 | }
69 |
70 | module.exports = testGoogleAIWrapper;
71 |
--------------------------------------------------------------------------------
/IntelliNode/model/input/LanguageModelInput.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | */
8 | class LanguageModelInput {
9 | constructor({
10 | prompt,
11 | model = null,
12 | temperature = null,
13 | maxTokens = null,
14 | numberOfOutputs = 1,
15 | }) {
16 | this.prompt = prompt;
17 | this.model = model;
18 | this.temperature = temperature;
19 | this.maxTokens = maxTokens;
20 | this.numberOfOutputs = numberOfOutputs;
21 | }
22 |
23 | getCohereInputs() {
24 | const inputs = {
25 | prompt: this.prompt,
26 | ...this.model && { model: this.model },
27 | ...this.temperature && { temperature: this.temperature },
28 | ...this.maxTokens && { max_tokens: this.maxTokens },
29 | ...this.numberOfOutputs && { num_generations: this.numberOfOutputs },
30 | };
31 |
32 | return inputs;
33 | }
34 |
35 | getOpenAIInputs() {
36 | const inputs = {
37 | prompt: this.prompt,
38 | ...this.model && { model: this.model },
39 | ...this.temperature && { temperature: this.temperature },
40 | ...this.maxTokens && { max_tokens: this.maxTokens },
41 | ...this.numberOfOutputs && { n: this.numberOfOutputs },
42 | };
43 |
44 | return inputs;
45 | }
46 |
47 | setDefaultValues(provider, tokenCount) {
48 |
49 | this.setDefaultModels(provider)
50 | if (provider === "openai") {
51 | this.temperature = 0.7;
52 | this.maxTokens = tokenCount;
53 | this.numberOfOutputs = 1;
54 | } else if (provider === "cohere") {
55 | this.temperature = 0.75;
56 | this.maxTokens = tokenCount;
57 | this.numberOfOutputs = 1;
58 | } else {
59 | throw new Error("Invalid provider name");
60 | }
61 | }
62 |
63 | setDefaultModels(provider) {
64 | if (provider === "openai") {
65 | this.model = "gpt-3.5-turbo-instruct";
66 | } else if (provider === "cohere") {
67 | this.model = "command";
68 | } else {
69 | throw new Error("Invalid provider name");
70 | }
71 | }
72 | }
73 |
74 | module.exports = LanguageModelInput;
--------------------------------------------------------------------------------
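Note: a short illustration of LanguageModelInput above, showing how setDefaultValues() fills the cohere defaults before the payload is built.

```
const LanguageModelInput = require('./IntelliNode/model/input/LanguageModelInput');

const langInput = new LanguageModelInput({ prompt: 'Write a tagline for a note-taking app.' });

// fills model ('command'), temperature (0.75) and max tokens for cohere
langInput.setDefaultValues('cohere', 120);

console.log(langInput.getCohereInputs());
// -> { prompt: ..., model: 'command', temperature: 0.75, max_tokens: 120, num_generations: 1 }
```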
/IntelliNode/utils/SystemHelper.js:
--------------------------------------------------------------------------------
1 | const FileHelper = require('./FileHelper')
2 | const path = require("path");
3 |
4 | class SystemHelper {
5 | constructor() {
6 | this.systemsPath = path.join(__dirname, "..", "resource", "templates");
7 | }
8 |
9 | getPromptPath(fileType) {
10 | let promptPath = '';
11 | if (fileType === "sentiment") {
12 | promptPath = path.join(this.systemsPath, "sentiment_prompt.in");
13 | } else if (fileType === "summary") {
14 | promptPath = path.join(this.systemsPath, "summary_prompt.in");
15 | } else if (fileType === "html_page") {
16 | promptPath = path.join(this.systemsPath, "html_page_prompt.in");
17 | } else if (fileType === "graph_dashboard") {
18 | promptPath = path.join(this.systemsPath, "graph_dashboard_prompt.in");
19 | } else if (fileType === "instruct_update") {
20 | promptPath = path.join(this.systemsPath, "instruct_update.in");
21 | } else if (fileType === "prompt_example") {
22 | promptPath = path.join(this.systemsPath, "prompt_example.in");
23 | } else if (fileType === "augmented_chatbot") {
24 | promptPath = path.join(this.systemsPath, "augmented_chatbot.in");
25 | } else {
26 |       throw new Error(`File type '${fileType}' not supported`);
27 | }
28 |
29 | return promptPath;
30 | }
31 |
32 | loadPrompt(fileType) {
33 | let promptPath = this.getPromptPath(fileType)
34 | const promptTemplate = FileHelper.readData(promptPath, 'utf-8');
35 |
36 | return promptTemplate;
37 |
38 | }
39 |
40 | loadStaticPrompt(fileType) {
41 |
42 | if (fileType === "augmented_chatbot") {
43 | return "Using the provided context, craft a cohesive response that directly addresses the user's query. " +
44 | "If the context lacks relevance or is absent, focus on generating a knowledgeable and accurate answer based on the user's question alone. " +
45 | "Aim for clarity and conciseness in your reply.\n" +
46 | "Context:\n" +
47 | "${semantic_search}" +
48 | "\n---------------------------------\n" +
49 | "User's Question:\n" +
50 | "${user_query}";
51 | }
52 |
53 | }
54 | }
55 |
56 | module.exports = SystemHelper;
--------------------------------------------------------------------------------
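Note: a short sketch combining SystemHelper with the Prompt class shown earlier: load the static augmented chatbot template and fill its ${semantic_search} and ${user_query} placeholders.

```
const SystemHelper = require('./IntelliNode/utils/SystemHelper');
const Prompt = require('./IntelliNode/utils/Prompt');

const template = new SystemHelper().loadStaticPrompt('augmented_chatbot');

// reuse Prompt.format() to fill the placeholders in the template
const filled = new Prompt(template).format({
  semantic_search: 'IntelliNode integrates many AI models.',
  user_query: 'What is IntelliNode?'
});

console.log(filled);
```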
/IntelliNode/model/input/Text2SpeechInput.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | */
8 | class Text2SpeechInput {
9 | constructor({ text, language = "en-gb", gender = "FEMALE", voice, model = 'tts-1', stream = true }) {
10 | this.text = text;
11 | this.language = language.toLowerCase();
12 | this.gender = gender;
13 | this.voice = voice;
14 | this.model = model;
15 | this.stream = stream;
16 | }
17 |
18 | getGoogleInput() {
19 | const params = {
20 | text: this.text,
21 | languageCode: this.language,
22 | };
23 |
24 | if (this.language === "en-gb" || this.language === "en") {
25 | params.name = this.gender === "FEMALE" ? "en-GB-Standard-A" : "en-GB-Standard-B";
26 | params.ssmlGender = this.gender;
27 | } else if (this.language === "tr-tr" || this.language === "tr") {
28 | params.name = this.gender === "FEMALE" ? "tr-TR-Standard-A" : "tr-TR-Standard-B";
29 | params.ssmlGender = this.gender;
30 | } else if (this.language === "cmn-cn" || this.language === "cn") {
31 | params.name = this.gender === "FEMALE" ? "cmn-CN-Standard-A" : "cmn-CN-Standard-B";
32 | params.ssmlGender = this.gender;
33 | } else if (this.language === "de-de" || this.language === "de") {
34 | params.name = this.gender === "FEMALE" ? "de-DE-Standard-A" : "de-DE-Standard-B";
35 | params.ssmlGender = this.gender;
36 | } else if (this.language === "ar-xa" || this.language === "ar") {
37 | params.name = this.gender === "FEMALE" ? "ar-XA-Wavenet-A" : "ar-XA-Standard-B";
38 | params.ssmlGender = this.gender;
39 | } else {
40 | throw new Error("Unsupported language code: " + this.language);
41 | }
42 |
43 | return params;
44 | }
45 |
46 | getOpenAIInput() {
47 | const params = {
48 | input: this.text,
49 | voice: this.voice,
50 | model: this.model,
51 | stream: this.stream
52 | };
53 | return params;
54 | }
55 | }
56 |
57 | Text2SpeechInput.Gender = {
58 | FEMALE: "FEMALE",
59 | MALE: "MALE",
60 | };
61 |
62 | module.exports = Text2SpeechInput;
63 |
--------------------------------------------------------------------------------
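Note: a quick illustration of how getGoogleInput() above derives the voice name from the language and gender.

```
const Text2SpeechInput = require('./IntelliNode/model/input/Text2SpeechInput');

const input = new Text2SpeechInput({ text: 'Guten Tag!', language: 'de', gender: 'MALE' });

console.log(input.getGoogleInput());
// -> { text: 'Guten Tag!', languageCode: 'de', name: 'de-DE-Standard-B', ssmlGender: 'MALE' }
```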
/samples/content_generator_ui/app.js:
--------------------------------------------------------------------------------
1 | const { Gen } = require("intellinode");
2 | const express = require('express');
3 |
4 | const intelliCode = {
5 | async generateText(prompt) {
6 | // TODO: set the key value
7 | const apiKey = "";
8 | return await Gen.get_marketing_desc(prompt, apiKey);
9 | },
10 |
11 | async generateImage(prompt) {
12 | // TODO: set the key value
13 | const openaiKey = "";
14 | const stabilityKey = "";
15 | return await Gen.generate_image_from_desc(prompt, openaiKey, stabilityKey);
16 | },
17 |
18 | async generateAudio(text, base64 = true) {
19 | // TODO: set the key value
20 | const apiKey = "";
21 | const audioContent = await Gen.generate_speech_synthesis(text, apiKey);
22 | return base64 ? audioContent : Buffer.from(audioContent, "base64");
23 | },
24 | };
25 |
26 | const app = express();
27 | app.use(express.json());
28 | // serve static files
29 | const path = require("path");
30 | app.use(express.static(path.join(__dirname)));
31 |
32 | app.post('/generate-content', async (req, res) => {
33 |   let errorType = '';
34 | try {
35 | const { product, type } = req.body;
36 |     errorType = type;
37 |
38 | if (type === 'text') {
39 | const textPrompt = `Write a marketing copy for ${product}`;
40 | const text = await intelliCode.generateText(textPrompt);
41 | res.send({ text: text });
42 | } else if (type === 'image') {
43 | const imageData = await intelliCode.generateImage(product);
44 | res.send({ imageData: imageData });
45 | } else if (type === 'audio') {
46 | const textPrompt = `Write a marketing copy for ${product}`;
47 | const text = await intelliCode.generateText(textPrompt);
48 | const audioData = await intelliCode.generateAudio(text);
49 | res.send({ audioData: audioData });
50 | } else {
51 | res.status(400).send({ error: 'Invalid request type' });
52 | }
53 | } catch (error) {
54 | console.error('Error in /generate-content:', error);
55 |     res.status(500).send({ error: 'Internal server error', message: `An error occurred while generating ${errorType} content. Make sure the key is valid.` });
56 | }
57 | });
58 |
59 | const PORT = process.env.PORT || 3000;
60 | app.listen(PORT, () => console.log(`Server listening on port ${PORT}`));
61 |
--------------------------------------------------------------------------------
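Note: a minimal client-side sketch for the /generate-content endpoint served by app.js above (for example from front.js); the product value is arbitrary.

```
(async () => {
  // request generated marketing text for a product
  const res = await fetch('/generate-content', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ product: 'smart water bottle', type: 'text' })
  });

  const data = await res.json();
  console.log(data.text);
})();
```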
/IntelliNode/wrappers/GeminiAIWrapper.js:
--------------------------------------------------------------------------------
1 | const config = require('../config.json');
2 | const { readFileSync } = require('fs');
3 | const connHelper = require('../utils/ConnHelper');
4 | const FetchClient = require('../utils/FetchClient');
5 |
6 | class GeminiAIWrapper {
7 | constructor(apiKey) {
8 | this.API_BASE_URL = config.url.gemini.base;
9 | this.API_KEY = apiKey;
10 |
11 | this.client = new FetchClient({
12 | baseURL: this.API_BASE_URL,
13 | headers: {
14 | 'Content-Type': 'application/json'
15 | }
16 | });
17 | }
18 |
19 | async generateContent(params, vision = false) {
20 | const endpoint = vision
21 | ? config.url.gemini.visionEndpoint
22 | : config.url.gemini.contentEndpoint;
23 |
24 | try {
25 | return await this.client.post(endpoint, params, {
26 | // If needed, you can specify { responseType: 'stream' } or 'arraybuffer'
27 | });
28 | } catch (error) {
29 | throw new Error(connHelper.getErrorMessage(error));
30 | }
31 | }
32 |
33 | async imageToText(userInput, filePath, extension) {
34 | const imageData = readFileSync(filePath, { encoding: 'base64' });
35 | const params = {
36 | contents: [
37 | {
38 | parts: [
39 | { text: `${userInput}` },
40 | {
41 | inline_data: {
42 | mime_type: `image/${extension}`,
43 | data: imageData
44 | }
45 | }
46 | ]
47 | }
48 | ]
49 | };
50 | return this.generateContent(params, true);
51 | }
52 |
53 | async getEmbeddings(params) {
54 | const endpoint = config.url.gemini.embeddingEndpoint;
55 | try {
56 | const response = await this.client.post(endpoint, params);
57 | return response.embedding;
58 | } catch (error) {
59 | throw new Error(connHelper.getErrorMessage(error));
60 | }
61 | }
62 |
63 | async getBatchEmbeddings(params) {
64 | const endpoint = config.url.gemini.batchEmbeddingEndpoint;
65 | try {
66 | const response = await this.client.post(endpoint, params);
67 | return response.embeddings;
68 | } catch (error) {
69 | throw new Error(connHelper.getErrorMessage(error));
70 | }
71 | }
72 | }
73 |
74 | module.exports = GeminiAIWrapper;
75 |
--------------------------------------------------------------------------------
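
Illustrative use of the wrapper above — a sketch assuming a valid Gemini key in the environment; the require path and image file name are placeholders:

```js
const GeminiAIWrapper = require('./wrappers/GeminiAIWrapper');

const gemini = new GeminiAIWrapper(process.env.GEMINI_API_KEY);

// sends the prompt plus the base64-encoded image to the vision endpoint
gemini.imageToText('Describe this image in one sentence.', './photo.jpg', 'jpeg')
  .then(result => console.log(JSON.stringify(result, null, 2)))
  .catch(console.error);
```
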
/IntelliNode/test/integration/RemoteLanguageModel.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const { RemoteLanguageModel, SupportedLangModels } = require('../../controller/RemoteLanguageModel');
4 | const LanguageModelInput = require('../../model/input/LanguageModelInput');
5 |
6 | const openaiApiKey = process.env.OPENAI_API_KEY;
7 | const cohereApiKey = process.env.COHERE_API_KEY;
8 |
9 | const openaiLanguageModel = new RemoteLanguageModel(openaiApiKey, SupportedLangModels.OPENAI);
10 | const cohereLanguageModel = new RemoteLanguageModel(cohereApiKey, SupportedLangModels.COHERE);
11 |
12 | async function testOpenAIGenerateOneOutput() {
13 | const langInput = new LanguageModelInput({
14 | prompt: 'Write a product description for any device input adapter.',
15 | model: 'gpt-4o',
16 | temperature: 0.7});
17 |
18 | //console.log('openAI inputs: ', langInput.getOpenAIInputs());
19 |
20 | const results = await openaiLanguageModel.generateText(langInput);
21 | console.log('OpenAI Generate One Output:', results[0]);
22 | assert(results.length > 0, 'Test passed');
23 | }
24 |
25 | async function testOpenAIGenerateMultipleOutputs() {
26 | const langInput = new LanguageModelInput({
27 | prompt: 'Write a product description for any device input adapter.',
28 | model: 'gpt-4o',
29 | numberOfOutputs: 3,
30 | temperature: 0.7});
31 |
32 | //console.log('openAI inputs: ', langInput.getOpenAIInputs());
33 |
34 |
35 | const results = await openaiLanguageModel.generateText(langInput);
36 | console.log('\nOpenAI Generate Multiple Outputs:', results);
37 | assert(results.length > 0, 'Test passed');
38 | }
39 |
40 | async function testCohereGenerateOneOutput() {
41 | const langInput = new LanguageModelInput({prompt:'Write a product description for any device input adapter.'});
42 | langInput.setDefaultValues(SupportedLangModels.COHERE);
43 |
44 | // console.log('cohere inputs: ', langInput.getCohereInputs());
45 |
46 | const results = await cohereLanguageModel.generateText(langInput);
47 | console.log('\nCohere Generate One Output:', results[0]);
48 | assert(results.length > 0, 'Test passed');
49 | }
50 |
51 | (async () => {
52 | await testOpenAIGenerateOneOutput();
53 | await testOpenAIGenerateMultipleOutputs();
54 | await testCohereGenerateOneOutput();
55 | })();
--------------------------------------------------------------------------------
/IntelliNode/function/TextAnalyzer.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | you may not use this file except in compliance with the License.
8 | You may obtain a copy of the License at
9 |
10 | http://www.apache.org/licenses/LICENSE-2.0
11 | */
12 | const { RemoteLanguageModel, SupportedLangModels } = require("../controller/RemoteLanguageModel");
13 | const LanguageModelInput = require("../model/input/LanguageModelInput");
14 | const SystemHelper = require("../utils/SystemHelper");
15 |
16 | class TextAnalyzer {
17 | constructor(keyValue, provider = SupportedLangModels.OPENAI) {
18 | if (!Object.values(SupportedLangModels).includes(provider)) {
19 | throw new Error(`The specified provider '${provider}' is not supported. Supported providers are: ${Object.values(SupportedLangModels).join(", ")}`);
20 | }
21 | this.provider = provider;
22 | this.remoteLanguageModel = new RemoteLanguageModel(keyValue, provider);
23 | this.systemHelper = new SystemHelper();
24 | }
25 |
26 | async summarize(text, options = {}) {
27 | const summaryPromptTemplate = this.systemHelper.loadPrompt("summary");
28 | const prompt = summaryPromptTemplate.replace("${text}", text);
29 | const modelInput = new LanguageModelInput({
30 | prompt,
31 | maxTokens: options.maxTokens || null,
32 | temperature: options.temperature || 0.5,
33 | });
34 | modelInput.setDefaultModels(this.provider);
35 | const [summary] = await this.remoteLanguageModel.generateText(modelInput);
36 | return summary.trim();
37 | }
38 |
39 | async sentimentAnalysis(text, options = {}) {
40 | const mode = this.systemHelper.loadPrompt("sentiment");
41 | const prompt = `${mode}\n\nAnalyze the sentiment of the following text: ${text}\n\nSentiment: `;
42 |
43 | const modelInput = new LanguageModelInput({
44 | prompt,
45 | maxTokens: options.maxTokens || 60,
46 | temperature: options.temperature || 0,
47 | });
48 | modelInput.setDefaultModels(this.provider);
49 | const [sentiment] = await this.remoteLanguageModel.generateText(modelInput);
50 |
51 | const sentiment_output = JSON.parse(sentiment.trim());
52 | return sentiment_output;
53 | }
54 | }
55 |
56 | module.exports = { TextAnalyzer };
--------------------------------------------------------------------------------
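
Typical use of `TextAnalyzer` — a sketch assuming an OpenAI key in the environment; the sentiment result shape (`results.positive/negative/neutral`) matches the integration test later in this snapshot:

```js
const { TextAnalyzer } = require('./function/TextAnalyzer');

const analyzer = new TextAnalyzer(process.env.OPENAI_API_KEY);

(async () => {
  const summary = await analyzer.summarize('IntelliNode unifies access to many AI models behind one interface.');
  console.log('summary:', summary);

  // sentimentAnalysis parses the model reply as JSON, so it throws on malformed output
  const sentiment = await analyzer.sentimentAnalysis('I love this library!');
  console.log('sentiment:', sentiment.results);
})();
```
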
/IntelliNode/test/integration/intellicloudWrapper.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const IntellicloudWrapper = require('../../wrappers/IntellicloudWrapper');
3 | const assert = require('assert');
4 |
5 | // Assuming you set your API key in an environment variable
6 | const oneKey = process.env.INTELLI_ONE_KEY;
7 | const apiBase = process.env.INTELLI_API_BASE;
8 | const intellicloud = new IntellicloudWrapper(oneKey, apiBase);
9 |
10 | async function testSemanticSearch() {
11 | try {
12 | // Replace with your actual query text and k value
13 | const queryText = "Why is Mars called the Red Planet?";
14 | const k = 2;
15 |
16 | const result = await intellicloud.semanticSearch(queryText, k);
17 |
18 | //console.log('Semantic Search Result:\n', result, '\n');
19 | result.forEach(document => {
20 | console.log('- Document Name:', document.document_name);
21 | console.log('Document Type:', document.document_type);
22 | document.data.forEach(dataItem => {
23 | console.log('Text:', dataItem.text);
24 | });
25 | console.log('\n');
26 | });
27 |
28 | assert(result.length > 0, 'Semantic search should return at least one result');
29 |
30 | } catch (error) {
31 | console.error('Semantic Search Test Error:', error);
32 | }
33 | }
34 |
35 | async function testSemanticSearchWithFilter() {
36 | try {
37 | // Replace with your actual query text and k value
38 | const queryText = "Why is Mars called the Red Planet?";
39 | const k = 2;
40 | const doc_name = 'test_mars_article.pdf';
41 |
42 | const result = await intellicloud.semanticSearch(queryText, k, {document_name:doc_name});
43 |
44 | //console.log('Semantic Search Result:\n', result, '\n');
45 | result.forEach(document => {
46 | console.log('- Document Name:', document.document_name);
47 | console.log('Document Type:', document.document_type);
48 | document.data.forEach(dataItem => {
49 | console.log('Text:', dataItem.text);
50 | });
51 | console.log('\n');
52 | });
53 |
54 | assert(result.length > 0, 'Semantic search should return at least one result');
55 |
56 | } catch (error) {
57 | console.error('Semantic Search Test Error:', error);
58 | }
59 | }
60 |
61 | (async () => {
62 | await testSemanticSearch();
63 | await testSemanticSearchWithFilter();
64 | })();
--------------------------------------------------------------------------------
/IntelliNode/utils/ConnHelper.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | */
8 | class ConnHelper {
9 | constructor() {
10 | }
11 |
12 | static convertMapToJson(params) {
13 | return JSON.stringify(params);
14 | }
15 |
16 | static getErrorMessage(error) {
17 | if (error.response && error.response.data) {
18 | return `Unexpected HTTP response: ${error.response.status} Error details: ${JSON.stringify(error.response.data)}`;
19 | }
20 | return error.message;
21 | }
22 |
23 | static readStream(stream) {
24 | return new Promise((resolve, reject) => {
25 | const chunks = [];
26 | stream.on('data', chunk => chunks.push(chunk));
27 | stream.on('error', err => reject(err));
28 | stream.on('end', () => resolve(Buffer.concat(chunks).toString('utf8')));
29 | });
30 | }
31 |
32 | static async lambdaSagemakerInputPass(internal_endpoint,
33 | event,
34 | client,
35 | InvokeEndpointCommand,
36 | log=false) {
37 | if (!event.body) {
38 | return {
39 | statusCode: 400,
40 | body: "Invalid input: " + JSON.stringify(event.body)
41 | };
42 | }
43 | let jsonString = "";
44 | if (typeof event.body === 'object') {
45 | jsonString = JSON.stringify(event.body);
46 | } else {
47 | jsonString = event.body;
48 | }
49 |
50 | const command = new InvokeEndpointCommand({
51 | EndpointName: internal_endpoint,
52 | ContentType: 'application/json',
53 | Body: jsonString,
54 | CustomAttributes: "accept_eula=true",
55 | });
56 |
57 | const response = await client.send(command);
58 |
59 | // Convert buffer to string
60 | const bodyString = Buffer.from(response.Body).toString('utf8');
61 | if (log) {
62 | console.log("Converted Response.Body: ", bodyString);
63 | }
64 |
65 |
66 | try {
67 | return {
68 | statusCode: 200,
69 | body: JSON.stringify(JSON.parse(bodyString))
70 | };
71 |
72 | } catch (error) {
73 | console.error("Parsing Error: ", error);
74 | throw error;
75 | }
76 | }
77 | }
78 |
79 | module.exports = ConnHelper;
80 |
--------------------------------------------------------------------------------
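
For context, `lambdaSagemakerInputPass` above is designed to be called from an AWS Lambda handler; a sketch assuming the v3 `@aws-sdk/client-sagemaker-runtime` package and an endpoint name supplied through the environment:

```js
const { SageMakerRuntimeClient, InvokeEndpointCommand } = require('@aws-sdk/client-sagemaker-runtime');
const ConnHelper = require('./utils/ConnHelper');

const client = new SageMakerRuntimeClient({ region: process.env.AWS_REGION });

// forwards the incoming request body to the SageMaker endpoint and
// returns the parsed model response as the Lambda result
exports.handler = async (event) => {
  return ConnHelper.lambdaSagemakerInputPass(process.env.ENDPOINT_NAME, event, client, InvokeEndpointCommand);
};
```
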
/IntelliNode/test/integration/ChatbotNvidia.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const { Chatbot, SupportedChatModels } = require('../../function/Chatbot');
4 | const { NvidiaInput } = require('../../model/input/ChatModelInput');
5 |
6 | async function testNvidiaGenerateText() {
7 | const bot = new Chatbot(process.env.NVIDIA_API_KEY, SupportedChatModels.NVIDIA);
8 | const input = new NvidiaInput('You are a helpful GPU-savvy assistant.', {
9 | model: 'deepseek-ai/deepseek-r1',
10 | maxTokens: 512,
11 | temperature: 0.6
12 | });
13 | input.addUserMessage('Which number is larger, 9.11 or 9.8?');
14 | const responses = await bot.chat(input);
15 | console.log('\n--- NVIDIA Generate Text ---\n');
16 | console.log(JSON.stringify(responses, null, 2));
17 | assert(responses.length > 0, 'No response from NVIDIA generate text');
18 | }
19 |
20 | async function testNvidiaStream() {
21 | const bot = new Chatbot(process.env.NVIDIA_API_KEY, SupportedChatModels.NVIDIA);
22 | const input = new NvidiaInput('You are a poetic assistant.', {
23 | model: 'meta/llama-3.3-70b-instruct',
24 | maxTokens: 512,
25 | temperature: 0.2,
26 | stream: true
27 | });
28 | input.addUserMessage('Write a limerick about GPU computing performance.');
29 | console.log('\n--- NVIDIA Streaming ---\n');
30 | const stream = bot.stream(input);
31 | for await (const chunk of stream) {
32 | process.stdout.write(chunk);
33 | }
34 | console.log('\n--- End of NVIDIA Streaming ---');
35 | }
36 |
37 | async function testNvidiaDeepSeekStreaming() {
38 | const bot = new Chatbot(process.env.NVIDIA_API_KEY, SupportedChatModels.NVIDIA);
39 | const input = new NvidiaInput('You are a helpful GPU-savvy assistant.', {
40 | model: 'deepseek-ai/deepseek-r1',
41 | maxTokens: 512,
42 | temperature: 0.6,
43 | stream: true
44 | });
45 | input.addUserMessage('Explain the benefits of GPU speed and performance.');
46 | console.log('\n--- NVIDIA Deep Seek Streaming ---\n');
47 | let streamedOutput = '';
48 | for await (const chunk of bot.stream(input)) {
49 | process.stdout.write(chunk);
50 | streamedOutput += chunk;
51 | }
52 | console.log('\n--- End of Deep Seek Streaming ---');
53 | assert(streamedOutput.length > 0, 'No content received from deepseek streaming.');
54 | }
55 |
56 | (async () => {
57 | await testNvidiaGenerateText();
58 | await testNvidiaStream();
59 | await testNvidiaDeepSeekStreaming();
60 | })();
61 |
--------------------------------------------------------------------------------
/samples/content_generator_ui/index.html:
--------------------------------------------------------------------------------
[The HTML markup of this file was stripped during extraction; only its text content survives. The page, titled "Content Generation Platform", provides a product input with a content-type selector, output sections labeled "Generated Text:", "Generated Image:" (with an image placeholder), and "Generated Audio:", plus an error banner reading "An error occurred."]
--------------------------------------------------------------------------------
/samples/command_sample/test_chatbot_cohere.js:
--------------------------------------------------------------------------------
1 | const { Chatbot, CohereInput } = require('intellinode');
2 | // the imports below load the API keys from the .env file
3 | const dotenv = require('dotenv');
4 | dotenv.config();
5 |
6 | async function callChatbot(apiKey, provider) {
7 | const chatbot = new Chatbot(apiKey, provider);
8 |
9 | const system = 'You are a helpful assistant.';
10 | const input = new CohereInput(system);
11 | input.addUserMessage('what is the story of Batman: The Dark Knight in less than 50 words');
12 |
13 | const responses = await chatbot.chat(input);
14 |
15 | console.log(`Chatbot responses (${provider}):`);
16 | responses.forEach(response => console.log('- ', response));
17 | }
18 |
19 |
20 | async function callMultiMessageChatbot(apiKey, provider) {
21 | const chatbot = new Chatbot(apiKey, provider);
22 |
23 | const system = 'You are a helpful assistant.';
24 | const input = new CohereInput(system);
25 | input.addUserMessage("Explain the plot of the Inception movie in one line");
26 | input.addAssistantMessage("The plot of the movie Inception follows a skilled thief who enters people's dreams to steal their secrets and is tasked with implanting an idea into a target's mind to alter their future actions.");
27 | input.addUserMessage("Explain the plot of the dark night movie in one line");
28 |
29 | const responses = await chatbot.chat(input);
30 |
31 | console.log(`Chatbot responses (${provider}):`);
32 | responses.forEach(response => console.log('- ', response));
33 | }
34 |
35 | async function callChatbotStream(apiKey, provider) {
36 | const chatbot = new Chatbot(apiKey, provider);
37 |
38 | const system = 'You are a helpful assistant.';
39 | const input = new CohereInput(system);
40 | input.addUserMessage('what is the story of Batman: The Dark Knight in less than 50 words');
41 |
42 | let response = '';
43 | for await (const contentText of chatbot.stream(input)) {
44 | response += contentText;
45 | console.log('Received chunk:', contentText);
46 | }
47 |
48 | console.log(`Chatbot responses (${provider}):`);
49 | console.log('the full response: ', response)
50 | }
51 |
52 | (async () => {
53 | // Test chatbot using Cohere
54 | console.log('test the chat function')
55 | await callChatbot(process.env.COHERE_API_KEY, 'cohere');
56 |
57 | console.log('test the multi message')
58 | await callMultiMessageChatbot(process.env.COHERE_API_KEY, 'cohere');
59 |
60 | console.log('test the stream function')
61 | await callChatbotStream(process.env.COHERE_API_KEY, 'cohere');
62 | })();
63 |
--------------------------------------------------------------------------------
/samples/command_sample/README.md:
--------------------------------------------------------------------------------
1 | # IntelliNode Sample
2 |
3 | ## Setup
4 | ```
5 | npm install
6 | ```
7 |
8 | ## Environment
9 | Create a `.env` file with the required access keys:
10 |
11 | ```sh
12 | # Access keys for various models
13 | OPENAI_API_KEY=
14 | COHERE_API_KEY=
15 | GOOGLE_API_KEY=
16 | STABILITY_API_KEY=
17 | HUGGING_API_KEY=
18 | REPLICATE_API_KEY=
19 |
20 | # Optional - AWS access credentials for S3 automation sample
21 | AWS_ACCESS_KEY_ID=
22 | AWS_SECRET_ACCESS_KEY=
23 | ```
24 | The Llama model is available through several hosting services; Replicate is one of the platforms that host it.
25 |
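Each sample loads these keys at startup through `dotenv`; a minimal sketch of the pattern the scripts use:

```js
// load the keys above from the .env file into process.env
require('dotenv').config();
console.log(Boolean(process.env.OPENAI_API_KEY)); // true once the key is set
```
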
26 | ## Samples Execution
27 |
28 | 1. E-commerce sample to generate product descriptions and images:
29 | `node ecommerce_tool.js`
30 |
31 | 2. Language model using openai and cohere:
32 | `node test_language_models.js`
33 |
34 | 3. Image model using stable diffusion and DALL·E 2:
35 | `node test_image_models.js`
36 |
37 | 4. Generate shiba images for fun:
38 | `node shiba_image_generator.js`
39 |
40 | 5. Speech synthesis:
41 | `node test_speech_models.js`
42 |
43 | 6. Chatbot using ChatGPT:
44 | `node test_chatbot.js`
45 |
46 | 7. Semantic search:
47 | `node test_semantic_search.js`
48 |
49 | 8. Semantic search pagination to work with large datasets:
50 | `node test_semantic_search_pagination.js`
51 |
52 | 9. Text analyzer (summary & sentiment analysis):
53 | `node test_text_analyzer.js`
54 |
55 | 10. Hugging Face simplified inference access:
56 | `node test_hugging_face.js`
57 |
58 | 11. Azure OpenAI sample:
59 | `node test_azure_chatbot.js`
60 |
61 | 12. Automation sample using the chatbot function-call flow; it works by providing the model with your function details, and the model decides whether to execute the code based on the user conversation:
62 | `node automate_s3_bucket.js`
63 |
64 | 13. Llama V2 chatbot:
65 | `node test_llama_chatbot.js`
66 |
67 | 14. LLM evaluator to compare models like ChatGPT, Cohere, and Llama:
68 | `node test_llm_evaluation.test.js`
69 |
70 |
71 | ## Access Keys
72 | Generate your access keys from the corresponding websites; you only need to generate keys for the models you'll use.
73 | For instance, if you're using the language model from OpenAI, there's no need for Cohere's keys.
74 |
75 | 1. openai: https://openai.com
76 | 2. cohere: https://cohere.com
77 | 3. google: https://console.cloud.google.com
78 | 4. stability: https://stability.ai
79 | 5. huggingface: https://huggingface.co
80 | 6. Replicate: https://replicate.com
81 |
--------------------------------------------------------------------------------
/IntelliNode/model/input/ImageModelInput.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | */
8 | class ImageModelInput {
9 | constructor({
10 | prompt,
11 | numberOfImages = 1,
12 | imageSize = null,
13 | responseFormat = null,
14 | width = null,
15 | height = null,
16 | diffusion_cfgScale = null,
17 | diffusion_style_preset = null,
18 | engine = null,
19 | model = null,
20 | }) {
21 | this.prompt = prompt;
22 | this.numberOfImages = numberOfImages;
23 | this.imageSize = imageSize;
24 | this.responseFormat = responseFormat;
25 | this.width = width;
26 | this.height = height;
27 | this.diffusion_cfgScale = diffusion_cfgScale;
28 | this.diffusion_style_preset = diffusion_style_preset;
29 | this.engine = engine;
30 | this.model = model;
31 | if (width != null && height != null && imageSize == null) {
32 | this.imageSize = width + 'x' + height;
33 | } else if (width == null && height == null && imageSize != null) {
34 | const sizesParts = imageSize.split('x').map(Number);
35 | this.width = sizesParts[0];
36 | this.height = sizesParts[1];
37 | }
38 | }
39 |
40 | getOpenAIInputs() {
41 |
42 | const inputs = {
43 | prompt: this.prompt,
44 | ...this.numberOfImages && { n: this.numberOfImages },
45 | ...this.imageSize && { size: this.imageSize },
46 | ...this.responseFormat && { response_format: this.responseFormat },
47 | ...this.model && { model: this.model }
48 | };
49 |
50 | return inputs;
51 | }
52 |
53 | getStabilityInputs() {
54 | const inputs = {
55 | text_prompts: [{ text: this.prompt }],
56 | ...this.numberOfImages && { samples: this.numberOfImages },
57 | ...this.height && { height: this.height },
58 | ...this.width && { width: this.width },
59 | ...this.diffusion_cfgScale && { cfg_scale: this.diffusion_cfgScale },
60 | ...this.diffusion_style_preset && {style_preset: this.diffusion_style_preset},
61 | ...this.engine && { engine: this.engine }
62 | };
63 |
64 | return inputs;
65 | }
66 |
67 | setDefaultValues(provider) {
68 | if (provider === "openai") {
69 | this.numberOfImages = 1;
70 | this.imageSize = '1024x1024';
71 | } else if (provider === "stability") {
72 | this.numberOfImages = 1;
73 | this.height = 512;
74 | this.width = 512;
75 | this.engine = 'stable-diffusion-xl-beta-v2-2-2';
76 | } else {
77 | throw new Error("Invalid provider name");
78 | }
79 | }
80 | }
81 |
82 | module.exports = ImageModelInput;
--------------------------------------------------------------------------------
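
The constructor above keeps `imageSize` and `width`/`height` in sync; a short illustration of that behavior (the prompt text is arbitrary):

```js
const ImageModelInput = require('./model/input/ImageModelInput');

const bySides = new ImageModelInput({ prompt: 'a red bicycle', width: 512, height: 512 });
console.log(bySides.imageSize); // "512x512", derived from width and height

const bySize = new ImageModelInput({ prompt: 'a red bicycle', imageSize: '1024x1024' });
console.log(bySize.width, bySize.height); // 1024 1024, parsed from imageSize
```
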
/IntelliNode/controller/RemoteSpeechModel.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | */
8 | const GoogleAIWrapper = require('../wrappers/GoogleAIWrapper');
9 | const OpenAIWrapper = require('../wrappers/OpenAIWrapper');
10 | const Text2SpeechInput = require('../model/input/Text2SpeechInput');
11 |
12 | const SupportedSpeechModels = {
13 | GOOGLE: 'google',
14 | OPENAI: 'openAi',
15 | };
16 |
17 | class RemoteSpeechModel {
18 | constructor(keyValue, provider) {
19 | if (!provider) {
20 | provider = SupportedSpeechModels.GOOGLE;
21 | }
22 |
23 | const supportedModels = this.getSupportedModels();
24 |
25 | if (supportedModels.includes(provider)) {
26 | this.initiate(keyValue, provider);
27 | } else {
28 | const models = supportedModels.join(' - ');
29 | throw new Error(`The received keyValue is not supported. Send any model from: ${models}`);
30 | }
31 | }
32 |
33 | initiate(keyValue, keyType) {
34 | this.keyType = keyType;
35 |
36 | if (keyType === SupportedSpeechModels.GOOGLE) {
37 | this.googleWrapper = new GoogleAIWrapper(keyValue);
38 | } else if (keyType === SupportedSpeechModels.OPENAI) {
39 | this.openAIWrapper = new OpenAIWrapper(keyValue);
40 | } else {
41 | throw new Error('Invalid provider name');
42 | }
43 | }
44 |
45 | getSupportedModels() {
46 | return Object.values(SupportedSpeechModels);
47 | }
48 |
49 | async generateSpeech(input) {
50 | if (this.keyType === SupportedSpeechModels.GOOGLE) {
51 | let params;
52 |
53 | if (input instanceof Text2SpeechInput) {
54 | params = input.getGoogleInput();
55 | } else if (typeof input === 'object') {
56 | params = input;
57 | } else {
58 | throw new Error('Invalid input: Must be an instance of Text2SpeechInput or a dictionary');
59 | }
60 |
61 | const response = await this.googleWrapper.generateSpeech(params);
62 | return response.audioContent;
63 | } else if (this.keyType === SupportedSpeechModels.OPENAI) {
64 | let params;
65 |
66 | if (input instanceof Text2SpeechInput) {
67 | params = input.getOpenAIInput();
68 | } else if (typeof input === 'object') {
69 | params = input;
70 | } else {
71 | throw new Error('Invalid input: Must be an instance of Text2SpeechInput or a dictionary');
72 | }
73 |
74 | const response = await this.openAIWrapper.textToSpeech(params);
75 | return response;
76 | } else {
77 | throw new Error('The keyType is not supported');
78 | }
79 | }
80 | }
81 |
82 | module.exports = {
83 | RemoteSpeechModel,
84 | SupportedSpeechModels,
85 | };
86 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | lerna-debug.log*
8 | .pnpm-debug.log*
9 |
10 | # Diagnostic reports (https://nodejs.org/api/report.html)
11 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
12 |
13 | # Runtime data
14 | pids
15 | *.pid
16 | *.seed
17 | *.pid.lock
18 |
19 | # Directory for instrumented libs generated by jscoverage/JSCover
20 | lib-cov
21 |
22 | # Coverage directory used by tools like istanbul
23 | coverage
24 | *.lcov
25 |
26 | # nyc test coverage
27 | .nyc_output
28 |
29 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
30 | .grunt
31 |
32 | # Bower dependency directory (https://bower.io/)
33 | bower_components
34 |
35 | # node-waf configuration
36 | .lock-wscript
37 |
38 | # Compiled binary addons (https://nodejs.org/api/addons.html)
39 | build/Release
40 |
41 | # Dependency directories
42 | node_modules/
43 | jspm_packages/
44 |
45 | # Snowpack dependency directory (https://snowpack.dev/)
46 | web_modules/
47 |
48 | # TypeScript cache
49 | *.tsbuildinfo
50 |
51 | # Optional npm cache directory
52 | .npm
53 |
54 | # Optional eslint cache
55 | .eslintcache
56 |
57 | # Optional stylelint cache
58 | .stylelintcache
59 |
60 | # Microbundle cache
61 | .rpt2_cache/
62 | .rts2_cache_cjs/
63 | .rts2_cache_es/
64 | .rts2_cache_umd/
65 |
66 | # Optional REPL history
67 | .node_repl_history
68 |
69 | # Output of 'npm pack'
70 | *.tgz
71 |
72 | # Yarn Integrity file
73 | .yarn-integrity
74 |
75 | # dotenv environment variable files
76 | .env
77 | .env.development.local
78 | .env.test.local
79 | .env.production.local
80 | .env.local
81 |
82 | # parcel-bundler cache (https://parceljs.org/)
83 | .cache
84 | .parcel-cache
85 |
86 | # Next.js build output
87 | .next
88 | out
89 |
90 | # Nuxt.js build / generate output
91 | .nuxt
92 | dist
93 |
94 | # Gatsby files
95 | .cache/
96 | # Comment in the public line if your project uses Gatsby and not Next.js
97 | # https://nextjs.org/blog/next-9-1#public-directory-support
98 | # public
99 |
100 | # vuepress build output
101 | .vuepress/dist
102 |
103 | # vuepress v2.x temp and cache directory
104 | .temp
105 | .cache
106 |
107 | # Serverless directories
108 | .serverless/
109 |
110 | # FuseBox cache
111 | .fusebox/
112 |
113 | # DynamoDB Local files
114 | .dynamodb/
115 |
116 | # TernJS port file
117 | .tern-port
118 |
119 | # Stores VSCode versions used for testing VSCode extensions
120 | .vscode-test
121 |
122 | # yarn v2
123 | .yarn/cache
124 | .yarn/unplugged
125 | .yarn/build-state.yml
126 | .yarn/install-state.gz
127 | .pnp.*
128 |
129 | #custom
130 | temp/
131 | */.idea/
132 | samples/command_sample/.DS_Store
133 | .DS_Store
134 |
--------------------------------------------------------------------------------
/IntelliNode/test/integration/CustomGen.test.js:
--------------------------------------------------------------------------------
1 | const { Gen } = require("../function/Gen");
2 | require("dotenv").config();
3 | const assert = require("assert");
4 | const fs = require('fs');
5 |
6 | const openaiApiKey = process.env.OPENAI_API_KEY;
7 | const cohereApiKey = process.env.COHERE_API_KEY;
8 | const stabilityApiKey = process.env.STABILITY_API_KEY;
9 | const googleApiKey = process.env.GOOGLE_API_KEY;
10 |
11 | async function testGetMarketingDesc(custom_provider) {
12 |
13 | const prompt = "gaming chair.";
14 | let marketingDesc = '';
15 | if (custom_provider == 'openai') {
16 | marketingDesc = await Gen.get_marketing_desc(prompt, openaiApiKey);
17 | } else if (custom_provider == 'cohere') {
18 | marketingDesc = await Gen.get_marketing_desc(prompt, cohereApiKey, custom_provider);
19 | }
20 |
21 | // console.log("Marketing Description:", marketingDesc);
22 |
23 | assert(marketingDesc.length > 0, "Test passed");
24 | }
25 |
26 | async function testGetBlogPost(custom_provider) {
27 |
28 | const prompt = "bitcoin positive and negative impact.";
29 | let result = '';
30 | if (custom_provider == 'openai') {
31 | result = await Gen.get_blog_post(prompt, openaiApiKey);
32 | } else if (custom_provider == 'cohere') {
33 | result = await Gen.get_blog_post(prompt, cohereApiKey, custom_provider);
34 | }
35 |
36 | // console.log("model output:", result);
37 |
38 | assert(result.length > 0, "Test passed");
39 | }
40 |
41 | async function testGenerateImageFromDesc(custom_provider) {
42 | const prompt = "Generate an image of a futuristic city skyline.";
43 | let image = '';
44 | if (custom_provider == 'stability') {
45 | image = await Gen.generate_image_from_desc(prompt, openaiApiKey, stabilityApiKey, true);
46 | } else if (custom_provider == 'openai'){
47 | image = await Gen.generate_image_from_desc(prompt, openaiApiKey,
48 | openaiApiKey, true, 'openai');
49 | }
50 | // console.log("Generated Image (Base64):", image);
51 | assert(image.length > 10, "Test passed");
52 | }
53 |
54 | (async () => {
55 |
56 | console.log('marketing description using openai')
57 | await testGetMarketingDesc('openai');
58 |
59 | console.log('\n')
60 |
61 | console.log('marketing description using cohere')
62 | await testGetMarketingDesc('cohere');
63 |
64 | console.log('\n')
65 |
66 | console.log('blog using openai')
67 | await testGetBlogPost('openai');
68 |
69 | console.log('\n')
70 |
71 | console.log('blog using cohere')
72 | await testGetBlogPost('cohere');
73 |
74 | console.log('\n')
75 |
76 | console.log('stability image')
77 | await testGenerateImageFromDesc('stability');
78 |
79 | console.log('DALL·E 2 image')
80 | await testGenerateImageFromDesc('openai');
81 |
82 | })();
--------------------------------------------------------------------------------
/IntelliNode/controller/RemoteLanguageModel.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | */
8 | const OpenAIWrapper = require('../wrappers/OpenAIWrapper');
9 | const CohereAIWrapper = require('../wrappers/CohereAIWrapper');
10 | const LanguageModelInput = require('../model/input/LanguageModelInput');
11 |
12 | const SupportedLangModels = {
13 | OPENAI: 'openai',
14 | COHERE: 'cohere',
15 | };
16 |
17 | class RemoteLanguageModel {
18 | constructor(keyValue, provider) {
19 | if (!provider) {
20 | provider = SupportedLangModels.OPENAI;
21 | }
22 |
23 | const supportedModels = RemoteLanguageModel.getSupportedModels();
24 |
25 | if (supportedModels.includes(provider)) {
26 | this.initiate(keyValue, provider);
27 | } else {
28 | const models = supportedModels.join(' - ');
29 | throw new Error(`The received keyValue is not supported. Send any model from: ${models}`);
30 | }
31 | }
32 |
33 | initiate(keyValue, keyType) {
34 | this.keyType = keyType;
35 |
36 | if (keyType === SupportedLangModels.OPENAI) {
37 | this.openaiWrapper = new OpenAIWrapper(keyValue);
38 | } else if (keyType === SupportedLangModels.COHERE) {
39 | this.cohereWrapper = new CohereAIWrapper(keyValue);
40 | } else {
41 | throw new Error('Invalid provider name');
42 | }
43 | }
44 |
45 | static getSupportedModels() {
46 | return Object.values(SupportedLangModels);
47 | }
48 |
49 | async generateText(langInput) {
50 |
51 | let inputs;
52 |
53 | if (langInput instanceof LanguageModelInput) {
54 | if (this.keyType === SupportedLangModels.OPENAI) {
55 | inputs = langInput.getOpenAIInputs();
56 | } else if (this.keyType === SupportedLangModels.COHERE) {
57 | inputs = langInput.getCohereInputs();
58 | } else {
59 | throw new Error('The keyType is not supported');
60 | }
61 | } else if (typeof langInput === 'object') {
62 | inputs = langInput;
63 | } else {
64 | throw new Error('Invalid input: Must be an instance of LanguageModelInput or a dictionary');
65 | }
66 |
67 | if (this.keyType === SupportedLangModels.OPENAI) {
68 | const results = await this.openaiWrapper.generateText(inputs);
69 | return results.choices.map((choice) => choice.text);
70 | } else if (this.keyType === SupportedLangModels.COHERE) {
71 | const results = await this.cohereWrapper.generateText(inputs);
72 | return results.generations.map((generation) => generation.text);
73 | } else {
74 | throw new Error('The keyType is not supported');
75 | }
76 | }
77 | }
78 |
79 | module.exports = {
80 | RemoteLanguageModel,
81 | SupportedLangModels,
82 | };
--------------------------------------------------------------------------------
/IntelliNode/wrappers/NvidiaWrapper.js:
--------------------------------------------------------------------------------
1 | const config = require('../config.json');
2 | const connHelper = require('../utils/ConnHelper');
3 | const FetchClient = require('../utils/FetchClient');
4 |
5 | class NvidiaWrapper {
6 | /**
7 | * @param {string} apiKey - API key (if required for cloud usage)
8 | * @param {object} [options] - Optional settings.
9 | * options.baseUrl: Override the default base URL.
10 | */
11 | constructor(apiKey, options = {}) {
12 | // use the provided baseUrl (e.g. local NIM) or the default cloud URL
13 | this.API_BASE_URL = options.baseUrl || config.nvidia.base;
14 | this.ENDPOINT_CHAT = config.nvidia.chat;
15 | this.VERSION = config.nvidia.version;
16 |
17 | // build headers
18 | let headers = {
19 | 'Content-Type': 'application/json',
20 | Accept: 'application/json'
21 | };
22 | if (apiKey) {
23 | headers.Authorization = `Bearer ${apiKey}`;
24 | }
25 |
26 | this.client = new FetchClient({
27 | baseURL: this.API_BASE_URL,
28 | headers: headers
29 | });
30 | }
31 |
32 | async generateText(params) {
33 | if (params.stream === undefined) {
34 | params.stream = false;
35 | }
36 | try {
37 | const extraConfig = params.stream ? { responseType: 'stream' } : {};
38 | return await this.client.post(this.ENDPOINT_CHAT, params, extraConfig);
39 | } catch (error) {
40 | throw new Error(connHelper.getErrorMessage(error));
41 | }
42 | }
43 |
44 | async generateTextStream(params) {
45 | params.stream = true;
46 | try {
47 | return await this.client.post(this.ENDPOINT_CHAT, params, {
48 | responseType: 'stream'
49 | });
50 | } catch (error) {
51 | throw new Error(connHelper.getErrorMessage(error));
52 | }
53 | }
54 |
55 | /**
56 | * Generates embeddings using NVIDIA's embedding endpoint.
57 | * Expects the user to pass a `model` field inside params so that the endpoint
58 | * is constructed as:
59 | * {config.nvidia.embedding}/{model}/embeddings
60 | *
61 | * @param {object} params - Must include `model` and other required fields.
62 | */
63 | async generateRetrieval(params) {
64 | if (!params.model) {
65 | throw new Error("Missing 'model' parameter for embeddings");
66 | }
67 | // use the embedding base endpoint from config and append the user-specified model name.
68 | const baseEmbedding = config.nvidia.retrieval;
69 | // model name example snowflake/arctic-embed
70 | const embeddingEndpoint = `${baseEmbedding}/${params.model}/embeddings`;
71 | try {
72 | return await this.client.post(embeddingEndpoint, params);
73 | } catch (error) {
74 | throw new Error(connHelper.getErrorMessage(error));
75 | }
76 | }
77 | }
78 |
79 | module.exports = NvidiaWrapper;
80 |
--------------------------------------------------------------------------------
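
A sketch of a retrieval call against the wrapper above, assuming a valid NVIDIA key; the model name follows the `snowflake/arctic-embed` example in the comment, and the `input`/`input_type` fields mirror NVIDIA's embedding API (both shown here as assumptions):

```js
const NvidiaWrapper = require('./wrappers/NvidiaWrapper');

const nvidia = new NvidiaWrapper(process.env.NVIDIA_API_KEY);

// posts to {config.nvidia.retrieval}/{model}/embeddings
nvidia.generateRetrieval({
  model: 'snowflake/arctic-embed-l',
  input: ['What is the capital of France?'],
  input_type: 'query'
})
  .then(response => console.log(response))
  .catch(console.error);
```
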
/IntelliNode/test/integration/ChatbotVLLM.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const { Chatbot, SupportedChatModels } = require('../../function/Chatbot');
4 | const { VLLMInput } = require('../../model/input/ChatModelInput');
5 |
6 | // URLs from environment
7 | const urls = {
8 | deepseek: process.env.DEEPSEEK_VLLM_URL,
9 | gemma: process.env.GEMMA_VLLM_URL,
10 | llama: process.env.LLAMA_VLLM_URL,
11 | mistral: process.env.MISTRAL_VLLM_URL,
12 | embed: process.env.EMBED_VLLM_URL,
13 | };
14 |
15 | async function testModel(name, url, model, prompt, isChatSupported = true) { // isChatSupported is informational only; every model here is called through the chat API
16 | try {
17 | const bot = new Chatbot(null, SupportedChatModels.VLLM, null, { baseUrl: url });
18 |
19 | const input = new VLLMInput("You are a helpful assistant.", {
20 | model,
21 | maxTokens: 100,
22 | temperature: 0.7,
23 | });
24 |
25 | input.addUserMessage(prompt);
26 |
27 | const response = await bot.chat(input);
28 | console.log(`${name} response:`, response);
29 | assert(response[0].length > 0);
30 | } catch (error) {
31 | console.error(`${name} error:`, error.message);
32 | }
33 | }
34 |
35 | async function testVLLMStreaming() {
36 | try {
37 | console.log('\nTesting VLLM streaming with Mistral:');
38 | const bot = new Chatbot(null, SupportedChatModels.VLLM, null, { baseUrl: urls.mistral });
39 |
40 | const input = new VLLMInput("You are a helpful assistant.", {
41 | model: 'mistralai/Mistral-7B-Instruct-v0.2',
42 | maxTokens: 100,
43 | temperature: 0.7
44 | });
45 |
46 | console.log('vllm input: ', input)
47 |
48 | input.addUserMessage("What is machine learning?");
49 |
50 | let fullText = '';
51 | for await (const contentText of bot.stream(input)) {
52 | fullText += contentText;
53 | console.log('Received chunk:', contentText);
54 | }
55 |
56 | console.log('Full stream text: ', fullText);
57 | assert(fullText.length > 0);
58 | } catch (error) {
59 | console.error('VLLM streaming error:', error.message);
60 | }
61 | }
62 |
63 | (async () => {
64 | await testModel(
65 | 'Deepseek',
66 | urls.deepseek,
67 | 'deepseek-ai/DeepSeek-R1-Distill-Llama-8B',
68 | 'What is machine learning?'
69 | );
70 |
71 | await testModel(
72 | 'Gemma',
73 | urls.gemma,
74 | 'google/gemma-2-2b-it',
75 | 'What is machine learning?',
76 | false // Gemma does NOT support chat endpoint
77 | );
78 |
79 | await testModel(
80 | 'LLama',
81 | urls.llama,
82 | 'meta-llama/Llama-3.1-8B-Instruct',
83 | 'What is machine learning?'
84 | );
85 |
86 | await testModel(
87 | 'Mistral',
88 | urls.mistral,
89 | 'mistralai/Mistral-7B-Instruct-v0.2',
90 | 'What is machine learning?'
91 | );
92 | await testVLLMStreaming();
93 | })();
--------------------------------------------------------------------------------
/IntelliNode/controller/RemoteFineTuneModel.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | */
8 | const OpenAIWrapper = require('../wrappers/OpenAIWrapper');
9 | const FineTuneInput = require('../model/input/FineTuneInput');
10 |
11 | const SupportedFineTuneModels = {
12 | OPENAI: 'openAi',
13 | };
14 |
15 | class RemoteFineTuneModel {
16 | constructor(keyValue, provider) {
17 | if (!provider) {
18 | provider = SupportedFineTuneModels.OPENAI;
19 | }
20 |
21 | const supportedModels = this.getSupportedModels();
22 |
23 | if (supportedModels.includes(provider)) {
24 | this.initiate(keyValue, provider);
25 | } else {
26 | const models = supportedModels.join(' - ');
27 | throw new Error(`The received keyValue is not supported. Send any model from: ${models}`);
28 | }
29 | }
30 |
31 | initiate(keyValue, keyType) {
32 | this.keyType = keyType;
33 |
34 | if (keyType === SupportedFineTuneModels.OPENAI) {
35 | this.openAIWrapper = new OpenAIWrapper(keyValue);
36 | } else {
37 | throw new Error('Invalid provider name');
38 | }
39 | }
40 |
41 | getSupportedModels() {
42 | return Object.values(SupportedFineTuneModels);
43 | }
44 |
45 | async generateFineTune(input) {
46 | if (this.keyType === SupportedFineTuneModels.OPENAI) {
47 | let params;
48 | if (input instanceof FineTuneInput) {
49 | params = input.getOpenAIInput();
50 | } else if (typeof input === 'object') {
51 | params = input;
52 | } else {
53 | throw new Error('Invalid input: Must be an instance of FineTuneInput or a dictionary');
54 | }
55 |
56 | const response = await this.openAIWrapper.storeFineTuningData(params);
57 | return response;
58 | } else {
59 | throw new Error('The keyType is not supported');
60 | }
61 | }
62 |
63 | async listFineTune(input) {
64 | if (this.keyType === SupportedFineTuneModels.OPENAI) {
65 | const response = await this.openAIWrapper.listFineTuningData(input);
66 | return response;
67 | } else {
68 | throw new Error('The keyType is not supported');
69 | }
70 | }
71 |
72 | async uploadFile(filePayload) {
73 | if (this.keyType === SupportedFineTuneModels.OPENAI) {
74 | return await this.openAIWrapper.uploadFile(filePayload);
75 | } else {
76 | throw new Error('The keyType is not supported');
77 | }
78 | }
79 | }
80 |
81 | module.exports = {
82 | RemoteFineTuneModel,
83 | SupportedFineTuneModels,
84 | };
85 |
--------------------------------------------------------------------------------
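
End-to-end use of the controller above — a sketch assuming an OpenAI key; the payload fields (`training_file`, `model`) follow OpenAI's fine-tuning API, the file id is a placeholder, and `listFineTune` is shown without a filter:

```js
const { RemoteFineTuneModel, SupportedFineTuneModels } = require('./controller/RemoteFineTuneModel');

const tuner = new RemoteFineTuneModel(process.env.OPENAI_API_KEY, SupportedFineTuneModels.OPENAI);

(async () => {
  // plain objects are accepted as-is and forwarded to the wrapper
  const job = await tuner.generateFineTune({ training_file: 'file-abc123', model: 'gpt-3.5-turbo' });
  console.log('fine-tune job:', job);

  const jobs = await tuner.listFineTune();
  console.log('existing jobs:', jobs);
})();
```
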
/IntelliNode/test/integration/AzureOpenAIWrapper.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const OpenAIWrapper = require('../../wrappers/OpenAIWrapper');
4 | const proxyHelper = require('../../utils/ProxyHelper').getInstance();
5 | const {
6 | createReadStream, readFileSync, createWriteStream, existsSync
7 | } = require('fs');
8 | let openAI = null;
9 |
10 | async function testLanguageModel() {
11 | try {
12 | const params = {
13 | model: 'davinci_003',
14 | prompt: 'Summarize the plot of the Inception movie in two sentences',
15 | max_tokens: 50,
16 | n: 1,
17 | stop: '',
18 | temperature: 0.7
19 | };
20 |
21 | const result = await openAI.generateText(params);
22 | const responseText = result['choices'][0]['text'].trim();
23 | console.log('Language Model Result:\n', responseText, '\n');
24 | assert(responseText.length > 0, 'testLanguageModel response length should be greater than 0');
25 | } catch (error) {
26 | console.error('Language Model Error:', error);
27 | }
28 | }
29 |
30 | async function testChatGPT() {
31 | try {
32 | const params = {
33 | model: 'gpt_basic',
34 | messages: [
35 | {role: 'system', content: 'You are a helpful assistant.'},
36 | {role: 'user', content: 'Generate a product description for black and white standing desk.'}
37 | ],
38 | max_tokens: 100,
39 | temperature: 0.8
40 | };
41 |
42 | const result = await openAI.generateChatText(params);
43 | const responseText = result['choices'][0]['message']['content'].trim();
44 | console.log('ChatGPT Result: \n', responseText, '\n');
45 | assert(responseText.length > 0, 'testChatGPT response length should be greater than 0');
46 | } catch (error) {
47 | console.error('ChatGPT Error:', error);
48 | }
49 | }
50 |
51 | async function testEmbeddings() {
52 | try {
53 | const params = {
54 | input: 'IntelliNode provide lightning-fast access to the latest deep learning models',
55 | model: 'embed_latest',
56 | };
57 |
58 | const result = await openAI.getEmbeddings(params);
59 | const embeddings = result['data'];
60 | console.log('Embeddings Result:\n', embeddings[0]['embedding'].slice(0, 50), '\n');
61 | assert(embeddings.length > 0, 'testEmbeddings response length should be greater than 0');
62 | } catch (error) {
63 | console.error('Embeddings Error:', error);
64 | }
65 | }
66 |
67 | (async () => {
68 | const args = process.argv.slice(2);
69 | const resourceName = args[0];
70 | // set azure openai parameters
71 | proxyHelper.setAzureOpenai(resourceName);
72 | openAI = new OpenAIWrapper(process.env.AZURE_OPENAI_API_KEY);
73 |
74 | await testLanguageModel();
75 | await testChatGPT();
76 | await testEmbeddings();
77 | // request access to image generation api
78 | // await testImageModel();
79 | })();
80 |
--------------------------------------------------------------------------------
/samples/command_sample/test_llama_chatbot.js:
--------------------------------------------------------------------------------
1 | const { Chatbot, LLamaReplicateInput, LLamaSageInput, SupportedChatModels } = require('intellinode');
2 | // the imports below load the API keys from the .env file
3 | const dotenv = require('dotenv');
4 | dotenv.config();
5 |
6 | async function callReplicaChatbot(apiKey, provider) {
7 | const chatbot = new Chatbot(apiKey, provider);
8 |
9 | const system = 'You are a helpful assistant.';
10 | const input = new LLamaReplicateInput(system);
11 | input.addUserMessage('Explain the plot of the Inception movie in one line.');
12 |
13 | const response = await chatbot.chat(input);
14 |
15 | console.log(`Chatbot response (${provider}):`);
16 | console.log(`${response}`);
17 |
18 |
19 | }
20 |
21 | async function callReplicaCoderChatbot(apiKey, provider) {
22 | const chatbot = new Chatbot(apiKey, provider);
23 |
24 | const system = 'You are a helpful coder.';
25 | const input = new LLamaReplicateInput(system,
26 | {model: '13b-code-instruct',
27 | max_new_tokens: 500,
28 | top_k: 50,
29 | top_p: 0.9,
30 | temperature: 0.1,
31 | min_new_tokens: 128,
32 | repetition_penalty: 1.15,
33 | stop_sequences: ''});
34 | input.addUserMessage('how to code a microservice using Node.js and Express.');
35 |
36 | const response = await chatbot.chat(input);
37 |
38 | console.log(`Chatbot response (${provider}):`);
39 | console.log(`${response}`);
40 |
41 |
42 | }
43 |
44 | async function callSageMakerChatbot(apiKey, provider, url) {
45 |
46 | const chatbot = new Chatbot(apiKey, provider, {url: url});
47 |
48 | const system = 'You are a helpful assistant.';
49 | const input = new LLamaSageInput(system);
50 | input.addUserMessage('Explain the plot of the Inception movie in one line.');
51 |
52 | const response = await chatbot.chat(input);
53 |
54 | console.log(`Chatbot response (${provider}):`);
55 | console.log(`${response}`);
56 |
57 |
58 | }
59 |
60 | (async () => {
61 | // Test chatbot using Llama
62 | console.log('### execute the llama chatbot ###')
63 | await callReplicaChatbot(process.env.REPLICATE_API_KEY, SupportedChatModels.REPLICATE);
64 |
65 | // Test chatbot using the Llama coder model
66 | console.log('\n### execute the llama-code chatbot ###')
67 | await callReplicaCoderChatbot(process.env.REPLICATE_API_KEY, SupportedChatModels.REPLICATE);
68 |
69 |
70 | // Test chatbot using Sagemaker Llama private deployment
71 | // uncomment below if you deployed LLama in AWS sagemaker with API gateway
72 |
73 | // console.log('\n### execute the AWS llama chatbot ###')
74 | // await callSageMakerChatbot(null /*replace with api key, if the model not deployed in open gateway*/,
75 | // SupportedChatModels.SAGEMAKER,
76 | // process.env.AWS_API_URL /*replace with API gateway link*/)
77 |
78 | })();
79 |
--------------------------------------------------------------------------------
/IntelliNode/test/integration/RemoteSpeechModel.test.js:
--------------------------------------------------------------------------------
1 | const fs = require('fs');
2 | require('dotenv').config();
3 | const assert = require('assert');
4 | // const { RemoteSpeechModel, SupportedSpeechModels } = require('../controller/RemoteSpeechModel');
5 | const { RemoteSpeechModel, SupportedSpeechModels } = require('../../controller/RemoteSpeechModel')
6 | const Text2SpeechInput = require('../../model/input/Text2SpeechInput');
7 | const AudioHelper = require('../../utils/AudioHelper');
8 |
9 | const remoteSpeechModel = new RemoteSpeechModel(process.env.GOOGLE_API_KEY, SupportedSpeechModels.GOOGLE);
10 | const openAiRemoteSpeechModel = new RemoteSpeechModel(process.env.OPENAI_API_KEY, SupportedSpeechModels.OPENAI);
11 | const audioHelper = new AudioHelper();
12 |
13 | async function testGenerateSpeech() {
14 | try {
15 |
16 | const tempDir = '../temp';
17 | if (!fs.existsSync(tempDir)) {
18 | fs.mkdirSync(tempDir);
19 | }
20 |
21 | const input = new Text2SpeechInput({
22 | text: 'Welcome to Intelligent Node',
23 | language: 'en-gb'
24 | });
25 |
26 | const audioContent = await remoteSpeechModel.generateSpeech(input);
27 |
28 | assert(audioContent.length > 0, 'testGenerateSpeech response length should be greater than 0');
29 |
30 | const decodedAudio = audioHelper.decode(audioContent);
31 | const saved = audioHelper.saveAudio(decodedAudio, tempDir, 'temp.mp3');
32 | assert(saved, 'Audio file should be saved successfully');
33 |
34 | console.log('Test passed: Audio generated and saved successfully');
35 |
36 | } catch (error) {
37 | console.error('Test failed:', error);
38 | }
39 | }
40 |
41 | async function testOpenAiGenerateSpeech() {
42 | try {
43 | const input = new Text2SpeechInput({
44 | model: 'tts-1',
45 | text: "The quick brown fox jumped over the lazy dog.",
46 | voice: "alloy",
47 | stream: true
48 | });
49 |
50 | const result = await openAiRemoteSpeechModel.generateSpeech(input);
51 |
52 | // Create a writable stream and pipe the response data to the stream
53 | const filePath = '../temp/downloaded_audio.mp3'; // Replace with the desired file name and extension
54 |
55 | const writer = fs.createWriteStream(filePath);
56 | result.pipe(writer);
57 |
58 | // Handle the completion of writing the file
59 | writer.on('finish', () => {
60 | const fileExists = fs.existsSync(filePath);
61 | assert(fileExists === true, 'file should be generated on finish')
62 | console.log('Audio file downloaded successfully!');
63 | });
64 |
65 | // Handle any errors that may occur during the download process
66 | writer.on('error', (err) => {
67 | console.error('Error downloading the audio file:', err);
68 | });
69 |
70 | } catch (error) {
71 | console.error('Test failed:', error);
72 | }
73 | }
74 |
75 | (async () => {
76 | // await testGenerateSpeech();
77 | await testOpenAiGenerateSpeech();
78 | })();
79 |
--------------------------------------------------------------------------------
/IntelliNode/test/integration/TextAnalyzer.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const { TextAnalyzer } = require('../../function/TextAnalyzer');
4 | const { SupportedLangModels } = require('../../controller/RemoteLanguageModel');
5 |
6 | const openaiApiKey = process.env.OPENAI_API_KEY;
7 | const cohereApiKey = process.env.COHERE_API_KEY;
8 |
9 | const openaiTextAnalyzer = new TextAnalyzer(openaiApiKey, SupportedLangModels.OPENAI);
10 | const cohereTextAnalyzer = new TextAnalyzer(cohereApiKey, SupportedLangModels.COHERE);
11 |
12 | async function testOpenAISummarize() {
13 | const text = 'IntelliNode is a javascript library that integrates cutting-edge AI models into your project. With its intuitive functions, you can easily feed data to models like ChatGPT, WaveNet, and Stable diffusion and receive generated text, speech, or images. It also offers high-level functions such as semantic search and chatbot capabilities.';
14 | const summary = await openaiTextAnalyzer.summarize(text);
15 | console.log('OpenAI Summary:', summary);
16 | assert(summary.length > 0, 'Test passed');
17 | }
18 |
19 | async function testCohereSummarize() {
20 | const text = 'IntelliNode is a javascript library that integrates cutting-edge AI models into your project. With its intuitive functions, you can easily feed data to models like ChatGPT, WaveNet, and Stable diffusion and receive generated text, speech, or images. It also offers high-level functions such as semantic search and chatbot capabilities.';
21 | const summary = await cohereTextAnalyzer.summarize(text);
22 | console.log('Cohere Summary:', summary);
23 | assert(summary.length > 0, 'Test passed');
24 | }
25 |
26 | async function testOpenAISentimentAnalysis() {
27 | console.log('** start testOpenAISentimentAnalysis **');
28 | const text = 'IntelliNode is an amazing AI library that makes it easy to integrate various AI models. I love using it!';
29 | const sentiment = await openaiTextAnalyzer.sentimentAnalysis(text);
30 | console.log('OpenAI Sentiment Analysis:', sentiment);
31 | assert(sentiment.results && sentiment.results.positive !== undefined && sentiment.results.negative !== undefined && sentiment.results.neutral !== undefined, 'Test passed');
32 | }
33 |
34 | async function testCohereSentimentAnalysis() {
35 | console.log('** start testCohereSentimentAnalysis **');
36 | const text = 'IntelliNode is an amazing AI library that makes it easy to integrate various AI models. I love using it!';
37 | const sentiment = await cohereTextAnalyzer.sentimentAnalysis(text);
38 | console.log('Cohere Sentiment Analysis:', sentiment);
39 | assert(sentiment.results && sentiment.results.positive !== undefined && sentiment.results.negative !== undefined && sentiment.results.neutral !== undefined, 'Test passed');
40 | }
41 |
42 | (async () => {
43 | await testOpenAISummarize();
44 | await testCohereSummarize();
45 | await testOpenAISentimentAnalysis();
46 | await testCohereSentimentAnalysis();
47 | })();
--------------------------------------------------------------------------------
/IntelliNode/test/integration/RemoteEmbedModel.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const config = require('../../config.json');
4 | const { RemoteEmbedModel, SupportedEmbedModels } = require('../../controller/RemoteEmbedModel');
5 | const EmbedInput = require('../../model/input/EmbedInput');
6 |
7 | const openaiApiKey = process.env.OPENAI_API_KEY;
8 | const cohereApiKey = process.env.COHERE_API_KEY;
9 | const replicateApiKey = process.env.REPLICATE_API_KEY
10 | const geminiApiKey = process.env.GEMINI_API_KEY;
11 |
12 | const openaiEmbedModel = new RemoteEmbedModel(openaiApiKey, SupportedEmbedModels.OPENAI);
13 | const cohereEmbedModel = new RemoteEmbedModel(cohereApiKey, SupportedEmbedModels.COHERE);
14 | const replicateEmbedModel = new RemoteEmbedModel(replicateApiKey, SupportedEmbedModels.REPLICATE);
15 | const geminiEmbedModel = new RemoteEmbedModel(geminiApiKey, SupportedEmbedModels.GEMINI);
16 |
17 | async function testOpenAIEmbeddings() {
18 | console.log('Start testOpenAIEmbeddings');
19 |
20 | const embedInput = new EmbedInput({
21 | texts: ['Hello from OpenAI!', '您好,来自 OpenAI!'],
22 | model: 'text-embedding-3-small',
23 | });
24 |
25 | const results = await openaiEmbedModel.getEmbeddings(embedInput);
26 | console.log('OpenAI Embeddings:', results, '\n');
27 | assert(results.length > 0, 'OpenAI Embeddings Test passed');
28 | }
29 |
30 | async function testCohereEmbeddings() {
31 | console.log('Start testCohereEmbeddings');
32 |
33 | const embedInput = new EmbedInput({
34 | texts: ['Hello from Cohere!', '您好,来自 Cohere!'],
35 | model: 'embed-multilingual-v2.0',
36 | });
37 |
38 | const results = await cohereEmbedModel.getEmbeddings(embedInput);
39 | console.log('Cohere Embeddings:', results, '\n');
40 | assert(results.length > 0, 'Cohere Embeddings Test passed');
41 | }
42 |
43 | async function testReplicateEmbeddings() {
44 | console.log('Start testReplicateEmbeddings');
45 |
46 | const embedInput = new EmbedInput({
47 | texts: ['Hello from Replicate!', 'Hola desde Replicate!'],
48 | model: config.models.replicate.llama['llama-2-13b-embeddings-version'],
49 | });
50 |
51 | const results = await replicateEmbedModel.getEmbeddings(embedInput);
52 | console.log('Replicate Embeddings:', results, '\n');
53 |
54 | assert(results.length === embedInput.texts.length && results.every(embedding => embedding.length > 0),
55 | 'Replicate Embeddings Test passed');
56 | }
57 |
58 | async function testGeminiEmbeddings() {
59 | console.log('Start testGeminiEmbeddings');
60 |
61 | const embedInput = new EmbedInput({
62 | texts: ['Hello from Gemini!', 'Hallo von Gemini!'],
63 | model: 'models/embedding-001',
64 | });
65 |
66 | const results = await geminiEmbedModel.getEmbeddings(embedInput);
67 | console.log('Gemini Embeddings:', results, '\n');
68 |
69 | assert(results && results.values, 'Gemini Embeddings Test passed');
70 | }
71 |
72 | (async () => {
73 | await testOpenAIEmbeddings();
74 | await testCohereEmbeddings();
75 | // await testReplicateEmbeddings();
76 | await testGeminiEmbeddings();
77 | })();
--------------------------------------------------------------------------------
/IntelliNode/controller/RemoteImageModel.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 | */
6 | const SupportedImageModels = {
7 | OPENAI: "openai",
8 | STABILITY: "stability",
9 | };
10 |
11 | const OpenAIWrapper = require("../wrappers/OpenAIWrapper");
12 | const StabilityAIWrapper = require("../wrappers/StabilityAIWrapper");
13 | const ImageModelInput = require("../model/input/ImageModelInput");
14 |
15 | class RemoteImageModel {
16 | constructor(keyValue, provider) {
17 | if (!provider) {
18 | provider = SupportedImageModels.OPENAI;
19 | }
20 |
21 | const supportedModels = RemoteImageModel.getSupportedModels();
22 |
23 | if (supportedModels.includes(provider)) {
24 | this.initiate(keyValue, provider);
25 | } else {
26 | const models = supportedModels.join(" - ");
27 | throw new Error(
28 | `The received provider is not supported. Send any provider from: ${models}`
29 | );
30 | }
31 | }
32 |
33 | initiate(keyValue, keyType) {
34 | this.keyType = keyType;
35 |
36 | if (keyType === SupportedImageModels.OPENAI) {
37 | this.openaiWrapper = new OpenAIWrapper(keyValue);
38 | } else if (keyType === SupportedImageModels.STABILITY) {
39 | this.stabilityWrapper = new StabilityAIWrapper(keyValue);
40 | } else {
41 | throw new Error("Invalid provider name");
42 | }
43 | }
44 |
45 | static getSupportedModels() {
46 | return Object.values(SupportedImageModels);
47 | }
48 |
49 | async generateImages(imageInput) {
50 | let inputs;
51 |
52 | if (imageInput instanceof ImageModelInput) {
53 | if (this.keyType === SupportedImageModels.OPENAI) {
54 | inputs = imageInput.getOpenAIInputs();
55 | } else if (this.keyType === SupportedImageModels.STABILITY) {
56 | inputs = imageInput.getStabilityInputs();
57 | } else {
58 | throw new Error("The keyType is not supported");
59 | }
60 | } else if (typeof imageInput === "object") {
61 | inputs = imageInput;
62 | } else {
63 | throw new Error(
64 | "Invalid input: Must be an instance of ImageModelInput or a dictionary"
65 | );
66 | }
67 |
68 | if (this.keyType === SupportedImageModels.OPENAI) {
69 | const results = await this.openaiWrapper.generateImages(inputs);
70 |
71 | /*console.log('results: ', results)*/
72 |
73 | return results.data.map((data) => {
74 | if (data.url) {
75 | return data.url;
76 | } else if (data.b64_json) {
77 | return data.b64_json;
78 | } else {
79 | throw new Error('Unexpected image data format');
80 | }
81 | });
82 |
83 | } else if (this.keyType === SupportedImageModels.STABILITY) {
84 |
85 | const results = await this.stabilityWrapper.generateImageDispatcher(inputs);
86 |
87 | return results.artifacts.map((imageObj) => imageObj.base64);
88 |
89 | } else {
90 | throw new Error("The keyType is not supported");
91 | }
92 | }
93 | }
94 |
95 | module.exports = {
96 | RemoteImageModel,
97 | SupportedImageModels,
98 | };
--------------------------------------------------------------------------------
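Usage sketch: a minimal call into RemoteImageModel. Assumes OPENAI_API_KEY is set and the script runs from the repository root; the prompt and image count are illustrative only.

require('dotenv').config();
const { RemoteImageModel, SupportedImageModels } = require('./IntelliNode/controller/RemoteImageModel');
const ImageModelInput = require('./IntelliNode/model/input/ImageModelInput');

(async () => {
  const imageModel = new RemoteImageModel(process.env.OPENAI_API_KEY, SupportedImageModels.OPENAI);
  const images = await imageModel.generateImages(new ImageModelInput({
    prompt: 'a red and black gaming chair, studio lighting',
    numberOfImages: 1,
  }));
  console.log(images[0]); // a URL or base64 string, depending on the response format
})();
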
/IntelliNode/function/SemanticSearch.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | */
8 | const { RemoteEmbedModel, SupportedEmbedModels } = require('../controller/RemoteEmbedModel');
9 | const EmbedInput = require('../model/input/EmbedInput');
10 | const MatchHelpers = require('../utils/MatchHelpers');
11 |
12 | class SemanticSearch {
13 | constructor(keyValue, provider = SupportedEmbedModels.OPENAI, customProxyHelper = null) {
14 | this.keyValue = keyValue;
15 | this.provider = provider;
16 |
17 | this.remoteEmbedModel = new RemoteEmbedModel(keyValue, provider, customProxyHelper);
18 | }
19 |
20 | async getTopMatches(pivotItem, searchArray, numberOfMatches, modelName = null) {
21 |
22 | if (numberOfMatches > searchArray.length) {
23 | throw new Error('numberOfMatches should not be greater than the searchArray length');
24 | }
25 |
26 | const embedInput = new EmbedInput({
27 | texts: [pivotItem, ...searchArray],
28 | model: modelName
29 | });
30 |
31 | if (modelName == null) {
32 | embedInput.setDefaultValues(this.provider);
33 | }
34 |
35 | const embeddingsResponse = await this.remoteEmbedModel.getEmbeddings(embedInput);
36 |
37 | // Extract embeddings based on the provider
38 | let embeddings;
39 | if (this.provider === SupportedEmbedModels.OPENAI) {
40 | embeddings = embeddingsResponse.map((item) => item.embedding);
41 | } else if (this.provider === SupportedEmbedModels.COHERE) {
42 | embeddings = embeddingsResponse.map((item) => item.embedding);
43 | } else {
44 | throw new Error('Invalid provider name');
45 | }
46 |
47 | const pivotEmbedding = embeddings[0];
48 | const searchEmbeddings = embeddings.slice(1);
49 |
50 | return this.getTopMatchesFromEmbeddings(pivotEmbedding, searchEmbeddings, numberOfMatches);
51 | }
52 |
53 | getTopVectorMatches(pivotEmbedding, searchEmbeddings, numberOfMatches) {
54 | if (numberOfMatches >= searchEmbeddings.length) {
55 | throw new Error('numberOfMatches should be less than the length of the searchEmbeddings');
56 | }
57 |
58 | return this.getTopMatchesFromEmbeddings(pivotEmbedding, searchEmbeddings, numberOfMatches);
59 | }
60 |
61 | getTopMatchesFromEmbeddings(pivotEmbedding, searchEmbeddings, numberOfMatches) {
62 | const similarities = searchEmbeddings.map((embedding) => MatchHelpers.cosineSimilarity(pivotEmbedding, embedding));
63 | const sortedIndices = this.argsort(similarities).reverse();
64 | const topMatchesIndices = sortedIndices.slice(0, numberOfMatches);
65 |
66 | return topMatchesIndices.map((index) => ({ index, similarity: similarities[index] }));
67 | }
68 |
69 | argsort(array) {
70 | const arrayObject = array.map((value, index) => ({ value, index }));
71 | arrayObject.sort((a, b) => a.value - b.value);
72 | return arrayObject.map((item) => item.index);
73 | }
74 |
75 | filterTopMatches(searchResults, originalArray) {
76 | return searchResults.map(result => (originalArray[result.index]));
77 | }
78 | }
79 |
80 | module.exports = { SemanticSearch };
81 |
--------------------------------------------------------------------------------
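Usage sketch: top-k matching with SemanticSearch. Assumes OPENAI_API_KEY is set and the script runs from the repository root; the sample texts are illustrative only.

require('dotenv').config();
const { SemanticSearch } = require('./IntelliNode/function/SemanticSearch');

(async () => {
  const search = new SemanticSearch(process.env.OPENAI_API_KEY); // defaults to the OpenAI provider
  const articles = ['quantum computing basics', 'weekend gardening tips', 'an intro to machine learning'];
  const matches = await search.getTopMatches('AI tutorials', articles, 2);
  // matches is [{ index, similarity }, ...]; map the indices back to the original texts:
  console.log(search.filterTopMatches(matches, articles));
})();
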
/IntelliNode/test/integration/ChatbotCohere.test.js:
--------------------------------------------------------------------------------
1 | require("dotenv").config();
2 | const assert = require("assert");
3 | const { Chatbot, SupportedChatModels } = require("../../function/Chatbot");
4 | const { CohereStreamParser } = require('../../utils/StreamParser');
5 | const { ChatGPTMessage,
6 | CohereInput
7 | } = require("../../model/input/ChatModelInput");
8 |
9 | // env key
10 | const apiKey = process.env.COHERE_API_KEY;
11 |
12 | // cohere bot
13 | const bot = new Chatbot(apiKey, SupportedChatModels.COHERE);
14 |
15 | async function testChatGPTCase1() {
16 | try {
17 | console.log('\nchat test case 1: \n')
18 | const mode = "You are a helpful astronomy assistant.";
19 | const input = new CohereInput(mode);
20 | input.addUserMessage("what is the space between moon and earth");
21 |
22 | const responses = await bot.chat(input);
23 |
24 | responses.forEach((response) => console.log("- " + response));
25 |
26 | assert(responses.length > 0, "testOpenaiChatGPTCase1 response length should be greater than 0");
27 | } catch (error) {
28 | console.error("Test case failed with exception:", error.message);
29 | }
30 | }
31 |
32 | async function testChatGPTCase2() {
33 | try {
34 | console.log('\nchat test case 2: \n')
35 | const mode = "You are a helpful astronomy assistant.";
36 | const input = new CohereInput(mode);
37 | input.addUserMessage("Explain the plot of the Inception movie in one line");
38 | input.addAssistantMessage("The plot of the movie Inception follows a skilled thief who enters people's dreams to steal their secrets and is tasked with implanting an idea into a target's mind to alter their future actions.");
39 | input.addUserMessage("Explain the plot of the dark night movie in one line");
40 |
41 | const responses = await bot.chat(input);
42 |
43 | responses.forEach((response) => console.log("- " + response));
44 |
45 | assert(responses.length > 0, "test case 2 response length should be greater than 0");
46 | } catch (error) {
47 | console.error("Test case failed with exception:", error.message);
48 | }
49 | }
50 |
51 | async function testChatGPTCase3() {
52 | try {
53 |
54 | console.log('\nchat test case 3: \n')
55 |
56 | const mode = "You are a helpful astronomy assistant.";
57 | const input = new CohereInput(mode);
58 |
59 | input.addUserMessage("Explain the plot of the Inception movie in one line");
60 | input.addAssistantMessage("The plot of the movie Inception follows a skilled thief who enters people's dreams to steal their secrets and is tasked with implanting an idea into a target's mind to alter their future actions.");
61 | input.addUserMessage("Explain the plot of the dark night movie in one line");
62 |
63 | let response = '';
64 | for await (const contentText of bot.stream(input)) {
65 | response += contentText;
66 | console.log('Received chunk:', contentText);
67 | }
68 |
69 | assert(response.length > 0, "Test case 3 response length should be greater than 0");
70 |
71 | } catch (error) {
72 | console.error("Test case failed with exception:", error.message);
73 | }
74 |
75 |
76 | }
77 |
78 | (async () => {
79 |
80 | console.log('### Cohere model ###')
81 | await testChatGPTCase1();
82 | await testChatGPTCase2();
83 | await testChatGPTCase3();
84 |
85 | })();
--------------------------------------------------------------------------------
/IntelliNode/test/integration/NvidiaWrapper.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const NvidiaWrapper = require('../../wrappers/NvidiaWrapper');
4 |
5 | const nvidia = new NvidiaWrapper(process.env.NVIDIA_API_KEY);
6 |
7 | async function testNvidiaGenerateText(model_name) {
8 | try {
9 | console.log('--- NVIDIA Generate Text ---\n');
10 | const params = {
11 | model: model_name,
12 | messages: [
13 | {
14 | role: 'user',
15 | content: 'Which number is larger, 9.11 or 9.8?',
16 | },
17 | ],
18 | max_tokens: 1024,
19 | presence_penalty: 0,
20 | frequency_penalty: 0,
21 | top_p: 0.7,
22 | temperature: 0.6,
23 | stream: false,
24 | };
25 |
26 | const result = await nvidia.generateText(params);
27 | console.log('NVIDIA deepseek response:', JSON.stringify(result, null, 2));
28 |
29 | // Basic check that we have at least one choice
30 | assert(
31 | result.choices && result.choices.length > 0,
32 | 'Nvidia response should contain at least one choice'
33 | );
34 |
35 | } catch (error) {
36 | console.error('Nvidia Error:', error);
37 | }
38 | }
39 |
40 | async function testNvidiaStream(model_name) {
41 | try {
42 | console.log('--- NVIDIA Streaming ---\n');
43 | const params = {
44 | model: model_name,
45 | messages: [
46 | {
47 | role: 'user',
48 | content: 'Write a limerick about the wonders of GPU computing.'
49 | }
50 | ],
51 | max_tokens: 1024,
52 | temperature: 0.2,
53 | stream: true, // force streaming
54 | };
55 |
56 | const stream = await nvidia.generateTextStream(params);
57 | // consume the response stream chunk by chunk
58 |
59 | for await (const chunk of stream) {
60 | // The chunk is likely raw text or JSON lines.
61 | // If you want to parse partial JSON events, do so here.
62 | process.stdout.write(chunk.toString('utf8'));
63 | }
64 |
65 | console.log('\n--- End of NVIDIA Streaming ---\n');
66 |
67 | } catch (error) {
68 | console.error('Nvidia Streaming Error:', error);
69 | }
70 | }
71 |
72 | async function testNvidiaDeepSeekStream(model_name) {
73 | try {
74 | const nvidia = new NvidiaWrapper(process.env.NVIDIA_API_KEY);
75 | const params = {
76 | model: model_name,
77 | messages: [
78 | {
79 | role: 'user',
80 | content: 'Write a short poem about the future of GPUs.'
81 | }
82 | ],
83 | max_tokens: 256,
84 | temperature: 0.6
85 | };
86 |
87 | const stream = await nvidia.generateTextStream(params);
88 |
89 | console.log('\n--- NVIDIA Deep Seek Streaming ---\n');
90 | for await (const chunk of stream) {
91 | process.stdout.write(chunk.toString('utf8'));
92 | }
93 | console.log('\n--- End of Deep Seek Streaming ---\n');
94 | } catch (error) {
95 | console.error('Error during Deep Seek stream:', error);
96 | }
97 | }
98 |
99 | (async () => {
100 | await testNvidiaGenerateText('deepseek-ai/deepseek-r1');
101 | await testNvidiaStream('meta/llama-3.3-70b-instruct');
102 | await testNvidiaDeepSeekStream('deepseek-ai/deepseek-r1');
103 | })();
104 |
--------------------------------------------------------------------------------
/samples/command_sample/automate_s3_bucket.js:
--------------------------------------------------------------------------------
1 | // imports
2 | // const AWS = require('aws-sdk');
3 | const { S3Client, ListBucketsCommand, ListObjectsCommand } = require('@aws-sdk/client-s3');
4 | const { Chatbot, ChatGPTInput, ChatGPTMessage, FunctionModelInput } = require('intellinode');
5 | require('dotenv').config();
6 |
7 | // initialize the objects
8 | const openApikey = process.env.OPENAI_API_KEY;
9 | const bot = new Chatbot(openApikey);
10 | // const s3 = new AWS.S3();
11 | const s3client = new S3Client();
12 |
13 | // initial variables
14 | const gpt_model = "gpt-3.5-turbo-0613";
15 |
16 | // define the functions details
17 | const functions_desc = [
18 | new FunctionModelInput('list_buckets', 'List all available S3 buckets'),
19 | new FunctionModelInput('list_objects', 'List the objects or files inside an S3 bucket', {
20 | type: 'object',
21 | properties: {
22 | bucket: { type: 'string', description: 'The name of the S3 bucket' },
23 | prefix: { type: 'string', description: 'The folder path in the S3 bucket' }
24 | },
25 | required: ['bucket']
26 | })
27 | ];
28 |
29 | // define the implementation mapper
30 | const functions_dict = {
31 | list_buckets: async () => {
32 | const command = new ListBucketsCommand({});
33 | const response = await s3client.send(command);
34 | return response.Buckets;
35 | },
36 | list_objects: async (bucket, prefix = '') => {
37 | console.log('Bucket name: ', bucket)
38 | const command = new ListObjectsCommand({
39 | Bucket: bucket,
40 | Prefix: prefix,
41 | });
42 | const response = await s3client.send(command);
43 | return response.Contents;
44 | },
45 | // define other S3 bucket functions
46 | };
47 |
48 | async function runOneshotConversation(userInput, topic = "s3 bucket functions.", isLog = false) {
49 | console.log('\nRunning oneshot conversation: \n');
50 |
51 | const systemMessage = `Don't make assumptions about what values to plug into functions. Ask for clarification if a user request is ambiguous. If the user asks a question not related to ${topic}, respond within the scope of ${topic}.`;
52 |
53 | const input = new ChatGPTInput(systemMessage, { model: gpt_model });
54 | input.addMessage(new ChatGPTMessage(userInput, "user"));
55 |
56 | const responses = await bot.chat(input, functions_desc.map(f => f.getFunctionModelInput()));
57 |
58 | let finalMessage;
59 |
60 | const response = responses[0];
61 | if (typeof response === "object") {
62 | const functionName = response.function_call.name;
63 | const functionArgs = JSON.parse(response.function_call.arguments);
64 |
65 | // call the function
66 | const functionResponse = await functions_dict[functionName](...Object.values(functionArgs));
67 | // console.log("Function response: ", functionResponse);
68 |
69 | // add the response to the conversation
70 | input.addMessage(new ChatGPTMessage(JSON.stringify(functionResponse), "function", functionName));
71 |
72 | const secondResponses = await bot.chat(input);
73 |
74 | finalMessage = secondResponses.join('\n');
75 |
76 | } else {
77 | finalMessage = response;
78 |
79 | }
80 |
81 | return finalMessage;
82 | }
83 |
84 |
85 |
86 | (async () => {
87 | // Test chatbot using OpenAI
88 | const model_response = await runOneshotConversation("List my s3 buckets");
89 | console.log("the model message:\n", model_response);
90 | })();
91 |
--------------------------------------------------------------------------------
/IntelliNode/test/integration/GeminiAIWrapper.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const GeminiAIWrapper = require('../../wrappers/GeminiAIWrapper');
4 | const gemini = new GeminiAIWrapper(process.env.GEMINI_API_KEY);
5 |
6 | async function testGeminiAIWrapper() {
7 | try {
8 | const params = {
9 | "contents": [{
10 | "parts": [{
11 | "text": "Write a story about a magic backpack."
12 | }]
13 | }]
14 | };
15 |
16 | const result = await gemini.generateContent(params);
17 |
18 | if (Array.isArray(result.candidates)) {
19 | let generatedText = result.candidates[0]?.content?.parts[0]?.text;
20 | console.log('Gemini AI Content Generation Test Result:\n', generatedText, '\n');
21 | assert(generatedText, 'Gemini AI returned no results');
22 | } else {
23 | console.log('Unexpected output format from Gemini API');
24 | }
25 | } catch (error) {
26 | console.error('Gemini AI Error:', error);
27 | }
28 | }
29 |
30 | async function testImageToText() {
31 | try {
32 |
33 | const filePath = '../temp/test_image_desc.png';
34 | const result = await gemini.imageToText('describe the image', filePath, 'png');
35 |
36 | if (Array.isArray(result.candidates)) {
37 | let generatedText = result.candidates[0]?.content?.parts[0]?.text;
38 | console.log('Gemini AI Image To Text Generation Test Result:\n', generatedText, '\n');
39 | assert(generatedText, 'Gemini AI returned no results');
40 | } else {
41 | console.log('Unexpected output format from Gemini API');
42 | }
43 | } catch (error) {
44 | console.error('Gemini AI Error:', error);
45 | }
46 | }
47 |
48 | async function testGetEmbeddings() {
49 | try {
50 | const text = "Write a story about a magic backpack.";
51 | const params = {
52 | model: "models/embedding-001",
53 | content: {
54 | parts: [{
55 | text: text
56 | }]
57 | }
58 | };
59 |
60 | const result = await gemini.getEmbeddings(params);
61 | console.log('Gemini Single Embedding Test Result:\n', result, '\n');
62 | assert(result && result.values, 'Gemini AI returned no embedding results');
63 | } catch (error) {
64 | console.error('Gemini Embedding Error:', error);
65 | }
66 | }
67 |
68 | async function testGetBatchEmbeddings() {
69 | try {
70 | const texts = ["Hello world", "Write a story about a magic backpack."];
71 | const requests = texts.map(text => ({
72 | model: "models/embedding-001",
73 | content: {
74 | parts: [{ text }]
75 | }
76 | }));
77 |
78 | const result = await gemini.getBatchEmbeddings({ requests });
79 | console.log('Gemini Batch Embedding Test Result:\n', result, '\n');
80 | assert(result && result.length > 0 && result.every(e => e.values && e.values.length > 0),
81 | 'Gemini AI returned no batch embedding results');
82 | } catch (error) {
83 | console.error('Gemini Batch Embedding Error:', error);
84 | }
85 | }
86 |
87 |
88 | (async () => {
89 | await testGeminiAIWrapper();
90 | await testImageToText();
91 | await testGetEmbeddings();
92 | await testGetBatchEmbeddings();
93 | })();
--------------------------------------------------------------------------------
/samples/command_sample/ecommerce_tool.js:
--------------------------------------------------------------------------------
1 | const fs = require('fs');
2 | const dotenv = require('dotenv');
3 | dotenv.config();
4 |
5 | const {
6 | RemoteLanguageModel,
7 | SupportedLangModels,
8 | LanguageModelInput,
9 | Chatbot,
10 | ChatGPTInput,
11 | RemoteImageModel,
12 | SupportedImageModels,
13 | ImageModelInput,
14 | RemoteSpeechModel,
15 | Text2SpeechInput,
16 | AudioHelper,
17 | } = require('intellinode');
18 |
19 | const MyKeys = {
20 | openai: process.env.OPENAI_API_KEY,
21 | cohere: process.env.COHERE_API_KEY,
22 | stability: process.env.STABILITY_API_KEY,
23 | google: process.env.GOOGLE_API_KEY,
24 | };
25 |
26 | const audioHelper = new AudioHelper();
27 |
28 |
29 | async function main() {
30 | // 1- Generate product description
31 | const textModelInput = 'Write a creative product description for gaming chair with black and red colors';
32 | const textProductDesc = await generateProductDescription(textModelInput, MyKeys.cohere, 'cohere');
33 | console.log('- Product description:\n', textProductDesc);
34 |
35 | // 2- Generate image description
36 | const imageDescription = await getImageDescription(textProductDesc, MyKeys.openai, 'openai');
37 | console.log('\n- Image description:\n', imageDescription);
38 |
39 | // 3- Generate multiple images
40 | const images = await generateImage(imageDescription, MyKeys.stability, SupportedImageModels.STABILITY);
41 | console.log('save the product images in the temp folder');
42 | images.forEach((image, index) => {
43 | fs.writeFileSync(`./temp/product_image${index + 1}.png`, image, { encoding: 'base64' });
44 | });
45 |
46 | // 4- Generate audio
47 | const decodedAudio = await generateSpeech(textProductDesc, MyKeys.google, 'google');
48 | audioHelper.saveAudio(decodedAudio, './temp', 'product_description.mp3');
49 | console.log('Audio generated');
50 | }
51 |
52 | async function generateProductDescription(textInput, apiKey, modelBackend) {
53 | const modelName = (modelBackend === SupportedLangModels.OPENAI) ? 'gpt-3.5-turbo-instruct' : 'command';
54 | const langModel = new RemoteLanguageModel(apiKey, modelBackend);
55 | const results = await langModel.generateText(new LanguageModelInput({
56 | prompt: textInput,
57 | model: modelName,
58 | maxTokens: 300
59 | }));
60 | return results[0].trim();
61 | }
62 |
63 | async function getImageDescription(textInput, apiKey, modelBackend) {
64 | const chatbot = new Chatbot(apiKey, modelBackend);
65 | const input = new ChatGPTInput('generate image description from paragraph to use it as prompt to generate image from DALL·E or stable diffusion image model. return only the image description to use it as direct input');
66 | input.addUserMessage(textInput);
67 | const responses = await chatbot.chat(input);
68 | return responses[0].trim();
69 | }
70 |
71 | async function generateImage(imageText, apiKey, modelBackend) {
72 | const imgModel = new RemoteImageModel(apiKey, modelBackend);
73 | const imageInput = new ImageModelInput({
74 | prompt: imageText,
75 | numberOfImages: 3,
76 | width: 512,
77 | height: 512
78 | });
79 | return await imgModel.generateImages(imageInput);
80 | }
81 |
82 | async function generateSpeech(textProductDesc, apiKey, modelBackend) {
83 | const speechModel = new RemoteSpeechModel(apiKey);
84 | const input = new Text2SpeechInput({ text: textProductDesc, language: 'en-gb' });
85 | const audioContent = await speechModel.generateSpeech(input);
86 |
87 | return audioHelper.decode(audioContent);
88 | }
89 |
90 | main();
91 |
--------------------------------------------------------------------------------
/IntelliNode/test/integration/GenNvidia.test.js:
--------------------------------------------------------------------------------
1 | require("dotenv").config();
2 | const assert = require("assert");
3 | const fs = require("fs");
4 | const path = require("path");
5 | const { Gen } = require("../../function/Gen");
6 | const { SupportedChatModels } = require("../../function/Chatbot");
7 |
8 | // Use NVIDIA API key from the environment.
9 | const nvidiaApiKey = process.env.NVIDIA_API_KEY;
10 |
11 | // Test marketing description (NVIDIA)
12 | async function testNvidiaMarketingDesc() {
13 | const prompt = "gaming chair.";
14 | const desc = await Gen.get_marketing_desc(prompt, nvidiaApiKey, SupportedChatModels.NVIDIA);
15 | console.log("NVIDIA Marketing Desc:", desc);
16 | assert(desc.length > 0, "Marketing description should not be empty.");
17 | // Ensure no <think> tag remains.
18 | assert(!desc.includes("<think>"), "Response should not contain <think> tag.");
19 | }
20 |
21 | // Test blog post (NVIDIA)
22 | async function testNvidiaBlogPost() {
23 | const prompt = "AI in art blog post.";
24 | const blog = await Gen.get_blog_post(prompt, nvidiaApiKey, SupportedChatModels.NVIDIA);
25 | console.log("NVIDIA Blog Post:", blog);
26 | assert(blog.length > 0, "Blog post should not be empty.");
27 | assert(!blog.includes("<think>"), "Response should not contain <think> tag.");
28 | }
29 |
30 | // Test HTML page generation (NVIDIA)
31 | async function testNvidiaHtmlPage() {
32 | const tempDir = path.join(__dirname, "../temp");
33 | if (!fs.existsSync(tempDir)) fs.mkdirSync(tempDir);
34 | const text = "a registration page with flat modern theme.";
35 | const htmlCode = await Gen.generate_html_page(text, nvidiaApiKey, "deepseek", SupportedChatModels.NVIDIA);
36 | console.log("NVIDIA HTML Page:", htmlCode);
37 | fs.writeFileSync(path.join(tempDir, "nvidia_generated_page.html"), htmlCode["html"]);
38 | assert(htmlCode["html"].length > 0, "HTML output should not be empty.");
39 | }
40 |
41 | // Test dashboard generation (NVIDIA)
42 | async function testNvidiaDashboard() {
43 | const tempDir = path.join(__dirname, "../temp");
44 | if (!fs.existsSync(tempDir)) fs.mkdirSync(tempDir);
45 | const csvData = "Title,Value\nGraph1,100\nGraph2,200"; // simplified CSV data
46 | const topic = "Monthly Hospital Activity";
47 | const dashboardOutput = await Gen.generate_dashboard(csvData, topic, nvidiaApiKey, "deepseek", 2, SupportedChatModels.NVIDIA);
48 | console.log("NVIDIA Dashboard:", dashboardOutput);
49 | fs.writeFileSync(path.join(tempDir, "nvidia_dashboard.html"), dashboardOutput["html"]);
50 | assert(dashboardOutput["html"].length > 0, "Dashboard HTML should not be empty.");
51 | }
52 |
53 | // Test instruct update (NVIDIA)
54 | async function testNvidiaInstructUpdate() {
55 | const modelOutput = "{\"html\": \"Title1\"";
56 | const userInstruction = "fix the format";
57 | const type = "json with html content";
58 | const fixedOutput = await Gen.instructUpdate(modelOutput, userInstruction, type, nvidiaApiKey, "deepseek", SupportedChatModels.NVIDIA);
59 | console.log("NVIDIA Instruct Update:", fixedOutput);
60 | assert(fixedOutput.length > 0, "Instruct update output should not be empty.");
61 | assert(!fixedOutput.includes("<think>"), "Response should not contain <think> tag.");
62 | }
63 |
64 | (async () => {
65 | console.log("Running NVIDIA Gen tests...");
66 | await testNvidiaMarketingDesc();
67 | await testNvidiaBlogPost();
68 | await testNvidiaHtmlPage();
69 | await testNvidiaDashboard();
70 | await testNvidiaInstructUpdate();
71 | console.log("All NVIDIA Gen tests passed.");
72 | })();
73 |
--------------------------------------------------------------------------------
/IntelliNode/test/integration/NvidiaNimWrapper.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const NvidiaWrapper = require('../../wrappers/NvidiaWrapper');
4 |
5 | // localBaseUrl default value (http://localhost:8000) for local testing
6 | const localBaseUrl = process.env.NVIDIA_NIM_BASE_URL || 'http://localhost:8000';
7 | const apiKey = process.env.NVIDIA_API_KEY;
8 | const nvidiaLocal = new NvidiaWrapper(apiKey, { baseUrl: localBaseUrl });
9 |
10 | /**
11 | * Test chat completions (non-streaming) using NVIDIA NIM.
12 | */
13 | async function testNimChatCompletion() {
14 | console.log('--- Testing NVIDIA NIM Chat Completion ---');
15 | const params = {
16 | model: 'meta/llama-3.1-8b-instruct',
17 | messages: [
18 | { role: 'user', content: 'Write a limerick about GPU computing.' }
19 | ],
20 | max_tokens: 64,
21 | temperature: 0.5,
22 | top_p: 1,
23 | stream: false
24 | };
25 |
26 | try {
27 | const response = await nvidiaLocal.generateText(params);
28 | console.log('Chat Completion Response:', JSON.stringify(response, null, 2));
29 | assert(response.choices && response.choices.length > 0, 'No choices returned from chat completion');
30 | } catch (error) {
31 | console.error('Error in chat completion:', error);
32 | }
33 | }
34 |
35 | /**
36 | * Test chat completions using streaming via NVIDIA NIM.
37 | */
38 | async function testNimChatStream() {
39 | console.log('--- Testing NVIDIA NIM Chat Streaming ---');
40 | const params = {
41 | model: 'meta/llama-3.1-8b-instruct',
42 | messages: [
43 | { role: 'user', content: 'Compose a short poem about GPUs.' }
44 | ],
45 | max_tokens: 64,
46 | temperature: 0.5,
47 | top_p: 1,
48 | stream: true
49 | };
50 |
51 | try {
52 | const stream = await nvidiaLocal.generateTextStream(params);
53 | let collected = '';
54 | // For Node.js, we assume the returned stream is a ReadableStream.
55 | for await (const chunk of stream) {
56 | const text = chunk.toString('utf8');
57 | process.stdout.write(text);
58 | collected += text;
59 | }
60 | console.log('\nCollected stream output:', collected);
61 | assert(collected.length > 0, 'No text received in streaming response');
62 | } catch (error) {
63 | console.error('Error in streaming chat:', error);
64 | }
65 | }
66 |
67 | /**
68 | * Test embeddings using NVIDIA NIM.
69 | */
70 | async function testNimEmbeddings() {
71 | console.log('--- Testing NVIDIA NIM Embeddings ---');
72 | const params = {
73 | input: ['What is the capital of France?'],
74 | model: 'snowflake/arctic-embed-l',
75 | input_type: 'query',
76 | encoding_format: 'float',
77 | truncate: 'NONE'
78 | };
79 |
80 | try {
81 | const embeddings = await nvidiaLocal.generateEmbeddings(params);
82 | console.log('Embeddings Response:', embeddings);
83 | // expect array
84 | assert(Array.isArray(embeddings), 'Embeddings response should be an array');
85 | embeddings.forEach((emb, idx) => {
86 | if (typeof emb !== 'number') {
87 | if (Array.isArray(emb)) {
88 | assert(typeof emb[0] === 'number', `Embedding at index ${idx} is not numeric`);
89 | }
90 | }
91 | });
92 | } catch (error) {
93 | console.error('Error in embeddings:', error);
94 | }
95 | }
96 |
97 | (async () => {
98 | await testNimChatCompletion();
99 | await testNimChatStream();
100 | //await testNimEmbeddings();
101 | })();
102 |
--------------------------------------------------------------------------------
/IntelliNode/test/integration/StabilityAIWrapperStyle.js:
--------------------------------------------------------------------------------
1 | require("dotenv").config();
2 | const fs = require('fs');
3 | const path = require('path');
4 | const StabilityAIWrapper = require('../../wrappers/StabilityAIWrapper');
5 |
6 | // Expected run command:
7 | // node StabilityAIWrapperStyle.js /absolute/path/to/image.png
8 |
9 | const imagePath = process.argv[2];
10 | if (!imagePath) {
11 | console.error('Usage: node StabilityAIWrapperStyle.js <image-path>');
12 | process.exit(1);
13 | }
14 |
15 | // Make sure we have a folder ../temp/stability/ to store images:
16 | const outputDir = path.join(__dirname, '../../temp/stability');
17 | if (!fs.existsSync(outputDir)) {
18 | fs.mkdirSync(outputDir, { recursive: true });
19 | }
20 |
21 | // A helper to extract the base file name
22 | const baseName = path.parse(imagePath).name;
23 |
24 | // Initialize the wrapper
25 | const stability = new StabilityAIWrapper(process.env.STABILITY_API_KEY);
26 |
27 | async function testControlSketch() {
28 | try {
29 | const responseBuffer = await stability.controlSketch({
30 | imagePath,
31 | prompt: 'A medieval castle on a hill, painterly style',
32 | control_strength: 0.7,
33 | output_format: 'png',
34 | // style_preset: 'photographic', // optional example
35 | // negative_prompt: 'ugly face', // optional example
36 | // seed: 12345, // optional
37 | accept: 'image/*' // request raw image
38 | });
39 |
40 | // Save result as "cat_sketch_edit.png" (example)
41 | const outName = `${baseName}_sketch_edit.png`;
42 | const outPath = path.join(outputDir, outName);
43 |
44 | fs.writeFileSync(outPath, Buffer.from(responseBuffer), 'binary');
45 | console.log(`Sketch image saved to: ${outPath}`);
46 | } catch (err) {
47 | console.error('Error in testControlSketch:', err);
48 | }
49 | }
50 |
51 | async function testControlStructure() {
52 | try {
53 | const responseBuffer = await stability.controlStructure({
54 | imagePath,
55 | prompt: 'A well manicured shrub in an English garden, photorealistic',
56 | control_strength: 0.7,
57 | output_format: 'webp',
58 | accept: 'image/*'
59 | });
60 |
61 | const outName = `${baseName}_structure_edit.webp`;
62 | const outPath = path.join(outputDir, outName);
63 |
64 | fs.writeFileSync(outPath, Buffer.from(responseBuffer), 'binary');
65 | console.log(`Structure image saved to: ${outPath}`);
66 | } catch (err) {
67 | console.error('Error in testControlStructure:', err);
68 | }
69 | }
70 |
71 | async function testControlStyle() {
72 | try {
73 | const responseBuffer = await stability.controlStyle({
74 | imagePath,
75 | prompt: 'Oil painting portrait of me as a Victorian king, highly detailed, dramatic lighting',
76 | // optional extras:
77 | // negative_prompt: 'blurry, lowres',
78 | // aspect_ratio: '16:9',
79 | // fidelity: 0.5,
80 | // seed: 987654,
81 | output_format: 'png',
82 | accept: 'image/*'
83 | });
84 |
85 | const outName = `${baseName}_style_edit.png`;
86 | const outPath = path.join(outputDir, outName);
87 |
88 | fs.writeFileSync(outPath, Buffer.from(responseBuffer), 'binary');
89 | console.log(`Style image saved to: ${outPath}`);
90 | } catch (err) {
91 | console.error('Error in testControlStyle:', err);
92 | }
93 | }
94 |
95 | // Run them all in sequence
96 | (async () => {
97 | await testControlSketch();
98 | await testControlStructure();
99 | await testControlStyle();
100 | })();
101 |
--------------------------------------------------------------------------------
/IntelliNode/utils/FetchClient.js:
--------------------------------------------------------------------------------
1 | const fetch = require('cross-fetch');
2 | const FormData = require('form-data');
3 |
4 | class FetchClient {
5 | constructor({ baseURL = '', headers = {} } = {}) {
6 | this.baseURL = baseURL;
7 | this.defaultHeaders = headers;
8 | }
9 |
10 | /**
11 | * Send a POST request using cross-fetch.
12 | *
13 | * @param {string} endpoint - URL path or full URL if starts with http.
14 | * @param {object|FormData} data - Data to send in the request body.
15 | * @param {object} extraConfig - Optional config (e.g. { responseType: 'arraybuffer' | 'stream' }).
16 | * @returns {Promise} - JSON by default, or stream/arrayBuffer if specified.
17 | */
18 | async post(endpoint, data, extraConfig = {}) {
19 | const url = endpoint.startsWith('http')
20 | ? endpoint
21 | : this.baseURL + endpoint;
22 |
23 | // Decide how to handle the request body
24 | let body;
25 | if (data instanceof FormData) {
26 | // Use FormData directly (e.g., file uploads)
27 | body = data;
28 | } else if (data !== undefined) {
29 | // Assume JSON
30 | body = JSON.stringify(data);
31 | }
32 |
33 | // Merge default and extra headers
34 | const headers = {
35 | ...this.defaultHeaders,
36 | ...(extraConfig.headers || {})
37 | };
38 |
39 | // If using FormData in Node, merge the form's headers
40 | if (data instanceof FormData && typeof data.getHeaders === 'function') {
41 | Object.assign(headers, data.getHeaders());
42 | }
43 |
44 | const config = {
45 | method: 'POST',
46 | headers,
47 | body
48 | };
49 |
50 | // Make the request
51 | const response = await fetch(url, config);
52 |
53 | // Check for HTTP error
54 | if (!response.ok) {
55 | const errorText = await response.text();
56 | throw new Error(`HTTP error ${response.status}: ${errorText}`);
57 | }
58 |
59 | // Handle custom response types
60 | if (extraConfig.responseType === 'arraybuffer') {
61 | return await response.arrayBuffer();
62 | } else if (extraConfig.responseType === 'stream') {
63 | // Return raw body stream (ReadableStream in browser / Node 18+)
64 | return response.body;
65 | } else {
66 | // Default: parse JSON
67 | return await response.json();
68 | }
69 | }
70 |
71 | /**
72 | * Send a GET request using cross-fetch.
73 | *
74 | * @param {string} endpoint - URL path or full URL if starts with http.
75 | * @param {object} extraConfig - Optional config (e.g. { responseType: 'arraybuffer' }).
76 | * @returns {Promise} - JSON by default, or stream/arrayBuffer if specified.
77 | */
78 | async get(endpoint, extraConfig = {}) {
79 | const url = endpoint.startsWith('http')
80 | ? endpoint
81 | : this.baseURL + endpoint;
82 |
83 | const headers = {
84 | ...this.defaultHeaders,
85 | ...(extraConfig.headers || {})
86 | };
87 |
88 | const response = await fetch(url, { method: 'GET', headers });
89 |
90 | if (!response.ok) {
91 | const errorText = await response.text();
92 | throw new Error(`HTTP error ${response.status}: ${errorText}`);
93 | }
94 |
95 | if (extraConfig.responseType === 'arraybuffer') {
96 | return await response.arrayBuffer();
97 | } else if (extraConfig.responseType === 'stream') {
98 | return response.body;
99 | } else {
100 | return await response.json();
101 | }
102 | }
103 | }
104 |
105 | module.exports = FetchClient;
106 |
--------------------------------------------------------------------------------
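Usage sketch: the three response modes FetchClient documents above (parsed JSON by default, arraybuffer, stream). The base URL, endpoints, and token below are hypothetical placeholders, not real services.

const FetchClient = require('./IntelliNode/utils/FetchClient');

(async () => {
  const client = new FetchClient({
    baseURL: 'https://api.example.com',            // hypothetical server
    headers: { Authorization: 'Bearer <token>' },  // placeholder credential
  });
  const json = await client.post('/v1/echo', { msg: 'hi' });                   // parsed JSON (default)
  const bytes = await client.get('/v1/file', { responseType: 'arraybuffer' }); // raw ArrayBuffer
  const stream = await client.get('/v1/events', { responseType: 'stream' });   // raw body stream
  console.log(json, bytes.byteLength, typeof stream);
})();
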
/IntelliNode/test/integration/CohereAIWrapper.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const CohereAIWrapper = require('../../wrappers/CohereAIWrapper');
4 | const { CohereStreamParser } = require('../../utils/StreamParser');
5 | const cohere = new CohereAIWrapper(process.env.COHERE_API_KEY);
6 |
7 | async function testCohereGenerateModel() {
8 | try {
9 | const params = {
10 | model: 'command',
11 | prompt:
12 | 'Write a blog outline for a blog titled "The Art of Effective Communication"',
13 | temperature: 0.7,
14 | max_tokens: 200,
15 | };
16 |
17 | const result = await cohere.generateText(params);
18 | console.log(
19 | 'Cohere Language Model Result:',
20 | result.generations[0].text
21 | );
22 | } catch (error) {
23 | console.error('Cohere Language Model Error:', error);
24 | }
25 | }
26 |
27 | async function testCohereWebChat() {
28 | try {
29 | const params = {
30 | model: 'command-nightly',
31 | message: 'what is the command to install intellinode npm module ?',
32 | temperature: 0.3,
33 | chat_history: [],
34 | prompt_truncation: 'auto',
35 | stream: false,
36 | citation_quality: 'accurate',
37 | connectors: [{'id': 'web-search'}],
38 | };
39 | const result = await cohere.generateChatText(params);
40 |
41 | console.log('Cohere Chat Result:', JSON.stringify(result, null, 2));
42 | } catch (error) {
43 | console.error('Cohere Chat Error:', error);
44 | }
45 | }
46 |
47 | async function testCohereChatStream() {
48 | try {
49 | const params = {
50 | model: 'command',
51 | message: 'how to use intellinode npm module ?',
52 | stream: true,
53 | chat_history: [],
54 | prompt_truncation: 'auto',
55 | citation_quality: 'accurate',
56 | temperature: 0.3
57 | };
58 |
59 | let responseChunks = '';
60 | const streamParser = new CohereStreamParser();
61 |
62 | const stream = await cohere.generateChatText(params);
63 |
64 | // Collect data from the stream
65 | for await (const chunk of stream) {
66 | const chunkText = chunk.toString('utf8');
67 | for await (const contentText of streamParser.feed(chunkText)) {
68 | console.log('result chunk:', contentText);
69 | responseChunks += contentText;
70 | }
71 | }
72 |
73 | console.log('Concatenated text: ', responseChunks);
74 | assert(responseChunks.length > 0, 'testCohereChatStream response length should be greater than 0');
75 | } catch (error) {
76 | console.error('Cohere Chat Error:', error);
77 | }
78 | }
79 |
80 | async function testCohereEmbeddings() {
81 | try {
82 | const params = {
83 | texts: [
84 | 'Hello from Cohere!',
85 | 'Hallo von Cohere!',
86 | '您好,来自 Cohere!',
87 | ],
88 | model: 'embed-multilingual-v2.0',
89 | truncate: 'END',
90 | };
91 |
92 | const result = await cohere.getEmbeddings(params);
93 | const embeddings = result.embeddings;
94 | console.log(
95 | 'Cohere Embeddings Result Sample:',
96 | embeddings[0].slice(0, 50)
97 | );
98 | assert(
99 | embeddings.length > 0,
100 | 'testCohereEmbeddings response length should be greater than 0'
101 | );
102 | } catch (error) {
103 | console.error('Cohere Embeddings Error:', error);
104 | }
105 | }
106 |
107 | (async () => {
108 | await testCohereGenerateModel();
109 |
110 | await testCohereEmbeddings();
111 |
112 | await testCohereWebChat();
113 |
114 | await testCohereChatStream();
115 |
116 | })();
117 |
--------------------------------------------------------------------------------
/IntelliNode/utils/ChatContext.js:
--------------------------------------------------------------------------------
1 | /* Apache License
2 | Copyright 2023 Github.com/Barqawiz/IntelliNode */
3 | const { SemanticSearch } = require('../function/SemanticSearch');
4 | const { SupportedEmbedModels } = require('../controller/RemoteEmbedModel');
5 |
6 | class ChatContext {
7 |
8 | /**
9 | * Constructs a new instance of the Chat Context.
10 | *
11 | * @param {string} apiKey - The model API key.
12 | * @param {string} provider - The provider of the embedding model.
13 | */
14 | constructor(apiKey, provider = SupportedEmbedModels.OPENAI, customProxyHelper = null) {
15 | this.semanticSearch = new SemanticSearch(apiKey, provider, customProxyHelper);
16 | }
17 |
18 | /**
19 | * Provides n context messages, combining the last 2 history messages with the most relevant earlier ones.
20 | *
21 | * @param {string} userMessage - The user message to filter context.
22 | * @param {string[]} historyMessages - The array of previous messages.
23 | * @param {number} n - The number of messages to return.
24 | * @returns {string[]} - The most relevant n messages.
25 | */
26 | async getStringContext(userMessage, historyMessages, n, modelName = null) {
27 | let returnMessages;
28 | if (n >= historyMessages.length) {
29 | returnMessages = historyMessages.slice(-n);
30 | } else {
31 | const relevantMessages = historyMessages.slice(0, historyMessages.length - 2);
32 |
33 | if (relevantMessages.length > 0) {
34 | let semanticSearchResult =
35 | await this.semanticSearch.getTopMatches(userMessage, relevantMessages, n - 2, modelName);
36 |
37 | const topMatches = this.semanticSearch.filterTopMatches(semanticSearchResult, relevantMessages);
38 |
39 | returnMessages = topMatches.concat(historyMessages.slice(-2));
40 | } else {
41 | returnMessages = historyMessages.slice(-2);
42 | }
43 |
44 | }
45 |
46 | return returnMessages;
47 | }
48 |
49 | /**
50 | * Provides n relevant context messages from the history,
51 | * where each history message includes a role and content.
52 | *
53 | * @param {string} userMessage - The user message to filter context.
54 | * @param {Array} historyMessages - Array of message objects with 'role' and 'content' fields.
55 | * @param {number} n - The number of context messages to return.
56 | * @returns {Array} - The most relevant n message objects with 'role' and 'content' fields.
57 | */
58 | async getRoleContext(userMessage, historyMessages, n, modelName = null) {
59 | const historyMessageContents = historyMessages.map(msg => msg.content);
60 | let returnMessages;
61 |
62 | if (n >= historyMessages.length) {
63 | returnMessages = historyMessages.slice(-n);
64 | } else {
65 | const relevantMessages = historyMessageContents.slice(0, -2);
66 |
67 | if (relevantMessages.length > 0) {
68 | let semanticSearchResult =
69 | await this.semanticSearch.getTopMatches(userMessage, relevantMessages, n - 2, modelName);
70 |
71 | const semanticSearchTopMatches = semanticSearchResult.map(result => result.index);
72 | const topMatches = historyMessages.filter((value, index) => semanticSearchTopMatches.includes(index));
73 | returnMessages = topMatches.concat(historyMessages.slice(-2));
74 | } else {
75 | returnMessages = historyMessages.slice(-2);
76 | }
77 | }
78 |
79 | return returnMessages;
80 | }
81 | }
82 |
83 | module.exports = ChatContext;
--------------------------------------------------------------------------------
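Usage sketch: trimming history with ChatContext.getStringContext. Assumes OPENAI_API_KEY is set and the script runs from the repository root; the history messages are illustrative only.

require('dotenv').config();
const ChatContext = require('./IntelliNode/utils/ChatContext');

(async () => {
  const context = new ChatContext(process.env.OPENAI_API_KEY); // defaults to the OpenAI embed provider
  const history = [
    'I love playing chess on weekends',
    'My dog is a golden retriever',
    'Openings are the weakest part of my game',
    'What should I cook tonight?',
  ];
  // Keep 3 messages: the last 2 plus the most relevant earlier one for the new user message.
  const trimmed = await context.getStringContext('recommend a chess opening for me', history, 3);
  console.log(trimmed);
})();
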
/IntelliNode/test/integration/ModelEvaluation.test.js:
--------------------------------------------------------------------------------
1 | require("dotenv").config();
2 | const assert = require('assert');
3 | const { LLMEvaluation } = require('../../utils/LLMEvaluation');
4 | const { SupportedChatModels } = require('../../function/Chatbot');
5 | const { SupportedLangModels } = require('../../controller/RemoteLanguageModel');
6 |
7 | // prepare the evaluation settings
8 | const llamaChat = {
9 | apiKey: process.env.REPLICATE_API_KEY, provider: SupportedChatModels.REPLICATE,
10 | type: 'chat', model: '13b-chat', maxTokens: 50
11 | };
12 | const openaiChat = {
13 | apiKey: process.env.OPENAI_API_KEY, provider: SupportedChatModels.OPENAI,
14 | type: 'chat', model: 'gpt-4o', maxTokens: 50
15 | };
16 | const cohereCompletion = {
17 | apiKey: process.env.COHERE_API_KEY, provider: SupportedLangModels.COHERE,
18 | type: 'completion', model: 'command', maxTokens: 50
19 | };
20 | const geminiChat = {
21 | apiKey: process.env.GEMINI_API_KEY, provider: SupportedChatModels.GEMINI,
22 | type: 'chat', model: 'gemini'
23 | };
24 | const mistralChat = {
25 | apiKey: process.env.MISTRAL_API_KEY, provider: SupportedChatModels.MISTRAL,
26 | type: 'chat', model: 'mistral-medium', maxTokens: 50
27 | };
28 |
29 | const anthropicChat = {
30 | apiKey: process.env.ANTHROPIC_API_KEY, provider: SupportedChatModels.ANTHROPIC,
31 | type: 'chat', model: 'claude-3-sonnet-20240229', maxTokens: 50
32 | };
33 |
34 | // create the evaluation object
35 | const llmEvaluation = new LLMEvaluation(process.env.OPENAI_API_KEY, 'openai');
36 |
37 | async function testLLMEvaluation() {
38 | const inputString = "Explain the process of photosynthesis in simple terms.";
39 | const targetAnswers = ["Photosynthesis is the process where green plants use sunlight to turn carbon dioxide and water into glucose and oxygen. The glucose provides food for the plant, and the oxygen gets released back into the air.",
40 | "Photosynthesis is how plants make their own food. They take in water and carbon dioxide, use the energy from sunlight to transform them into glucose (their food) and oxygen, which they release into the air.",
41 | "In simple terms, photosynthesis is like cooking for plants but instead of a stove, they use sunlight. They mix water and carbon dioxide with the sunlight to create glucose, which is their food, and also produce oxygen."];
42 | const providerSets = [llamaChat, openaiChat, cohereCompletion, geminiChat, mistralChat, anthropicChat];
43 |
44 | const results = await llmEvaluation.compareModels(inputString, targetAnswers, providerSets);
45 |
46 | console.log('ModelEvaluation results across providers:', results);
47 |
48 | assert(Object.keys(results).length === providerSets.length + 1, 'results should contain providerSets.length + 1 entries');
49 | }
50 |
51 |
52 | async function testLLMEvaluationJson() {
53 |
54 | const inputString = "Explain the process of photosynthesis in simple terms.";
55 |
56 | const targetAnswers = ["Photosynthesis is the process where green plants use sunlight to turn carbon dioxide and water into glucose and oxygen. The glucose provides food for the plant, and the oxygen gets released back into the air.",
57 | "Photosynthesis is how plants make their own food. They take in water and carbon dioxide, use the energy from sunlight to transform them into glucose (their food) and oxygen, which they release into the air.",
58 | "In simple terms, photosynthesis is like cooking for plants but instead of a stove, they use sunlight. They mix water and carbon dioxide with the sunlight to create glucose, which is their food, and also produce oxygen."];
59 |
60 | const providerSets = [llamaChat, openaiChat, cohereCompletion];
61 |
62 | const results = await llmEvaluation.compareModels(inputString, targetAnswers, providerSets, true);
63 |
64 | console.log('Json Results:', results);
65 |
66 | }
67 |
68 | (async () => {
69 | await testLLMEvaluation();
70 |
71 | // await testLLMEvaluationJson();
72 | })();
--------------------------------------------------------------------------------
/samples/content_generator_ui/front.js:
--------------------------------------------------------------------------------
1 | document.getElementById('content-form').addEventListener('submit', (e) => {
2 | e.preventDefault();
3 | const product = document.getElementById('product').value;
4 |
5 | // Show loading spinner
6 | document.getElementById('loading').classList.remove('d-none');
7 |
8 | async function generateText() {
9 | try {
10 | const response = await fetch('/generate-content', {
11 | method: 'POST',
12 | headers: { 'Content-Type': 'application/json' },
13 | body: JSON.stringify({ product: product, type: 'text' }),
14 | });
15 | if (!response.ok) {
16 | const errorData = await response.json();
17 | throw new Error(errorData.message);
18 | }
19 | const { text } = await response.json();
20 | document.getElementById('generated-text').innerText = text;
21 | document.getElementById('text-title').classList.remove('d-none');
22 | } catch (error) {
23 | console.error('Error in generating text:', error);
24 | showErrorModal(error.message || 'An error occurred while generating content.');
25 | }
26 | }
27 |
28 | async function generateImage() {
29 | try {
30 | const response = await fetch('/generate-content', {
31 | method: 'POST',
32 | headers: { 'Content-Type': 'application/json' },
33 | body: JSON.stringify({ product: product, type: 'image' }),
34 | });
35 | if (!response.ok) {
36 | const errorData = await response.json();
37 | throw new Error(errorData.message);
38 | }
39 | const { imageData } = await response.json();
40 | const imageDataUrl = `data:image/png;base64,${imageData}`;
41 | const imageElement = document.getElementById('generated-image');
42 | imageElement.src = imageDataUrl;
43 | imageElement.classList.remove('d-none');
44 | document.getElementById('image-title').classList.remove('d-none');
45 | } catch (error) {
46 | console.error('Error in generating image:', error);
47 | showErrorModal(error.message || 'An error occurred while generating content.');
48 | }
49 | }
50 |
51 | async function generateAudio() {
52 | try {
53 | const response = await fetch('/generate-content', {
54 | method: 'POST',
55 | headers: { 'Content-Type': 'application/json' },
56 | body: JSON.stringify({ product: product, type: 'audio' }),
57 | });
58 | if (!response.ok) {
59 | const errorData = await response.json();
60 | throw new Error(errorData.message);
61 | }
62 | const { audioData } = await response.json();
63 | const audioDataUrl = `data:audio/mpeg;base64,${audioData}`;
64 | const audioElement = document.getElementById('generated-audio');
65 | audioElement.src = audioDataUrl;
66 | audioElement.classList.remove('d-none');
67 | document.getElementById('audio-title').classList.remove('d-none');
68 | } catch (error) {
69 | console.error('Error in generating audio:', error);
70 | showErrorModal(error.message || 'An error occurred while generating content.');
71 | }
72 | }
73 |
74 | // Run the generate functions concurrently; hide the spinner once all have settled
75 | Promise.all([
76 | generateText().catch((error) => {
77 | console.error('Error in generating text:', error);
78 | }),
79 | generateImage().catch((error) => {
80 | console.error('Error in generating image:', error);
81 | }),
82 | generateAudio().catch((error) => {
83 | console.error('Error in generating audio:', error);
84 | }),
85 | ]).finally(() => {
86 | // Hide loading spinner
87 | document.getElementById('loading').classList.add('d-none');
88 | });
89 | });
90 |
91 |
92 | function showErrorModal(message) {
93 | const errorMessageElement = document.getElementById('error-message');
94 | const errorModal = $('#error-modal');
95 |
96 | errorMessageElement.innerText = message;
97 | errorModal.modal('show');
98 | }
99 |
--------------------------------------------------------------------------------
/IntelliNode/test/integration/VLLMWrapper.test.js:
--------------------------------------------------------------------------------
1 | require('dotenv').config();
2 | const assert = require('assert');
3 | const VLLMWrapper = require('../../wrappers/VLLMWrapper');
4 | const { VLLMStreamParser } = require('../../utils/StreamParser');
5 |
6 | const vllmEmbedUrl = process.env.VLLM_EMBED_URL;
7 | const deepseekUrl = process.env.DEEPSEEK_VLLM_URL;
8 | const gemmaUrl = process.env.GEMMA_VLLM_URL;
9 | const llamaUrl = process.env.LLAMA_VLLM_URL;
10 | const mistralUrl = process.env.MISTRAL_VLLM_URL;
11 |
12 | async function testVLLMEmbedding() {
13 | const embedWrapper = new VLLMWrapper(vllmEmbedUrl);
14 | const response = await embedWrapper.getEmbeddings(["hello world"]);
15 | console.log('VLLM Embeddings:', response);
16 | assert(response.embeddings[0].length > 0);
17 | }
18 |
19 | async function testDeepseekCompletion() {
20 | const deepseekWrapper = new VLLMWrapper(deepseekUrl);
21 | const response = await deepseekWrapper.generateText({
22 | model: "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
23 | prompt: "What is machine learning?",
24 | max_tokens: 100,
25 | temperature: 0.7
26 | });
27 | console.log('Deepseek Completion:', response);
28 | assert(response.choices[0].text.length > 0);
29 | }
30 |
31 | async function testGemmaCompletion() {
32 | const gemmaWrapper = new VLLMWrapper(gemmaUrl);
33 | const response = await gemmaWrapper.generateText({
34 | model: "google/gemma-2-2b-it",
35 | prompt: "What is machine learning?",
36 | max_tokens: 100,
37 | temperature: 0.7
38 | });
39 | console.log('Gemma Completion:', response);
40 | assert(response.choices[0].text.length > 0);
41 | }
42 |
43 | async function testLlamaCompletion() {
44 | const llamaWrapper = new VLLMWrapper(llamaUrl);
45 | const response = await llamaWrapper.generateText({
46 | model: "meta-llama/Llama-3.1-8B-Instruct",
47 | prompt: "What is machine learning?",
48 | max_tokens: 100,
49 | temperature: 0.7
50 | });
51 | console.log('Llama Completion:', response);
52 | assert(response.choices[0].text.length > 0);
53 | }
54 |
55 | async function testMistralCompletion() {
56 | const mistralWrapper = new VLLMWrapper(mistralUrl);
57 | const response = await mistralWrapper.generateText({
58 | model: "mistralai/Mistral-7B-Instruct-v0.2",
59 | prompt: "What is machine learning?",
60 | max_tokens: 100,
61 | temperature: 0.7
62 | });
63 | console.log('Mistral Completion:', response);
64 | assert(response.choices[0].text.length > 0);
65 | }
66 |
67 | async function testVLLMWrapperStreaming() {
68 | console.log('\nTesting direct VLLM wrapper streaming:');
69 |
70 | const vllmUrl = process.env.MIXTRAL_VLLM_URL || 'http://34.166.138.174:8000';
71 | const wrapper = new VLLMWrapper(vllmUrl);
72 |
73 | const params = {
74 | model: 'mistralai/Mistral-7B-Instruct-v0.2',
75 | prompt: 'What is machine learning?',
76 | max_tokens: 100,
77 | temperature: 0.7,
78 | stream: true
79 | };
80 |
81 | try {
82 | const stream = await wrapper.generateText(params);
83 | const streamParser = new VLLMStreamParser();
84 |
85 | let fullText = '';
86 | for await (const chunk of stream) {
87 | const chunkText = chunk.toString('utf8');
88 | for await (const contentText of streamParser.feed(chunkText)) {
89 | fullText += contentText;
90 | console.log('Chunk:', contentText);
91 | }
92 | }
93 |
94 | console.log('Complete text:', fullText);
95 | assert(fullText.length > 0, "VLLM streaming response should not be empty");
96 | } catch (error) {
97 | console.error("Error:", error);
98 | throw error;
99 | }
100 | }
101 |
102 | (async () => {
103 | await testVLLMEmbedding();
104 | await testDeepseekCompletion();
105 | await testGemmaCompletion();
106 | await testLlamaCompletion();
107 | await testMistralCompletion();
108 | await testVLLMWrapperStreaming();
109 | })();
110 |
--------------------------------------------------------------------------------
/IntelliNode/utils/MCPClient.js:
--------------------------------------------------------------------------------
1 | /*
2 | Apache License
3 |
4 | Copyright 2023 Github.com/Barqawiz/IntelliNode
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | */
8 | const fetch = require('cross-fetch');
9 | const connHelper = require('./ConnHelper');
10 |
11 | /**
12 | * MCPClient - Simple Model Context Protocol client for connecting to MCP servers
13 | * Supports HTTP/SSE transport for tool execution and data retrieval
14 | *
15 | * Usage:
16 | * const mcpClient = new MCPClient('http://localhost:3000');
17 | * const tools = await mcpClient.getTools();
18 | * const result = await mcpClient.callTool('tool_name', { param: 'value' });
19 | */
20 | class MCPClient {
21 | constructor(serverUrl) {
22 | this.serverUrl = serverUrl.replace(/\/$/, ''); // remove trailing slash
23 | this.requestId = 0;
24 | this.tools = [];
25 | }
26 |
27 | /**
28 | * Initialize connection to MCP server and fetch available tools
29 | */
30 | async initialize() {
31 | try {
32 | this.tools = await this.getTools();
33 | return this.tools;
34 | } catch (error) {
35 | throw new Error(`Failed to initialize MCP client: ${error.message}`);
36 | }
37 | }
38 |
39 | /**
40 | * Get all available tools from MCP server
41 | */
42 | async getTools() {
43 | try {
44 | const response = await fetch(`${this.serverUrl}/mcp/tools`, {
45 | method: 'GET',
46 | headers: {
47 | 'Content-Type': 'application/json'
48 | }
49 | });
50 |
51 | if (!response.ok) {
52 | throw new Error(`HTTP ${response.status}: ${response.statusText}`);
53 | }
54 |
55 | const data = await response.json();
56 | return data.tools || [];
57 | } catch (error) {
58 | throw new Error(`Failed to fetch tools from MCP server: ${error.message}`);
59 | }
60 | }
61 |
62 | /**
63 | * Call a specific tool on the MCP server
64 | * @param {string} toolName - Name of the tool to call
65 | * @param {object} input - Input parameters for the tool
66 | * @returns {Promise