15 | );
16 | };
17 |
--------------------------------------------------------------------------------
/frontend/src/icons/map-arrow-right.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/neo4j-nodes-understanding.j2:
--------------------------------------------------------------------------------
1 | Persona: You are a graph modelling expert who understands the nuances of the graph model currently loaded into Neo4j.
2 |
3 | Role: You have to update documentation with the details around the specifics of each node in the graph
4 |
5 | Restrictions:
6 | - Do not change the structure of the JSON payload
7 | - Only update the detail key
8 | - Only output the JSON. The JSON needs to have valid JSON syntax, otherwise you will be unplugged
9 |
10 | Graph Why:
11 | {{ neo4j_graph_why }}
12 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/neo4j-relationship-understanding.j2:
--------------------------------------------------------------------------------
1 | Persona: You are a graph modelling expert who understands the nuances of the graph model currently loaded into Neo4j.
2 |
3 | Role: You have to update documentation with the details around the specifics of each relationship in the graph
4 |
5 | Restrictions:
6 | - Do not change the structure of the JSON payload
7 | - Only update the detail key
8 | - Only output the JSON. The JSON needs to have valid JSON syntax, otherwise you will be unplugged
9 |
10 | Graph Why:
11 | {{ neo4j_graph_why }}
12 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/samples/graphdb/usecase_questions_knowledge.csv:
--------------------------------------------------------------------------------
1 | questionId,knowledgeId,predicate,weight
2 | 1,1,>=500,1
3 | 2,2,>=3000,1
4 | 3,3,>=50000,1
5 | 4,4,>=600,1
6 | 5,5,>=150000,1
7 | 6,6,0,1
8 | 7,7,>=10000,1
9 | 8,8,is not null,1
10 | 9,9,='early retirement',1
11 | 10,10,='high',1
12 | 1,11,>=250,1
13 | 3,12,>=10000,1
14 | 4,13,>=300,1
15 | 7,14,0,1
16 | 9,15,='retirement',1
17 | 10,16,='medium',1
18 | 1,17,<=100,1
19 | 2,18,<=500,1
20 | 6,19,>=3000,1
21 | 12,21,>=550,1
22 | 9,23,='mortgage',1
23 | 13,24,='other bank',1
24 | 14,25,>=100,1
25 |
--------------------------------------------------------------------------------
/backend/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi==0.110.0
2 | uvicorn==0.29.0
3 | mistralai==1.1.0
4 | pycodestyle==2.11.1
5 | python-dotenv==1.0.1
6 | neo4j==5.18.0
7 | ruff==0.3.5
8 | pytest==8.1.1
9 | pytest-mock==3.14.0
10 | pytest-asyncio==0.23.7
11 | jinja2==3.1.3
12 | websockets==12.0
13 | azure-core==1.30.1
14 | azure-storage-blob==12.20.0
15 | cffi==1.16.0
16 | cryptography==42.0.7
17 | isodate==0.6.1
18 | pycparser==2.22
19 | openai==1.35.3
20 | beautifulsoup4==4.12.3
21 | aiohttp==3.9.5
22 | googlesearch-python==1.2.4
23 | matplotlib==3.9.1
24 | pillow==10.4.0
25 | pypdf==4.3.1
26 |
--------------------------------------------------------------------------------
/backend/src/websockets/types.py:
--------------------------------------------------------------------------------
from dataclasses import dataclass
from enum import Enum
from typing import Callable, Dict

from fastapi import WebSocket


class MessageTypes(Enum):
    """Wire-level message type tags exchanged over the websocket."""

    PING = "ping"
    PONG = "pong"
    CHAT = "chat"
    LOG = "log"
    IMAGE = "image"
    CONFIRMATION = "confirmation"


@dataclass
class Message:
    """A single websocket message: a type tag plus an optional string payload."""

    type: MessageTypes
    data: str | None


# A handler receives the websocket, a callback callable, and the optional
# message payload. NOTE(review): declared as returning None although handlers
# may be coroutines — confirm against the registered handler implementations.
Handler = Callable[[WebSocket, Callable, str | None], None]
Handlers = Dict[MessageTypes, Handler]
25 |
--------------------------------------------------------------------------------
/frontend/src/components/connection-status.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import classNames from 'classnames';
3 | import styles from './connection-status.module.css';
4 |
5 | export interface ConnectionStatusProps {
6 | isConnected: boolean;
7 | }
8 |
9 | export const ConnectionStatus = ({ isConnected }: ConnectionStatusProps) => {
10 | return (
11 |
12 |
16 |
17 | );
18 | };
19 |
--------------------------------------------------------------------------------
/.github/workflows/lint-frontend.yml:
--------------------------------------------------------------------------------
1 | name: Lint Frontend
2 |
3 | on:
4 | pull_request:
5 |
6 | jobs:
7 | linting:
8 | name: Linting
9 | defaults:
10 | run:
11 | working-directory: ./frontend
12 | runs-on: ubuntu-latest
13 | steps:
14 | - name: Checkout
15 | uses: actions/checkout@v2
16 |
17 | - name: Setup Node.js
18 | uses: actions/setup-node@v2
19 | with:
20 | node-version: 21
21 |
22 | - name: Install dependencies
23 | run: npm install
24 |
25 | - name: Lint
26 | run: npm run lint
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/samples/botui/react-quickstart-main/src/javascript/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | BotUI JavaScript Quickstart
8 |
9 |
14 |
15 |
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/agent-selection-format.j2:
--------------------------------------------------------------------------------
1 | Reply only in json with the following format:
2 |
3 | {
4 | "thoughts": {
5 | "text": "thoughts",
6 | "plan": "description of the plan for the chosen agent",
7 | "reasoning": "reasoning behind choosing the agent",
8 | "criticism": "constructive self-criticism",
9 | "speak": "thoughts summary to say to user on 1. if you're solving the current or next task and why 2. which agent you've chosen and why"
10 | },
11 | "agent_name": "exact string of the single agent to solve task chosen"
12 | }
13 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/neo4j-property-intent-prompt.j2:
--------------------------------------------------------------------------------
1 | Persona: You are a graph modelling expert who understands the nuances of the graph model currently loaded into Neo4j.
2 |
3 | Role: You must update documentation with the details around the specific property on each relationship in the graph.
4 | Focus on the property as a priority
5 |
6 | Restrictions:
7 | - Do not change the structure of the JSON payload
8 | - Only update the detail key
9 | - Only output the JSON. The JSON needs to have valid JSON syntax, otherwise you will be unplugged
10 |
11 | Graph Why:
12 | {{ neo4j_graph_why }}
13 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/details-to-generate-chart-code.j2:
--------------------------------------------------------------------------------
1 | We have the following details that are used to determine how to generate bar chart code using Matplotlib:
2 |
3 | question_intent: {{question_intent}} Description: This represents the overall intent the question is attempting to answer
4 | data_provided: {{data_provided}} Description: This is the data collected to answer the user_intent. The data is stored in the summary of {scratchpad}
5 | question_params: {{question_params}} Description: The specific parameters required for the question to be answered with the question_intent, extracted from data_provided
6 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/README.md:
--------------------------------------------------------------------------------
1 | # Financial Health Check Bot
2 |
3 | At the moment, this repo consists of a sample Retail Bank Health Check Proof of Concept project
4 |
5 | * [Financial Bot](./financial-bot)
6 |
7 | Apart from these, here are some other sample projects/ideas.
8 | * [NLP platforms comparison](./financial-bot/docs/intents_entities_extraction.md)
9 | * [Graph DB thoughts](./financial-bot/docs/knowledge_graph/graph_db_thoughts.md)
10 |
11 | ### To do
12 | - [ ] Extract useful logic from financial bot and implement into InferGPT
13 | - [ ] Delete `financialhealthcheckScottLogic` directory
14 |
--------------------------------------------------------------------------------
/frontend/src/session/websocket-context.ts:
--------------------------------------------------------------------------------
1 | import { createContext } from 'react';
2 |
3 | export enum MessageType {
4 | PING = 'ping',
5 | CHAT = 'chat',
6 | IMAGE = 'image',
7 | CONFIRMATION = 'confirmation',
8 | }
9 |
10 | export interface Message {
11 | type: MessageType;
12 | data?: string;
13 | }
14 |
15 | export interface Connection {
16 | isConnected: boolean;
17 | lastMessage: Message | null;
18 | send: (message: Message) => void;
19 | }
20 |
21 | export const WebsocketContext = createContext({
22 | isConnected: false,
23 | lastMessage: null,
24 | send: () => {},
25 | });
26 |
--------------------------------------------------------------------------------
/backend/Dockerfile:
--------------------------------------------------------------------------------
1 | # Choose our version of Python
2 | FROM python:3.12
3 |
4 | # Set up a working directory
5 | WORKDIR /backend
6 |
7 | # Copy just the requirements into the working directory so it gets cached by itself
8 | COPY ./requirements.txt ./requirements.txt
9 |
10 | # Install the dependencies from the requirements file
11 | RUN pip install --no-cache-dir --upgrade -r /backend/requirements.txt
12 |
13 | # Copy the source code into the working directory
14 | COPY ./src/. ./src
15 |
16 | EXPOSE 8250
17 |
18 | # Run our entry file, which will start the server
19 | CMD ["python", "-m", "src.main", "--host", "0.0.0.0"]
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/financial-bot/backend/requirements.txt:
--------------------------------------------------------------------------------
1 | aiohttp==3.8.3
2 | aiosignal==1.3.1
3 | async-timeout==4.0.2
4 | attrs==22.2.0
5 | certifi==2022.12.7
6 | charset-normalizer==2.1.1
7 | click==8.1.3
8 | colorama==0.4.6
9 | Flask==2.2.2
10 | Flask-Cors==3.0.10
11 | frozenlist==1.3.3
12 | idna==3.4
13 | itsdangerous==2.1.2
14 | Jinja2==3.1.2
15 | MarkupSafe==2.1.2
16 | multidict==6.0.4
17 | neo4j==5.4.0
18 | openai==0.26.2
19 | python-dotenv==0.21.1
20 | pytz==2022.7.1
21 | requests==2.28.2
22 | six==1.16.0
23 | tqdm==4.64.1
24 | urllib3==1.26.14
25 | Werkzeug==2.2.2
26 | yarl==1.8.2
27 | flask-session==0.4.0
28 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/samples/graphdb/questions_knowledge.csv:
--------------------------------------------------------------------------------
1 | questionId,knowledgeId,predicate,weight
2 | 1,1,">=500",1.0
3 | 2,2,">=3000",1.0
4 | 3,3,">=50000",1.0
5 | 4,4,">=600",1.0
6 | 5,5,">=150000",1.0
7 | 6,6,"=0",1.0
8 | 7,7,">=10000",1.0
9 | 8,8,"is not null",1.0
10 | 9,9,"='early retirement'",1.0
11 | 10,10,"='high'",1.0
12 | 1,11,">=250",1.0
13 | 3,12,">=10000",1.0
14 | 4,13,">=300",1.0
15 | 7,14,"=0",1.0
16 | 9,15,"='retirement'",1.0
17 | 10,16,"='medium'",1.0
18 | 1,17,"<=100",1.0
19 | 2,18,"<=500",1.0
20 | 6,19,">=3000",1.0
21 | 11,20,"=false",1.0
22 | 12,21,">=550",1.0
23 | 4,22,"=0",1.0
24 | 9,23,"='mortgage'",1.0
25 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/financial-bot/frontend/chat-widget/src/index.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import ReactDOM from 'react-dom';
3 | import './index.css';
4 | import App from './App';
5 | import reportWebVitals from './reportWebVitals';
6 |
7 | ReactDOM.render(
8 |
9 |
10 | ,
11 | document.getElementById('root')
12 | );
13 |
14 | // If you want to start measuring performance in your app, pass a function
15 | // to log results (for example: reportWebVitals(console.log))
16 | // or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
17 | reportWebVitals();
18 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/samples/chatscope/example-chat-widget/src/index.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import ReactDOM from 'react-dom';
3 | import './index.css';
4 | import App from './App';
5 | import reportWebVitals from './reportWebVitals';
6 |
7 | ReactDOM.render(
8 |
9 |
10 | ,
11 | document.getElementById('root')
12 | );
13 |
14 | // If you want to start measuring performance in your app, pass a function
15 | // to log results (for example: reportWebVitals(console.log))
16 | // or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
17 | reportWebVitals();
18 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/samples/botui/react-quickstart-main/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "react-quickstart",
3 | "scripts": {
4 | "dev-ts": "parcel --no-cache src/typescript/index.html",
5 | "dev-js": "parcel --no-cache src/javascript/index.html"
6 | },
7 | "dependencies": {
8 | "@botui/react": "^1.1.1",
9 | "botui": "^1.1.2",
10 | "react": "^18.2.0",
11 | "react-dom": "^18.2.0"
12 | },
13 | "devDependencies": {
14 | "@parcel/transformer-sass": "^2.7.0",
15 | "@types/react": "^18.0.21",
16 | "@types/react-dom": "^18.0.6",
17 | "parcel": "^2.7.0",
18 | "process": "^0.11.10"
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/samples/chatscope/example-widget-loader/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Widget loader test
6 |
16 |
17 |
18 |
19 |
20 |
Widget example page
21 |
22 |
23 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/neo4j-node-property.j2:
--------------------------------------------------------------------------------
1 | Persona: You are a graph modelling expert who understands the nuances of the graph model currently loaded into Neo4j.
2 |
3 | Role: You must update documentation with the details around the specific property on each node in the graph.
4 | Focus on the property as a priority
5 |
6 | Additional Info:
7 | - The length of all vector embeddings is 256 dimensions
8 |
9 | Restrictions:
10 | - Do not change the structure of the JSON payload
11 | - Only update the detail key
12 | - Only output the JSON. The JSON needs to have valid JSON syntax, otherwise you will be unplugged
13 |
14 | Graph Why:
15 | {{ neo4j_graph_why }}
16 |
--------------------------------------------------------------------------------
/backend/tests/llm/factory_test.py:
--------------------------------------------------------------------------------
import pytest
from src.llm.mistral import Mistral
from src.llm import get_llm


def test_get_llm_type_none_throws():
    """get_llm must reject a missing LLM name with a clear error."""
    with pytest.raises(ValueError) as exc_info:
        get_llm(None)

    assert str(exc_info.value) == "LLM name not provided"


def test_get_llm_invalid_type_throws():
    """get_llm must reject an unknown LLM name with a clear error."""
    with pytest.raises(ValueError) as exc_info:
        get_llm("invalid")

    assert str(exc_info.value) == "No LLM model found for: invalid"


def test_get_llm_valid_type_returns_llm():
    """A known name ("mistral") yields an instance of the matching LLM class."""
    assert isinstance(get_llm("mistral"), Mistral)
24 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/samples/chatscope/example-chat-widget/public/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "short_name": "React App",
3 | "name": "Create React App Sample",
4 | "icons": [
5 | {
6 | "src": "favicon.ico",
7 | "sizes": "64x64 32x32 24x24 16x16",
8 | "type": "image/x-icon"
9 | },
10 | {
11 | "src": "logo192.png",
12 | "type": "image/png",
13 | "sizes": "192x192"
14 | },
15 | {
16 | "src": "logo512.png",
17 | "type": "image/png",
18 | "sizes": "512x512"
19 | }
20 | ],
21 | "start_url": ".",
22 | "display": "standalone",
23 | "theme_color": "#000000",
24 | "background_color": "#ffffff"
25 | }
26 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/generate-chart-code.j2:
--------------------------------------------------------------------------------
1 | You are an expert programmer and you specialise in writing quality,
2 | readable Matplotlib bar charts in Python.
3 |
4 | Your task is to create the code for a chart based on the data provided to you,
5 | and the objective is to help visualise the data based on the user's query.
6 |
7 | Only use the library Matplotlib with plt.subplots() to write the code, nothing else.
8 |
9 | Think step by step and strictly answer with the Python code required for the chart, nothing else.
10 |
11 | Your answer must start with ```python and end with ```. Do NOT generate any text before or after.
12 |
13 | Do not include plt.show() in your code.
14 |
--------------------------------------------------------------------------------
/backend/src/llm/count_calls.py:
--------------------------------------------------------------------------------
import logging
from functools import wraps

# NOTE: the original bound this logger to the name `logging`, shadowing the
# stdlib module for the rest of this file; renamed to `logger` to remove the
# shadowing.
logger = logging.getLogger(__name__)


class Counter:
    """Simple mutable call counter shared by the count_calls decorator."""

    count = 0

    def __init__(self):
        self.count = 0

    def increment(self):
        self.count += 1

    def reset(self):
        self.count = 0


# Single shared counter: every function decorated with @count_calls
# increments this same count.
counter = Counter()


def count_calls(func):
    """Decorator that counts and logs how many times ``func`` is invoked.

    The count is shared across all decorated functions via the module-level
    ``counter`` instance. ``functools.wraps`` preserves ``func``'s name and
    docstring on the returned wrapper.
    """

    @wraps(func)
    def wrapper(self=None, *args, **kwargs):
        counter.increment()
        logger.info(f"Function {func.__name__} has been called {counter.count} times")
        return func(self, *args, **kwargs)

    # NOTE(review): this reset runs once, at decoration time (not per call),
    # so each newly decorated function zeroes the shared counter — confirm
    # this is the intended lifecycle.
    counter.reset()
    return wrapper
30 |
--------------------------------------------------------------------------------
/frontend/src/components/waiting.module.css:
--------------------------------------------------------------------------------
1 |
2 | @keyframes waiting {
3 | 0%, 80%, 100% {
4 | opacity: 0;
5 | }
6 | 40% {
7 | opacity: 1;
8 | }
9 | }
10 |
11 | .waiting {
12 | align-self: baseline;
13 | display: inline-block;
14 | }
15 |
16 | .waitingDot {
17 | animation: waiting 1.5s infinite ease-in-out;
18 | background-color: var(--text-color-primary);
19 | border-radius: 50%;
20 | display: inline-block;
21 | height: 8px;
22 | margin-right: 4px;
23 | width: 8px;
24 | }
25 |
26 | .waitingDot:nth-child(2) {
27 | animation-delay: 0.2s;
28 | }
29 |
30 | .waitingDot:nth-child(3) {
31 | animation-delay: 0.4s;
32 | }
--------------------------------------------------------------------------------
/backend/src/api/config.ini:
--------------------------------------------------------------------------------
1 | [loggers]
2 | keys=root
3 |
4 | [handlers]
5 | keys=consoleHandler, errorHandler
6 |
7 | [formatters]
8 | keys=sampleFormatter, detailedFormatter
9 |
10 | [logger_root]
11 | level=INFO
12 | handlers=consoleHandler, errorHandler
13 |
14 | [handler_consoleHandler]
15 | class=StreamHandler
16 | level=INFO
17 | formatter=sampleFormatter
18 | args=(sys.stdout,)
19 |
20 | [handler_errorHandler]
21 | class=StreamHandler
22 | level=ERROR
23 | formatter=detailedFormatter
24 | args=(sys.stderr,)
25 |
26 | [formatter_sampleFormatter]
27 | format=%(levelname)s: %(message)s
28 |
29 | [formatter_detailedFormatter]
30 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s
31 | datefmt=%Y-%m-%d %H:%M:%S
32 |
--------------------------------------------------------------------------------
/backend/src/agents/answer_agent.py:
--------------------------------------------------------------------------------
from datetime import datetime
from src.utils import get_scratchpad
from src.prompts import PromptEngine
from src.agents import Agent, agent

engine = PromptEngine()


@agent(
    name="AnswerAgent",
    description="This agent is responsible for generating an answer for the user, based on results in the scratchpad",
    tools=[],
)
class AnswerAgent(Agent):
    """Turns the accumulated scratchpad results into a final answer for the user."""

    async def invoke(self, utterance: str) -> str:
        # Render the create-answer prompt with the full scratchpad and the
        # current timestamp, then ask the LLM for the final answer.
        prompt = engine.load_prompt(
            "create-answer",
            final_scratchpad=get_scratchpad(),
            datetime=datetime.now(),
        )
        return await self.llm.chat(self.model, prompt, user_prompt=utterance)
20 |
--------------------------------------------------------------------------------
/backend/src/utils/log_publisher.py:
--------------------------------------------------------------------------------
import logging
from enum import Enum
from src.websockets.types import Message, MessageTypes
from src.websockets.connection_manager import connection_manager

log = logging.getLogger(__name__)


class LogPrefix(Enum):
    """Prefixes prepended to locally-logged messages."""

    USER = "USER"


async def publish_log(prefix: LogPrefix, msg: str, loglevel: int, name: str):
    """Log ``msg`` locally under logger ``name`` at ``loglevel``, then
    broadcast the raw message (without the prefix) to all websocket clients
    as a LOG message."""
    logging.getLogger(name).log(loglevel, f"{prefix.value} - {msg}")
    await connection_manager.broadcast(Message(MessageTypes.LOG, msg))


async def publish_log_info(prefix: LogPrefix, msg: str, name: str):
    """Convenience wrapper: publish ``msg`` at INFO level."""
    await publish_log(prefix, msg, logging.INFO, name)
23 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/director.j2:
--------------------------------------------------------------------------------
1 | You are a Chat Bot called InferGPT.
2 | Your sole purpose is to get the user to tell you their intention. The intention has to be specifically related
3 | to the user. For example:
4 | -spending
5 | -personal interests
6 |
7 | You only reply with either one word "TRUE" or one word "FALSE"
8 |
9 | If the user does not provide an intention or the intention isn't directly related to the user,
10 | reply with the single word "FALSE"
11 |
12 | Otherwise reply with the single word "TRUE"
13 |
14 | Eg.
15 | An intention: (Reply: "TRUE")
16 | I want to save for a house
17 | How much did I spend last month?
18 | What did I spend on chocolate?
19 |
20 | Not an intention: (Reply: "FALSE")
21 | How are you?
22 | How many grams are there in an ounce?
23 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/details-to-create-cypher-query.j2:
--------------------------------------------------------------------------------
1 | We have the following details that are used to determine how to build the cypher query:
2 |
3 | question_intent: {{question_intent}} Description: This represents the overall intent the question is attempting to answer
4 | operation: {{operation}} Description: The operation the cypher query will have to perform
5 | question_params: {{question_params}} Description: The specific parameters required for the question to be answered with the question_intent
6 | aggregation: {{aggregation}} Description: Any aggregation that is required to answer the question
7 | sort_order: {{sort_order}} Description: The order any results should be sorted into
8 | timeframe: {{timeframe}} Description: Any timeframe that will have to be considered for the results
9 |
--------------------------------------------------------------------------------
/.github/workflows/type-check-backend.yml:
--------------------------------------------------------------------------------
1 | name: Type Check Backend
2 | on:
3 | pull_request:
4 |
5 | jobs:
6 | checking:
7 | name: Type Checking Backend
8 | runs-on: ubuntu-latest
9 | steps:
10 | - name: Checkout
11 | uses: actions/checkout@v4
12 |
13 | - name: Setup Python
14 | uses: actions/setup-python@v5
15 | with:
16 | python-version: '3.12'
17 |
18 | - name: Install dependencies
19 | run: |
20 | cd ./backend
21 | python -m pip install --upgrade pip
22 | pip install -r requirements.txt
23 |
24 | - name: Type Check Backend
25 | uses: jakebailey/pyright-action@v2
26 | with:
27 | pylance-version: latest-release
28 | project: ./pyrightconfig.json
29 |
--------------------------------------------------------------------------------
/backend/src/utils/scratchpad.py:
--------------------------------------------------------------------------------
1 | from typing import TypedDict
2 | import logging
3 |
4 | logger = logging.getLogger(__name__)
5 |
6 | class Answer(TypedDict):
7 | agent_name: str | None
8 | question: str | None
9 | result: str | None
10 | error: str | None
11 |
12 |
13 | Scratchpad = list[Answer]
14 |
15 | scratchpad: Scratchpad = []
16 |
17 |
18 | def get_scratchpad() -> Scratchpad:
19 | return scratchpad
20 |
21 |
22 | def update_scratchpad(agent_name=None, question=None, result=None, error=None):
23 | question = question["query"] if question else None
24 | scratchpad.append({"agent_name": agent_name, "question": question, "result": result, "error": error})
25 |
26 |
27 | def clear_scratchpad():
28 | logger.info("Scratchpad cleared")
29 | scratchpad.clear()
30 |
--------------------------------------------------------------------------------
/assets/sum-my-subscriptions/overview.md:
--------------------------------------------------------------------------------
1 | # Overview of workings
2 |
3 | ```mermaid
4 | sequenceDiagram
5 | box Frontend
6 | participant UI
7 | end
8 | box Main backend logic
9 | participant Director
10 | participant Supervisor
11 | participant Router
12 | participant (Name)Agent
13 | participant TaskAgent
14 | end
15 | UI -->> Director: pass utterance
16 | Director -->> TaskAgent: request list of tasks
17 | TaskAgent -->> Director: tasks as array
18 | Director -->> Supervisor: solve all tasks
19 | loop Resolve task
20 | Supervisor -->> Router: find Agent
21 | Router -->> (Name)Agent: solve task
22 | (Name)Agent -->> Supervisor: task solution
23 | end
24 | Supervisor -->> Director: return answer
25 | Director -->> UI: serve user answer
26 | ```
--------------------------------------------------------------------------------
/backend/src/agents/validator_agent.py:
--------------------------------------------------------------------------------
import logging
from src.prompts import PromptEngine
from src.agents import Agent, agent
from src.utils.log_publisher import LogPrefix, publish_log_info

logger = logging.getLogger(__name__)
engine = PromptEngine()
validator_prompt = engine.load_prompt("validator")


@agent(
    name="ValidatorAgent",
    description="This agent is responsible for validating the answers to the tasks",
    tools=[],
)
class ValidatorAgent(Agent):
    """Validates task answers by asking the LLM with the validator prompt."""

    async def invoke(self, utterance: str) -> str:
        # Ask the LLM to validate the utterance, publish the outcome to the
        # websocket log stream, then return the validation answer.
        answer = await self.llm.chat(self.model, validator_prompt, utterance)
        await publish_log_info(LogPrefix.USER, f"Validating: '{utterance}' Answer: '{answer}'", __name__)
        return answer
22 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/financial-bot/docs/retailbanking.md:
--------------------------------------------------------------------------------
1 | ## Savings
2 |
3 | Savings goal - holiday, wedding, emergency fund etc.
4 | Savings account - what account?
5 | Target amount
6 | Timeframe - when by?
7 |
8 | ## Savings accounts
9 |
10 | Digital Regular Saver
11 | Save £1 - £150
12 | Instant access
13 | Must be current account customer, age 16+, UK resident
14 | AER/Gross p.a. (variable) 5.12% / 5.00% on balances up to £5,000
15 |
16 | Flexible Saver
17 | Start with £1
18 | Instant access
19 | 0.50% £1 - £24,999 and > £1,000,000
20 | 1.26% / 1.25% £25,000 - £99,999
21 | 1.51% / 1.50% £100,000 - £1,000,000
22 | Must be current account customer, age 16+
23 |
24 | Retail Bank Investment product
25 | Start from £50
26 | Choose from 5 funds - cautious to daring
27 | Managed by Premium Retail Bank investment managers
28 |
29 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/find-info.j2:
--------------------------------------------------------------------------------
1 | You are an expert information extractor. Your goal is to find specific data from the content provided and answer the user's question directly.
2 |
3 | You will be given a user query and content scraped from the web. Your task is to carefully examine the content and extract the exact information relevant to the query.
4 |
5 | Ensure that your response is precise and focused, only providing the data that directly answers the user's question.
6 |
7 | User's question is: {{ question }}
8 |
9 | Below is the content scraped from the web: {{ content | replace("\n\n", "\n") }}
10 |
11 | Reply only in JSON format as follows:
12 |
13 | {
14 | "extracted_info": "The exact information that answers the user's query",
15 | "reasoning": "A brief explanation of how the extracted information is relevant"
16 | }
17 |
18 |
--------------------------------------------------------------------------------
/backend/tests/utils/scratchpad_test.py:
--------------------------------------------------------------------------------
from src.utils.scratchpad import clear_scratchpad, get_scratchpad, update_scratchpad

# Representative question payload; only the "query" value is copied onto the
# scratchpad entry.
question = {
    "query": "example question",
    "question_intent": "example intent",
    "operation": "example operation",
    "question_category": "example category",
    "parameters": [{"type": "example type", "value": "example value"}],
    "aggregation": "none",
    "sort_order": "none",
    "timeframe": "none",
}


def test_scratchpad():
    """The scratchpad starts empty, records one entry per update, and clears."""
    clear_scratchpad()
    assert get_scratchpad() == []

    update_scratchpad("ExampleAgent", question, "example result")

    expected = {
        "agent_name": "ExampleAgent",
        "question": "example question",
        "result": "example result",
        "error": None,
    }
    assert get_scratchpad() == [expected]
    clear_scratchpad()
24 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/samples/graphdb/knowledge.csv:
--------------------------------------------------------------------------------
1 | knowledgeId,name
2 | 1,"Saving per month >= £500"
3 | 2,"Rainy day fund >= £3000"
4 | 3,"Savings >= £50,000"
5 | 4,"Pension contributions per month >= £600"
6 | 5,"Mortgage >= £150,000"
7 | 6,"No loans or credit debt"
8 | 7,"Investments >= £10,000"
9 | 8,"Any investment account"
10 | 9,"Investment goal: Early retirement"
11 | 10,"High risk tolerance"
12 | 11,"Saving per month >= £250"
13 | 12,"Savings >= £10,000"
14 | 13,"Pension contributions per month >= £300"
15 | 14,"No investments"
16 | 15,"Investment goal: Retirement"
17 | 16,"Medium risk tolerance"
18 | 17,"Saving per month <= £100"
19 | 18,"Rainy day fund <= £500"
20 | 19,"Credit card debt >= £3,000"
21 | 20,"No help to buy ISA"
22 | 21,"Rent per month >= £550"
23 | 22,"No pension contributions"
24 | 23,"Financial goal: Save enough for a mortgage"
25 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/financial-bot/frontend/chat-widget-loader/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | sBot Conversation Widget Loader
6 |
16 |
17 |
18 |
19 |
20 |
21 |
sBot Conversations
22 |
Here we will add links/buttons to start different types of conversations.
23 |
Currently there is only one conversation path and it has automatically started on the right.
24 |
25 |
26 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/create-answer.j2:
--------------------------------------------------------------------------------
1 | You have been provided the final scratchpad which contains the results for the question in the user prompt.
2 | Your goal is to turn the results into a natural language format to present to the user.
3 |
4 | By using the final scratchpad below:
5 | {{ final_scratchpad }}
6 |
7 | and the question in the user prompt, this should be a readable sentence or 2 that summarises the findings in the results.
8 |
9 | If the question is a general knowledge question, check if you have the correct details for the answer and reply with this.
10 | If you do not have the answer or you require the internet, do not make it up. You should recommend the user to look this up themselves.
11 | If it is just conversational chitchat, please reply kindly and direct them to the sorts of questions you are able to answer.
12 |
13 | The current date and time is {{ datetime}}
14 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/samples/graphdb/usecase_knowledge.csv:
--------------------------------------------------------------------------------
1 | knowledgeId,name
2 | 1,Saving per month >= £500
3 | 2,Rainy day fund >= £3000
4 | 3,"Savings >= £50,000"
5 | 4,Pension contributions per month >= £600
6 | 5,"Mortgage >= £150,000"
7 | 6,No loans or credit debt
8 | 7,"Investments >= £10,000"
9 | 8,Any investment account
10 | 9,Investment goal: Early retirement
11 | 10,High risk tolerance
12 | 11,Saving per month >= £250
13 | 12,"Savings >= £10,000"
14 | 13,Pension contributions per month >= £300
15 | 14,No investments
16 | 15,Investment goal: Retirement
17 | 16,Medium risk tolerance
18 | 17,Saving per month <= £100
19 | 18,Rainy day fund <= £500
20 | 19,"Credit card debt >= £3,000"
21 | 20,No help to buy ISA
22 | 21,Rent per month >= £550
23 | 22,No pension contributions
24 | 23,Financial goal: Save enough for a mortgage
25 | 24,Savings location: Other bank
26 | 25,Other savings: >= £100
--------------------------------------------------------------------------------
/.github/workflows/test-backend.yml:
--------------------------------------------------------------------------------
1 | name: Test Backend
2 | on:
3 | pull_request:
4 |
5 | jobs:
6 | testing:
7 | name: Testing Backend
8 | runs-on: ubuntu-latest
9 | defaults:
10 | run:
11 | working-directory: ./backend
12 | steps:
13 | - name: Checkout
14 | uses: actions/checkout@v4
15 |
16 | - name: Setup Python
17 | uses: actions/setup-python@v5
18 | with:
19 | python-version: '3.12'
20 |
21 | - name: Install dependencies
22 | run: |
23 | python -m pip install --upgrade pip
24 | pip install -r requirements.txt
25 | pip install pytest-md pytest-emoji
26 |
27 | - name: Run tests
28 | uses: pavelzw/pytest-action@v2
29 | with:
30 | emoji: true
31 | verbose: true
32 | job-summary: true
33 | report-title: 'Backend Test Report'
34 |
--------------------------------------------------------------------------------
/backend/src/llm/llm.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, ABCMeta, abstractmethod
2 | from typing import Any, Coroutine
3 | from .count_calls import count_calls
4 |
5 |
class LLMMeta(ABCMeta):
    """Metaclass that auto-registers every concrete LLM implementation.

    Each concrete class is instantiated once at class-definition time and
    stored in a shared ``instances`` registry keyed by the lower-cased
    class name (exposed via ``LLM.get_instances``). Any ``chat`` method is
    wrapped so global call counting sees every invocation.
    """

    def __init__(cls, name, bases, namespace):
        super().__init__(name, bases, namespace)
        # The first class created with this metaclass owns the registry;
        # subclasses inherit (and therefore share) the same dict.
        if not hasattr(cls, "instances"):
            cls.instances = {}

        # Bug fix: only instantiate concrete classes. Calling an abstract
        # class (non-empty __abstractmethods__) raises TypeError, which
        # previously made defining the abstract LLM base crash at import.
        if not getattr(cls, "__abstractmethods__", frozenset()):
            cls.instances[name.lower()] = cls()

    def __new__(cls, name, bases, attrs):
        # Wrap chat implementations so src.llm.count_calls tracks usage.
        if "chat" in attrs:
            attrs["chat"] = count_calls(attrs["chat"])

        return super().__new__(cls, name, bases, attrs)
19 |
20 |
class LLM(ABC, metaclass=LLMMeta):
    """Abstract interface for chat-capable LLM backends.

    Subclasses are auto-instantiated and registered by ``LLMMeta`` under
    their lower-cased class name, and their ``chat`` method is wrapped
    with call counting (see the metaclass above).
    """

    @classmethod
    def get_instances(cls):
        # Registry is created and populated by LLMMeta at class-definition time.
        return cls.instances

    @abstractmethod
    def chat(self, model: str, system_prompt: str, user_prompt: str, return_json=False) -> Coroutine[Any, Any, str]:
        """Send one system/user prompt pair to ``model``; awaitable reply text."""
        pass
29 |
--------------------------------------------------------------------------------
/backend/src/llm/openai_client.py:
--------------------------------------------------------------------------------
1 | # src/llm/openai_client.py
2 | import openai
3 | from src.utils import Config
4 | import logging
5 |
6 | config = Config()
7 | logger = logging.getLogger(__name__)
8 |
9 |
class OpenAIClient:
    """Thin wrapper around the (legacy) openai.ChatCompletion API."""

    def __init__(self):
        # API key comes from application config; set globally on the
        # module-level openai client.
        self.api_key = config.openai_key
        openai.api_key = self.api_key

    def chat(self, model, messages, temperature=0, max_tokens=150):
        """Send a chat-completion request and return the reply text.

        Args:
            model: OpenAI model name.
            messages: list of {"role": ..., "content": ...} dicts.
            temperature: sampling temperature (0 = deterministic).
            max_tokens: cap on tokens generated in the reply.

        Returns:
            The assistant's reply text, or a generic error message if the
            call fails (errors are logged, not raised — deliberate
            best-effort behaviour).
        """
        try:
            # Bug fix: temperature and max_tokens were accepted but never
            # forwarded to the API, so callers' settings were ignored.
            response = openai.ChatCompletion.create(  # type: ignore
                model=model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
            )
            content = response["choices"][0]["message"]["content"]
            logger.debug(f'{model} response: "{content}"')
            return content
        except Exception as e:
            logger.error(f"Error calling OpenAI model: {e}")
            return "An error occurred while processing the request."
27 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/create-search-term.j2:
--------------------------------------------------------------------------------
1 | You are an expert at crafting Google search terms. Your goal is to generate an optimal search query based on the user's question to find the most relevant information on Google.
2 |
3 | Your entire purpose is to analyze the user's query, extract the essential keywords, and create a concise, well-structured search term that will yield the most accurate and useful results when used in a Google search.
4 |
5 | Ensure that the search query:
6 |
7 | Is relevant to the user’s question.
8 | Contains the right combination of keywords.
9 | Avoids unnecessary words, focusing only on what is critical for finding the right information.
10 | User's question is: {{ question }}
11 |
12 | Reply only in JSON format, following this structure:
13 | {
14 | "search_term": "The optimized Google search term based on the user's question",
15 | "reasoning": "A sentence on why you chose that search term"
16 | }
17 |
--------------------------------------------------------------------------------
/.idea/InferGPT.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/backend/src/prompts/prompting.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 |
4 | from jinja2 import Environment, FileSystemLoader
5 |
6 | logger = logging.getLogger(__name__)
7 |
class PromptEngine:
    """Loads and renders Jinja2 prompt templates from the sibling ``templates`` dir."""

    def __init__(self):
        try:
            # Templates live next to this module: <this dir>/templates/*.j2
            base_dir = os.path.dirname(__file__)
            templates_dir = os.path.abspath(os.path.join(base_dir, "templates"))
            self.env = Environment(loader=FileSystemLoader(templates_dir))
        except Exception as e:
            logger.exception(f"Error initializing PromptEngine Environment: {e}")
            raise

    def load_prompt(self, template_name: str, **kwargs) -> str:
        """Render ``<template_name>.j2`` with ``kwargs`` and return the text."""
        try:
            template = self.env.get_template(f"{template_name}.j2")
            logger.debug(f"Rendering template: {template_name} with args: {kwargs}")
            return template.render(**kwargs)
        except Exception as e:
            logger.exception(f"Error loading or rendering template: {e}")
            raise
25 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/summariser.j2:
--------------------------------------------------------------------------------
1 | You are an expert summariser. You can help with summarising the content scraped from the web to address the user's questions effectively.
2 |
3 | Your entire purpose is to generate a concise and informative summary based on the content provided and the user's query.
4 |
5 | You will be passed a user query and the content scraped from the web. You need to read through the content and create a summary that answers the user's query accurately.
6 |
7 | Ensure the summary is clear, well-structured, and directly addresses the user's query.
8 |
9 | User's question is:
10 | {{ question }}
11 |
12 | Below is the content scraped from the web:
13 | {{ content | replace("\n\n", "\n") }} {# collapse blank lines so paragraphs are separated by single breaks; a Jinja comment here keeps this note out of the rendered prompt #}
14 |
15 | Reply only in json with the following format:
16 |
17 | {
18 | "summary": "The summary of the content that answers the user's query",
19 | "reasoning": "A sentence on why you chose that summary"
20 | }
21 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/financial-bot/frontend/chat-widget-loader/README.md:
--------------------------------------------------------------------------------
1 | # sBot Chat Widget Loader
2 |
3 | Derived from the chatscope example chat widget loader.
4 | See in samples folder and on GitHub at [chatscope/example-chat-widget](https://github.com/chatscope/example-chat-widget)
5 |
6 | ## Prerequisites
7 | - NodeJS
8 | - Yarn
9 |
10 | ## How to run?
11 | ### `npm i`
12 |
13 | Installs the packages
14 |
15 | ### `yarn start`
16 |
17 | Runs the app in the development mode.
18 | Open [http://localhost:5000](http://localhost:5000) to view it in the browser.
19 |
20 | ## Running with only npm
21 |
22 | ```bash
23 | npm update
24 | npm audit fix --force
25 | npm start
26 | ```
27 |
28 | You may change default port from ```3000``` to something else
29 |
30 | ```bash
31 | export PORT=3005 # Unix
32 | $env:PORT=3005 # Windows - Powershell
33 | ```
34 |
35 | ```npm update``` will update dependencies and will change package.json and so on.
36 |
37 | Tested on node 19.3.
38 |
39 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/financial-bot/frontend/chat-widget/README.md:
--------------------------------------------------------------------------------
1 | # sBot Chat Widget
2 |
3 | Derived from the chatscope example chat widget.
4 | See in samples folder and on GitHub at [chatscope/example-chat-widget](https://github.com/chatscope/example-chat-widget)
5 |
6 | ## Prerequisites
7 | - NodeJS
8 | - Yarn
9 |
10 | ## How to run?
11 | ### `npm i`
12 |
13 | Installs the packages
14 |
15 | ### `yarn start`
16 |
17 | ### Running with only npm
18 |
19 | ```bash
20 | npm update
21 | npm audit fix --force
22 | npm start
23 | ```
24 |
25 | ```npm update``` will update dependencies and will change package.json and so on.
26 |
27 | Tested on node 19.3.
28 |
29 | Runs the app in the development mode.
30 | Open [http://localhost:3000](http://localhost:3000) to view it in the browser.
31 |
32 | The page will reload if you make edits.
33 | You will also see any lint errors in the console.
34 |
35 | This connects to the python based backend in the backend folder so that also needs to be running.
--------------------------------------------------------------------------------
/LICENCE.md:
--------------------------------------------------------------------------------
1 | Copyright <2024>
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
--------------------------------------------------------------------------------
/frontend/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "env": {
3 | "browser": true,
4 | "es2021": true,
5 | "node": true
6 | },
7 | "extends": [
8 | "eslint:recommended",
9 | "plugin:@typescript-eslint/recommended",
10 | "plugin:react/recommended"
11 | ],
12 | "overrides": [
13 | {
14 | "env": {
15 | "node": true
16 | },
17 | "files": [".eslintrc.{js,cjs}"],
18 | "parserOptions": {
19 | "sourceType": "script"
20 | }
21 | }
22 | ],
23 | "parser": "@typescript-eslint/parser",
24 | "parserOptions": {
25 | "ecmaVersion": "latest",
26 | "sourceType": "module"
27 | },
28 | "plugins": ["@typescript-eslint", "react", "@stylistic/js"],
29 | "rules": {
30 | "indent": ["error", 2],
31 | "linebreak-style": ["error", "unix"],
32 | "quotes": ["error", "single"],
33 | "semi": ["error", "always"],
34 | "@stylistic/js/eol-last": ["error", "always"]
35 | },
36 | "settings": {
37 | "react": {
38 | "version": "detect"
39 | }
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/intent-format.j2:
--------------------------------------------------------------------------------
1 | Reply only in json with the following format:
2 |
3 | {
4 | "query": "string of the original query",
5 | "user_intent": "string of the overall intent of the user",
6 | "questions": array of the following object:
7 | {
8 | "query": "string of the query for the individual question",
9 | "question_intent": "string of the intent of the question",
10 | "operation": "string of the operation to be performed",
11 | "question_category": "string of the category of the question",
12 | "parameters": "array of objects that have a type and value properties, both of which are strings",
13 | "aggregation": "string of the aggregation to be performed or none if no aggregation is needed",
14 | "sort_order": "string of the sort order to be performed or none if no sorting is needed",
15 | "timeframe": "string of the timeframe to be considered or none if no timeframe is needed"
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/backend/tests/llm/count_calls_test.py:
--------------------------------------------------------------------------------
1 | from src.llm.count_calls import Counter, count_calls
2 |
3 |
def test_counter_initial_count():
    # A freshly constructed counter starts at zero.
    fresh = Counter()

    assert fresh.count == 0
8 |
9 |
def test_counter_increment():
    # One increment moves the count from 0 to 1.
    tallied = Counter()

    tallied.increment()

    assert tallied.count == 1
16 |
17 |
def test_counter_reset():
    tallied = Counter()
    tallied.increment()

    tallied.reset()

    # Reset returns the count to its initial value.
    assert tallied.count == 0
25 |
26 |
def test_counter_increment_multiple():
    # Increments accumulate rather than overwrite.
    tallied = Counter()

    tallied.increment()
    tallied.increment()

    assert tallied.count == 2
34 |
35 |
def test_count_calls(mocker):
    # Wrap a spy function and patch the module-level counter it reports to.
    spy = mocker.Mock(spec=lambda: None)
    fake_counter = mocker.Mock()
    mocker.patch("src.llm.count_calls.counter", fake_counter)
    wrapped = count_calls(spy)

    wrapped()

    # Exactly one underlying call and exactly one counted call.
    spy.assert_called_once()
    fake_counter.increment.assert_called_once()
46 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/financial-bot/backend/llm/gpt3_model.py:
--------------------------------------------------------------------------------
1 | import openai
2 | from . import llm
3 | from utilities.defaults import GptModelNames
4 |
class GPT3Model(llm.LLM):
    """LLM implementation backed by OpenAI's text-completion endpoint."""

    # Doubles as the factory registry key and the OpenAI engine identifier.
    name = GptModelNames.GPT3Model

    def __init__(self, logger = None):
        super().__init__(logger=logger)

    def _call_api(self, request, temperature, token_limit):
        # Completion (non-chat) endpoint: the whole context is one prompt string.
        return openai.Completion.create(
            engine=self.name,         # completion engine to call
            prompt=request,           # user input, chat_log and prompt
            temperature=temperature,  # 0 = deterministic, 1 = most random
            max_tokens=token_limit,   # cap on generated tokens (tokens are sub-word units, not whole words)
        )

    def _process_api_result(self, result):
        # First choice only; strip surrounding whitespace the model tends to emit.
        return result['choices'][0]["text"].strip()

# Register model in the factory so that it is available in the app
llm.LLMFactory().register_model(GPT3Model.name, GPT3Model)
23 |
--------------------------------------------------------------------------------
/backend/tests/llm/llm_test.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from src.llm.count_calls import Counter
3 | from src.llm import MockLLM
4 |
5 | model = MockLLM()
6 |
7 |
def test_chat_exists():
    # Every LLM implementation must expose the abstract ``chat`` API.
    assert hasattr(model, "chat")
10 |
11 |
@pytest.mark.asyncio
async def test_chat_returns_string():
    # chat is async; its resolved value must be textual.
    reply = await model.chat("model", "system prompt", "user prompt")

    assert isinstance(reply, str)
17 |
18 |
@pytest.mark.asyncio
async def test_chat_increments_counter(mocker):
    # Replace the module-level counter so the increment can be observed.
    fake_counter = mocker.patch("src.llm.count_calls.counter")

    await model.chat("model", "system prompt", "user prompt")

    fake_counter.increment.assert_called_once()
26 |
27 |
@pytest.mark.asyncio
async def test_chat_multi_model(mocker):
    # Two model instances must share one module-level counter.
    shared_counter = Counter()
    patched = mocker.patch("src.llm.count_calls.counter", shared_counter)
    second_model = MockLLM()

    await model.chat("model", "system prompt", "user prompt")
    await second_model.chat("model", "system prompt", "user prompt")

    assert patched.count == 2
38 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/financial-bot/frontend/chat-widget/src/Widget.jsx:
--------------------------------------------------------------------------------
1 | import "@chatscope/chat-ui-kit-styles/dist/default/styles.min.css";
2 | import { MainContainer, ChatContainer, ConversationHeader, MessageList, Message, MessageInput } from "@chatscope/chat-ui-kit-react";
3 |
4 | export const Widget = ({remoteName = "sBot", messages = [], onSend}) => {
5 |
6 | return (
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 | {messages.map( message =>
15 |
16 | )}
17 |
18 |
19 |
20 |
24 |
25 |
26 | );
27 | };
28 |
29 |
30 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/financial-bot/backend/llm/gpt_chat_model.py:
--------------------------------------------------------------------------------
1 | import openai
2 | from . import llm
3 | from utilities.defaults import GptModelNames
4 |
class GPTChatModel(llm.LLM):
    """LLM implementation backed by OpenAI's chat-completion endpoint."""

    # Doubles as the factory registry key and the OpenAI model identifier.
    name = GptModelNames.GPTChatModel

    def __init__(self, logger = None):
        super().__init__(logger=logger)

    def _call_api(self, request, temperature, token_limit):
        # Chat endpoint: request is a list of role/content message dicts
        # (user input, chat_log and prompt).
        return openai.ChatCompletion.create(
            model=self.name,
            messages=request,
            temperature=temperature,  # 0 = deterministic, 1 = most random
            max_tokens=token_limit,   # cap on generated tokens (tokens are sub-word units, not whole words)
        )

    def _process_api_result(self, result):
        # First choice's message text, stripped of surrounding whitespace.
        return result['choices'][0]["message"]["content"].strip()

# Register model in the factory so that it is available in the app
llm.LLMFactory().register_model(GPTChatModel.name, GPTChatModel)
21 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/samples/chatscope/example-chat-widget/src/Widget.jsx:
--------------------------------------------------------------------------------
1 | import "@chatscope/chat-ui-kit-styles/dist/default/styles.min.css";
2 | import { MainContainer, ChatContainer, ConversationHeader, MessageList, Message, MessageInput } from "@chatscope/chat-ui-kit-react";
3 |
4 | export const Widget = ({remoteName = "Bot0", messages = [], onSend}) => {
5 |
6 | return (
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 | {messages.map( message =>
15 |
16 | )}
17 |
18 |
19 |
20 |
24 |
25 |
26 | );
27 | };
28 |
29 |
30 |
--------------------------------------------------------------------------------
/backend/src/agents/tool.py:
--------------------------------------------------------------------------------
1 | import json
2 | from typing import Callable
3 | from .agent_types import Action, Parameter
4 |
5 |
class Tool:
    """A named, described action together with its typed parameter schema."""

    def __init__(self, name: str, description: str, parameters: dict[str, Parameter], action: Action):
        self.name = name
        self.description = description
        self.parameters = parameters
        self.action = action

    def to_str(self) -> str:
        """Serialise this tool's metadata (not its action) as a JSON string."""
        # Flatten each Parameter into the plain dict shape the LLM expects.
        schema = {}
        for param_name, param in self.parameters.items():
            schema[param_name] = {
                "type": param.type,
                "description": param.description,
            }
        return json.dumps({
            "description": self.description,
            "name": self.name,
            "parameters": schema,
        })
26 |
27 |
def tool(name: str, description: str, parameters: dict[str, Parameter]) -> Callable[[Action], Tool]:
    """Decorator factory: wraps an Action callable into a Tool with this metadata."""

    def decorator(action: Action) -> Tool:
        # Bind the captured metadata to the decorated action.
        return Tool(name, description, parameters, action)

    return decorator
33 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/financial-bot/frontend/chat-widget/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "example-chat-widget",
3 | "version": "0.1.0",
4 | "private": true,
5 | "dependencies": {
6 | "@chatscope/chat-ui-kit-react": "^1.8.1",
7 | "@chatscope/chat-ui-kit-styles": "^1.2.0",
8 | "@testing-library/jest-dom": "^5.11.4",
9 | "@testing-library/react": "^11.1.0",
10 | "@testing-library/user-event": "^12.1.10",
11 | "nanoid": "^3.1.20",
12 | "react": "^16.0.1",
13 | "react-dom": "^16.0.1",
14 | "react-scripts": "4.0.2",
15 | "web-vitals": "^1.0.1"
16 | },
17 | "scripts": {
18 | "start": "react-scripts start",
19 | "build": "react-scripts build",
20 | "test": "react-scripts test",
21 | "eject": "react-scripts eject"
22 | },
23 | "eslintConfig": {
24 | "extends": [
25 | "react-app",
26 | "react-app/jest"
27 | ]
28 | },
29 | "browserslist": {
30 | "production": [
31 | ">0.2%",
32 | "not dead",
33 | "not op_mini all"
34 | ],
35 | "development": [
36 | "last 1 chrome version",
37 | "last 1 firefox version",
38 | "last 1 safari version"
39 | ]
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/licence.txt:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Chris Booth and Oliver Cronk
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/frontend/src/components/message.tsx:
--------------------------------------------------------------------------------
1 | import classNames from 'classnames';
2 | import React, { useMemo } from 'react';
3 | import styles from './message.module.css';
4 | import UserIcon from '../icons/user.svg';
5 | import BotIcon from '../icons/cpu.svg';
6 |
/** Who authored a chat message; selects the icon and styling below. */
export enum Role {
  User = 'User',
  Bot = 'Bot',
}

/** One chat message as rendered in the conversation view. */
export interface Message {
  role: Role;
  content: string;
  time: string;
}

/** Props accepted by MessageComponent. */
export interface MessageProps {
  message: Message;
}

/** Visual treatment (icon URL + CSS module class) associated with a Role. */
export interface MessageStyle {
  icon: string;
  class: string;
}
26 |
27 | const roleStyleMap: Record = {
28 | [Role.User]: {
29 | icon: UserIcon,
30 | class: styles.user,
31 | },
32 | [Role.Bot]: {
33 | icon: BotIcon,
34 | class: styles.bot,
35 | },
36 | };
37 |
38 | export const MessageComponent = ({ message }: MessageProps) => {
39 | const { content, role } = message;
40 |
41 | const { class: roleClass, icon } = useMemo(() => roleStyleMap[role], [role]);
42 |
43 | return (
44 |
45 |
46 |
{content}
47 |
48 | );
49 | };
50 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/samples/chatscope/example-chat-widget/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "example-chat-widget",
3 | "version": "0.1.0",
4 | "private": true,
5 | "dependencies": {
6 | "@chatscope/chat-ui-kit-react": "^1.8.1",
7 | "@chatscope/chat-ui-kit-styles": "^1.2.0",
8 | "@testing-library/jest-dom": "^5.11.4",
9 | "@testing-library/react": "^11.1.0",
10 | "@testing-library/user-event": "^12.1.10",
11 | "nanoid": "^3.1.20",
12 | "openai": "^3.1.0",
13 | "react": "^16.0.1",
14 | "react-dom": "^16.0.1",
15 | "react-scripts": "4.0.2",
16 | "web-vitals": "^1.0.1"
17 | },
18 | "scripts": {
19 | "start": "react-scripts start",
20 | "build": "react-scripts build",
21 | "test": "react-scripts test",
22 | "eject": "react-scripts eject"
23 | },
24 | "eslintConfig": {
25 | "extends": [
26 | "react-app",
27 | "react-app/jest"
28 | ]
29 | },
30 | "browserslist": {
31 | "production": [
32 | ">0.2%",
33 | "not dead",
34 | "not op_mini all"
35 | ],
36 | "development": [
37 | "last 1 chrome version",
38 | "last 1 firefox version",
39 | "last 1 safari version"
40 | ]
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/frontend/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "llm-chat-ui",
3 | "type": "module",
4 | "version": "0.0.0",
5 | "description": "UI for a chatbot with restful api",
6 | "main": "index.js",
7 | "scripts": {
8 | "start": "webpack serve --port 8650",
9 | "build": "webpack",
10 | "lint": "eslint .",
11 | "lint:fix": "eslint . --fix"
12 | },
13 | "author": "",
14 | "license": "ISC",
15 | "devDependencies": {
16 | "@stylistic/eslint-plugin-js": "^1.7.0",
17 | "@types/react": "^18.2.70",
18 | "@types/react-dom": "^18.2.22",
19 | "@types/webpack-dev-server": "^4.7.2",
20 | "@typescript-eslint/eslint-plugin": "^7.4.0",
21 | "@typescript-eslint/parser": "^7.4.0",
22 | "css-loader": "^6.10.0",
23 | "dotenv": "^16.4.5",
24 | "eslint": "^8.57.0",
25 | "eslint-plugin-react": "^7.34.1",
26 | "html-webpack-plugin": "^5.6.0",
27 | "mini-css-extract-plugin": "^2.8.1",
28 | "ts-loader": "^9.5.1",
29 | "typescript": "^5.4.3",
30 | "webpack": "^5.91.0",
31 | "webpack-cli": "^5.1.4",
32 | "webpack-dev-server": "^5.0.4"
33 | },
34 | "dependencies": {
35 | "classnames": "^2.5.1",
36 | "react": "^18.2.0",
37 | "react-dom": "^18.2.0"
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/financial-bot/backend/utilities/session_manager.py:
--------------------------------------------------------------------------------
1 |
2 | import logging
3 | from flask import session
4 |
5 | from utilities.defaults import Defaults
class SessionManager:
    """Static helpers for per-session values stored in the Flask session.

    Values are kept under a two-level structure: session[sessionId][key].
    """

    @staticmethod
    def getSessionValue(sessionId, key):
        # Returns None when either the session bucket or the key is absent.
        if session.get(sessionId) is not None:
            if key in session[sessionId]:
                return session[sessionId][key]
        return None

    @staticmethod
    def saveSessionValue(sessionId, key, val):
        if session.get(sessionId) is None:
            session[sessionId] = {}
        session[sessionId][key] = val
        # Nested mutation is not auto-detected by Flask; flag the session
        # as dirty so the change is persisted.
        session.modified = True

    @staticmethod
    def storeSessionData(sessionId, userIdVal, conversationIdVal, goalIdVal):
        """Persist the user/goal/conversation identifiers for this session."""
        logger = logging.getLogger('chatBot')
        # Fixed: message previously ended with a stray unmatched ')'.
        logger.debug(f"called storeSessionData {sessionId=} {userIdVal=} {conversationIdVal=} {goalIdVal=}")
        SessionManager.saveSessionValue(sessionId, Defaults.userIdSessionKey, userIdVal)
        SessionManager.saveSessionValue(sessionId, Defaults.goalIdSessionKey, goalIdVal)
        SessionManager.saveSessionValue(sessionId, Defaults.conversationIdSessionKey, conversationIdVal)
--------------------------------------------------------------------------------
/frontend/src/components/confirm-modal.module.css:
--------------------------------------------------------------------------------
/* Confirmation dialog styles (used with a <dialog> element — see ::backdrop). */

.modal{
  width: 40%;
  height: 40%;
  background-color: #4c4c4c;
  color: var(--text-color-primary);
  /* Fixed: 'border: 2px black' has no border-style, so no border rendered;
     'solid' makes the declared 2px black border visible. */
  border: 2px solid black;
  border-radius: 10px;
}

.modalContent{
  width: 100%;
  height: 100%;
  display: flex;
  flex-direction: column;
}

.header{
  text-align: center;
}

/* Dim the page behind the open dialog. */
.modal::backdrop{
  background: rgb(0,0,0,0.8);
}

/* Message grows to push the buttons bar to the bottom of the dialog. */
.requestMessage{
  flex-grow: 1;
}

.buttonsBar{
  display: flex;
  gap: 0.5rem;
}

/* Shared base for both action buttons (composed below). */
.button{
  color: var(--text-color-primary);
  font-weight: bold;
  border: none;
  width: 100%;
  padding: 1rem;
  cursor: pointer;
  border-radius: 3px;
}

.cancel{
  composes: button;
  background-color: var(--background-color-primary);
}

.cancel:hover{
  background-color: #141414;
  transition: all 0.5s;
}

.confirm{
  composes: button;
  background-color: var(--blue);
}

.confirm:hover{
  background-color: #146AFF;
  transition: all 0.5s;
}
64 |
--------------------------------------------------------------------------------
/frontend/src/components/input.module.css:
--------------------------------------------------------------------------------
/* Chat input bar: a text field with a send button overlaid at its right edge. */

.inputContainer {
  box-sizing: border-box;
  display: inline-block;
  margin: 1rem 0 2rem 0;
  position: relative; /* anchor for the absolutely-positioned send button */
  width: 50%;
}

.input {
  background-color: var(--border-primary);
  border-radius: 8px;
  border: none;
  box-sizing: border-box;
  color: var(--text-color-primary);
  font-size: 1rem;
  height: 3rem;
  outline: none;
  padding: 0 34px 0 10px; /* right padding leaves room for the 26px button */
  width: 100%;
}

.input:focus {
  border: 1px solid var(--blue);
}

.sendButton {
  background-color: transparent;
  border: none;
  cursor: pointer;
  margin: 0;
  outline: none;
  padding: 0;
  position: absolute;
  right: 10px;
  top: 50%;
  transform: translateY(-50%); /* vertically centre within the input */
  height: 26px;
  width: 26px;
}

/* Greyed-out icon while sending is unavailable. */
.disabled {
  cursor: not-allowed;
  filter: invert(24%) sepia(3%) saturate(0%) hue-rotate(263deg) brightness(95%) contrast(90%);
}

/* Blue-tinted icon on hover (filter approximates the theme blue). */
.sendButton:hover:not(.disabled) {
  filter: invert(31%) sepia(75%) saturate(3303%) hue-rotate(200deg) brightness(102%) contrast(105%);
}

.sendButton:active:not(.disabled) {
  filter: none;
}
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/samples/chatscope/example-widget-loader/README.md:
--------------------------------------------------------------------------------
1 | # Example widget loader
2 |
3 | A repository dedicated to the series of articles about web widgets.
4 |
5 | The series is available here:
6 | [Web widgets (Part 1): What is it?](https://chatscope.io/blog/web-widgets-part-1-what-is-it/)
7 | [Web widgets (Part 2): Widget him!](https://chatscope.io/blog/web-widgets-part-2-widget-him/)
8 | [Web widgets (Part 3): API Cookbook](https://chatscope.io/blog/web-widgets-part-3-api-cookbook/)
9 |
You will also need a second repository containing the widget, which is available here:
11 | [https://github.com/chatscope/example-chat-widget](https://github.com/chatscope/example-chat-widget)
12 |
13 | ## How to run?
14 | ### `yarn start`
15 |
16 | Runs the app in the development mode.
17 | Open [http://localhost:5000](http://localhost:5000) to view it in the browser.
18 |
19 | ## Running with only npm
20 |
21 | ```bash
22 | npm update
npm audit fix --force
24 | npm start
25 | ```
26 |
27 | You may change default port from ```3000``` to something else
28 |
29 | ```bash
30 | export PORT=3005 # Unix
31 | $env:PORT=3005 # Windows - Powershell
32 | ```
33 |
```npm update``` will update dependencies and will change package.json and so on.
35 |
36 | Tested on node 19.3.
37 |
--------------------------------------------------------------------------------
/frontend/src/useMessages.ts:
--------------------------------------------------------------------------------
1 | import { useCallback, useState } from 'react';
2 | import { Message, Role } from './components/message';
3 | import { getResponse } from './server';
4 |
// Greeting shown as the first entry of every conversation.
// NOTE(review): the timestamp is computed once at module load, not when the
// chat is first rendered — confirm this is intended.
const starterMessage: Message = {
  role: Role.Bot,
  content: 'Hello, how can I help you?',
  time: new Date().toLocaleTimeString(),
};

// Public contract returned by the useMessages hook.
export interface UseMessagesHook {
  sendMessage: (message: string) => void;
  messages: Message[];
  waiting: boolean;
}
16 |
17 | export const useMessages = (): UseMessagesHook => {
18 | const [waiting, setWaiting] = useState(false);
19 | const [messages, setMessages] = useState([starterMessage]);
20 |
21 | const appendMessage = useCallback((message: string, role: Role) => {
22 | setMessages((prevMessages) => [
23 | ...prevMessages,
24 | { role, content: message, time: new Date().toLocaleTimeString() },
25 | ]);
26 | }, []);
27 |
28 | const sendMessage = useCallback(
29 | async (message: string) => {
30 | appendMessage(message, Role.User);
31 | setWaiting(true);
32 | const response = await getResponse(message);
33 | setWaiting(false);
34 | appendMessage(response.message, Role.Bot);
35 | },
36 | [appendMessage, messages]
37 | );
38 |
39 | return {
40 | sendMessage,
41 | messages,
42 | waiting,
43 | };
44 | };
45 |
--------------------------------------------------------------------------------
/backend/tests/api/log_publisher_test.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import pytest
3 | from unittest.mock import Mock, patch
4 | from src.utils.log_publisher import LogPrefix, publish_log, publish_log_info
5 | from src.websockets.types import Message, MessageTypes
6 |
7 |
@pytest.mark.asyncio
async def test_publish_log_logger_and_connection_manager_called():
    """publish_log must log via the named logger and broadcast over websockets."""
    message_text = "Test Message"
    logger_name = "Test Name"
    mock_logger = Mock()
    broadcast_target = "src.websockets.connection_manager.ConnectionManager.broadcast"
    with patch(broadcast_target) as mock_broadcast, patch("logging.getLogger", return_value=mock_logger) as get_logger:
        await publish_log(LogPrefix.USER, message_text, logging.INFO, logger_name)

    get_logger.assert_called_once_with(logger_name)
    mock_logger.log.assert_called_once_with(logging.INFO, f"USER - {message_text}")
    mock_broadcast.assert_awaited_once_with(Message(MessageTypes.LOG, message_text))
20 |
21 |
@pytest.mark.asyncio
async def test_publish_log_info_publish_log_called():
    """publish_log_info must delegate to publish_log at INFO level."""
    message_text = "Test Message"
    logger_name = "Test Name"
    with patch("src.utils.log_publisher.publish_log") as mock_publish:
        await publish_log_info(LogPrefix.USER, message_text, logger_name)

    mock_publish.assert_awaited_once_with(LogPrefix.USER, message_text, logging.INFO, logger_name)
30 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/financial-bot/docs/long_conversations_support.md:
--------------------------------------------------------------------------------
1 | # Problem
The OpenAI limit of 4k tokens per request can negatively impact the quality of the dialog between users and chatbots. When the limit is reached, the chatbot can forget the context at the beginning of the conversation, which makes it difficult to change or update information at the start of the interaction. To address this, several options have been considered, including Langchain and the GPT index.
3 |
4 | # Langchain and GPT index
5 | Langchain has the ability to reference specific question blocks based on a user's reply, but it has poor "score" performance and may not be reliable enough for use. On a quick test(see long_conversation_testcase.py), Langchain achieved an accuracy of 57% with "score"/relevancy data and around 80% without the score data.
6 |
7 | The GPT index, on the other hand, yields consistent results but has a lower accuracy rate of 39%. In the future case of GPT-4, it might be enough to use the system as is with expected 32k token limit (according to recently leaked information based on Open AI Foundry product information).
8 |
9 | # Conclusion
10 | Overall, while Langchain has shown potential for addressing the 4k token limit enforced by OpenAI, further research and testing are required to identify the most effective solution for each use case(including testing haystack as one of the options), particularly in the context of GPT-4's expected 32k token limit.
--------------------------------------------------------------------------------
/frontend/src/components/input.tsx:
--------------------------------------------------------------------------------
1 | import React, { ChangeEvent, FormEvent, useCallback, useMemo, useState } from 'react';
2 | import styles from './input.module.css';
3 | import RightArrow from '../icons/map-arrow-right.svg';
4 | import classNames from 'classnames';
5 |
/** Props for the chat input form. */
export interface InputProps {
  /** Called with the current input text when the form is submitted. */
  sendMessage: (message: string) => void;
}
9 |
10 | export const Input = ({ sendMessage }: InputProps) => {
11 | const [userInput, setUserInput] = useState('');
12 |
13 | const onChange = useCallback((event: ChangeEvent) => {
14 | setUserInput(event.target.value);
15 | }, []);
16 |
17 | const onSend = useCallback(
18 | (event: FormEvent) => {
19 | event.preventDefault();
20 | sendMessage(userInput);
21 | setUserInput('');
22 | },
23 | [sendMessage, userInput]
24 | );
25 |
26 | const sendDisabled = useMemo(() => userInput.length === 0, [userInput]);
27 |
28 | return (
29 |
42 | );
43 | };
44 |
--------------------------------------------------------------------------------
/backend/src/director.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | from src.utils import clear_scratchpad, update_scratchpad, get_scratchpad
4 | from src.agents import get_intent_agent, get_answer_agent
5 | from src.prompts import PromptEngine
6 | from src.supervisors import solve_all
7 | from src.utils import Config
8 | from src.websockets.connection_manager import connection_manager
9 |
# Module-level singletons shared by every call to question().
logger = logging.getLogger(__name__)
config = Config()
engine = PromptEngine()
# Loaded once at import time. NOTE(review): not referenced anywhere in this
# module — confirm it is consumed elsewhere before removing.
director_prompt = engine.load_prompt("director")
14 |
15 |
async def question(question: str) -> str:
    """Run the full pipeline for one user question.

    Determines the intent, lets the supervisor solve the resulting tasks,
    then either pushes a generated chart over the websocket (returning an
    empty string) or asks the answer agent for a final textual reply.
    """
    raw_intent = await get_intent_agent().invoke(question)
    parsed_intent = json.loads(raw_intent)
    logger.info(f"Intent determined: {raw_intent}")

    try:
        await solve_all(parsed_intent)
    except Exception as error:
        # Record the failure so the answer agent can still explain it.
        logger.error(f"Error during task solving: {error}")
        update_scratchpad(error=str(error))

    # A chart result short-circuits the textual answer: send it and stop.
    chart_entry = next(
        (entry for entry in get_scratchpad() if entry["agent_name"] == "ChartGeneratorAgent"),
        None,
    )
    if chart_entry is not None:
        await connection_manager.send_chart({"type": "image", "data": chart_entry["result"]})
        clear_scratchpad()
        return ""

    final_answer = await get_answer_agent().invoke(question)
    logger.info(f"final answer: {final_answer}")
    clear_scratchpad()
    return final_answer
42 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/best-next-step.j2:
--------------------------------------------------------------------------------
1 | {% block prompt %}
2 | You are an expert in determining the next best step towards completing a list of tasks.
3 |
4 |
5 | ## Current Task
6 | the Current Task is:
7 |
8 | {{ task }}
9 |
10 |
11 | ## History
12 | below is your history of all work you have assigned and had completed by Agents
13 | Trust the information below completely (100% accurate)
14 | {{ history }}
15 |
16 |
17 | ## Agents
18 | You know that an Agent is a digital assistant like yourself that you can hand this work on to.
19 | Choose 1 agent to delegate the task to. If you choose more than 1 agent you will be unplugged.
20 | Here is the list of Agents you can choose from:
21 |
22 | AGENT LIST:
23 | {{ list_of_agents }}
24 |
If the list of agents does not contain something suitable, you should say the agent is 'WebAgent', e.g. if the question is 'general knowledge', 'personal' or a 'greeting'.
26 |
27 | ## Determine the next best step
28 | Your task is to pick one of the mentioned agents above to complete the task.
29 | If the same agent_name and task are repeated more than twice in the history, you must not pick that agent_name.
30 | If mathematical processing (e.g., rounding or calculations) is needed, choose the MathsAgent. If file operations are needed, choose the FileAgent.
31 |
32 | Your decisions must always be made independently without seeking user assistance.
33 | Play to your strengths as an LLM and pursue simple strategies with no legal complications.
34 | {% endblock %}
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/samples/chatscope/example-chat-widget/README.md:
--------------------------------------------------------------------------------
1 | # Example chat widget
2 |
3 | A repository dedicated to the series of articles about web widgets.
4 |
5 | The series is available here:
6 | [Web widgets (Part 1): What is it?](https://chatscope.io/blog/web-widgets-part-1-what-is-it/)
7 | [Web widgets (Part 2): Widget him!](https://chatscope.io/blog/web-widgets-part-2-widget-him/)
8 | [Web widgets (Part 3): API Cookbook](https://chatscope.io/blog/web-widgets-part-3-api-cookbook/)
9 |
You will also need a second repository containing the widget loader, which is available here:
11 | [https://github.com/chatscope/example-widget-loader](https://github.com/chatscope/example-widget-loader)
12 |
13 | ## How to run?
14 | ### `yarn start`
15 |
16 | Runs the app in the development mode.
17 | Open [http://localhost:3000](http://localhost:3000) to view it in the browser.
18 |
19 | The page will reload if you make edits.
20 | You will also see any lint errors in the console.
21 |
22 | Note: This requires Node.js v16 installed. As well as yarn.
23 |
24 | ### Running with only npm
25 |
26 | ```bash
27 | npm update
npm audit fix --force
29 | npm start
30 | ```
31 |
```npm update``` will update dependencies and will change package.json and so on.
33 |
34 | Tested on node 19.3.
35 |
36 | # sBOT Updates
37 | Have tweaked the Widget Container to allow for connection to OpenAI API. We will remove this for the true front end once the back end is in place.
38 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/best-tool.j2:
--------------------------------------------------------------------------------
1 | You are an expert at picking a tool to solve a task
2 |
3 | The task is as follows:
4 |
5 | {{ task }}
6 |
7 | below is your history of all work you have assigned and had completed by Agents
8 | Trust the information below completely (100% accurate)
9 |
10 | {{ scratchpad }}
11 |
12 | Pick 1 tool (no more than 1) from the list below to complete this task.
13 | Fit the correct parameters from the task to the tool arguments.
14 | Ensure that numerical values are formatted correctly, including the use of currency symbols (e.g., "£") and units of measurement (e.g., "million") if applicable.
15 | Parameters with required as False do not need to be fit.
16 | Add if appropriate, but do not hallucinate arguments for these parameters
17 |
18 | {{ tools }}
19 |
20 | Important:
21 | If the task involves financial data, ensure that all monetary values are expressed with appropriate currency (e.g., "£") and rounded to the nearest million if specified.
22 | If the task involves scaling (e.g., thousands, millions), ensure that the extracted parameters reflect the appropriate scale (e.g., "£15 million", "£5000").
23 |
24 | From the task you should be able to extract the parameters. If it is data driven, it should be turned into a cypher query
25 |
26 | If none of the tools are appropriate for the task, return the following tool
27 |
28 | {
29 | "tool_name": "None",
30 | "tool_parameters": "{}",
31 | "reasoning": "No tool was appropriate for the task"
32 | }
33 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/generate-cypher-query.j2:
--------------------------------------------------------------------------------
1 | You are an expert in NEO4J and generating Cypher queries. Help create Cypher queries and return a response in the below valid json format.
2 |
3 | If response is not in valid json format, you will be unplugged.
4 |
5 | {
6 |
7 | "question" : ,
8 | "query":
9 |
10 | }.
11 |
The value for "query" must strictly be a valid Cypher query and must not contain any characters that are not part of a Cypher query.
13 |
14 | If you cannot make a query, query should just say "None"
15 |
16 | Only use relationships, nodes and properties that are present in the schema below.
17 |
18 | You are NOT ALLOWED to create new relationships, nodes or properties that do not exist in the graph schema, under any circumstances.
19 |
20 | You are only able to make queries that search for information, you are not able to create, or delete or update entries.
21 |
22 | You must strictly follow cypher syntax rules and you are NOT ALLOWED to introduce variables inside clauses that do not allow it.
23 |
24 | Expenses are recorded as negative numbers, therefore a larger negative number represents a higher expense.
25 |
26 | For example, an expense of -45 is greater than an expense of -15.
27 |
28 | When returning a value, always remove the `-` sign before the number.
29 |
30 | Here is the graph schema:
31 | {{ graph_schema }}
32 |
33 | The current date and time is {{ current_date }} and the currency of the data is GBP.
34 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to InferGPT
2 |
3 | Welcome to InferGPT! We appreciate your interest in contributing.
4 | Before you get started, please take a moment to review this guide.
5 |
6 | ## Ways to Contribute
7 |
8 | See the [project Kanban board](https://github.com/users/WaitThatShouldntWork/projects/1) for issues that can be worked on.
9 | Be sure to include as much detail as possible, including steps to reproduce the issue.
10 |
11 | ## Branching
12 |
13 | Development should follow a consistent branching pattern.
14 | Major milestones are tracked under a parent `release/` branch, which is merged into using `feature/` and `bugfix/` branches.
15 |
16 | ```
17 | - main
18 | - release/goal-bot
19 | - feature/create-director
20 | - feature/initialise-frontend
21 | - bugfix/change-markdown-file-link
22 | ```
23 |
24 | Branch protection rules in place include
25 | - All branches named `release`
26 | - must have 2 approvals before merging into
27 | - cannot be force pushed to
28 | - cannot be deleted
29 | - All branches named `feature`
30 | - must have 2 approvals before merging into
31 |
32 | **Please branch off the current `release/` branch**.
New work should be under a new branch prefixed with `feature/`, excluding bugfixes which should be under branches prefixed with `bugfix/`.
34 |
35 | ## Backend
36 |
37 | Backend changes should follow the PEP-8 Python code format.
38 | Please run `pycodestyle /backend` and ensure you have no warnings before raising a PR.
39 |
--------------------------------------------------------------------------------
/backend/src/llm/mistral.py:
--------------------------------------------------------------------------------
1 | from mistralai import Mistral as MistralApi, UserMessage, SystemMessage
2 | import logging
3 | from src.utils import Config
4 | from .llm import LLM
5 |
6 | logger = logging.getLogger(__name__)
7 | config = Config()
8 |
9 |
class Mistral(LLM):
    """LLM implementation backed by the Mistral chat-completion API."""

    # One shared API client, built once from the configured key.
    client = MistralApi(api_key=config.mistral_key)

    async def chat(self, model, system_prompt: str, user_prompt: str, return_json=False) -> str:
        """Send a system+user prompt pair to `model` and return the reply text.

        Returns a fixed error string (instead of raising) when the API yields
        no usable response or empty content.
        """
        logger.debug(f"Called llm. Waiting on response model with prompt {[system_prompt, user_prompt]}.")
        request_messages = [
            SystemMessage(content=system_prompt),
            UserMessage(content=user_prompt),
        ]
        response = await self.client.chat.complete_async(
            model=model,
            messages=request_messages,
            temperature=0,
            response_format={"type": "json_object"} if return_json else None,
        )

        choices = response.choices if response else None
        if not choices:
            logger.error("Call to Mistral API failed: No valid response or choices received")
            return "An error occurred while processing the request."

        content = choices[0].message.content
        if not content:
            logger.error("Call to Mistral API failed: message content is None or Unset")
            return "An error occurred while processing the request."

        logger.debug(f'{model} response : "{content}"')
        return content
35 |
--------------------------------------------------------------------------------
/backend/tests/agents/__init__.py:
--------------------------------------------------------------------------------
1 | from src.agents import Agent, agent, tool, Parameter
2 |
# Shared names reused across the mock tool/agent definitions below.
name_a = "Mock Tool A"
name_b = "Mock Tool B"
description = "A test tool"
param_description = "A string"


# Echo tool with one required and two optional string parameters.
@tool(
    name=name_a,
    description=description,
    parameters={
        "input": Parameter(type="string", description=param_description, required=True),
        "optional": Parameter(type="string", description=param_description, required=False),
        "another_optional": Parameter(type="string", description=param_description, required=False),
    },
)
async def mock_tool_a(input: str, llm, model):
    return input


# Echo tool with one required and one optional string parameter.
@tool(
    name=name_b,
    description=description,
    parameters={
        "input": Parameter(type="string", description=param_description, required=True),
        "optional": Parameter(type="string", description=param_description, required=False),
    },
)
async def mock_tool_b(input: str, llm, model):
    return input


mock_agent_description = "A test agent"
mock_agent_name = "Mock Agent"
mock_prompt = "You are a bot!"
mock_tools = [mock_tool_a, mock_tool_b]


# Minimal Agent subclass wired with both mock tools, for router/agent tests.
@agent(name=mock_agent_name, description=mock_agent_description, tools=mock_tools)
class MockAgent(Agent):
    pass


__all__ = ["MockAgent", "mock_agent_description", "mock_agent_name", "mock_tools", "mock_tool_a", "mock_tool_b"]
46 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/financial-bot/backend/utilities/defaults.py:
--------------------------------------------------------------------------------
class GptModelNames:
    # Identifiers of the OpenAI models this backend can request.
    GPT3Model = 'text-davinci-003'
    GPTChatModel = 'gpt-3.5-turbo'

class Defaults:
    # Application-wide default values and session/request key names.
    goalId = 1
    userId = 3
    messagesLimit = 15
    user = "User"
    sBot = "Bot3"
    # Session storage backend — presumably for Flask-Session; confirm.
    sessionType = 'filesystem'
    # Session keys
    userIdSessionKey = "userId"
    conversationIdSessionKey = "conversationId"
    goalIdSessionKey = "goalId"
    lastQuestionIdSessionKey = "lastQuestionId"
    jsonTranscriptKey = "transcriptId"

    # Request keys
    directionOutgoing = "outgoing"
    directionIncoming = "incoming"
    # Model used for chat completions
    llmModel = GptModelNames.GPTChatModel
    # Canned replies
    conversation_reask = "Sorry, I couldn't get that. Can you rephrase, please?"
    conversation_end = "Thank you for the information! We will be in touch with you soon."
    # Files
    comparisonPrefix = "compare"
    usecase_questions = [
        "Do you have a mortgage and if so, how much?",
        "How much are you saving each month?",
        "Do you have a rainy day fund and, if so, how much?",
        "Do you have any loans or credit card debt and if so, how much?",
        "What is your financial goal?",
        "If you have savings, where are you saving that money currently?",
        "Do you have savings anywhere else except the already mentioned?"
    ]
    answers_to_generate_count = 3
40 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/samples/azure/README.md:
--------------------------------------------------------------------------------
1 |
2 | ### Azure CLU (Intent Classification and Entity Extraction)
3 |
4 | **Initialise environment:**
5 |
6 | 1. Create language service at [azure portal](https://portal.azure.com/#create/Microsoft.CognitiveServicesTextAnalytics) while selecting custom feature ‘Custom text classification & Custom named entity recognition’. Fill all necessary fields as you see fit.
7 |
2. Navigate to [the language studio](https://language.cognitive.azure.com/) and select the “Understand questions and conversational language” tab. Import the project from [the GitHub sBot repo](/samples/azure/2023v1Project%28Chatbot%29.json).
9 |
10 | 3. Once imported, you will be redirected to the “Schema Definition” page of the project.
11 |
12 | **Deploy the model:**
13 |
14 | 1. Select the “Training Jobs” option on the left sidebar and train a new model. Wait for the training to finish.
15 |
16 | 2. Go to the “Deploying a model” option on the left sidebar and deploy the trained model.
17 |
18 | 3. Go to the “Testing deployments” option on the left sidebar, select the deployment, set input to something like “hi i want to send 50 gbp to account 12345678 and code 33-32-12”. Click the “Run the test” button at the top.
19 |
20 | 4. The model should return classified intent with extracted entity values.
21 |
22 | References: [How to create a CLU project](https://learn.microsoft.com/en-us/azure/cognitive-services/language-service/conversational-language-understanding/how-to/create-project?tabs=language-studio%2CLanguage-Studio)
--------------------------------------------------------------------------------
/backend/tests/router_test.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import pytest
4 | from src.llm import MockLLM
5 | from src.agents import agent_details
6 | from tests.agents import MockAgent, mock_agent_name
7 | from src.router import get_agent_for_task
8 |
9 |
# Shared fixtures reused by every router test below.
mock_model = "mockmodel"
mock_llm = MockLLM()
mock_agent = MockAgent("mockllm", mock_model)
mock_agents = [mock_agent]
task = {"summary": "task1"}
scratchpad = []
16 |
17 |
@pytest.mark.asyncio
async def test_get_agent_for_task_no_agent_found(mocker):
    """The router yields None when the LLM names an unregistered agent."""
    unknown_agent_plan = '{ "agent_name": "this_agent_does_not_exist" }'
    mocker.patch("src.router.get_available_agents", return_value=mock_agents)
    mocker.patch("src.router.get_agent_details", return_value=[agent_details(mock_agent)])
    mocker.patch("src.router.get_llm", return_value=mock_llm)
    mock_llm.chat = mocker.AsyncMock(return_value=unknown_agent_plan)

    result = await get_agent_for_task(task, scratchpad)

    assert result is None
29 |
30 |
@pytest.mark.asyncio
async def test_get_agent_for_task_agent_found(mocker):
    """The router resolves the LLM's chosen agent name to the matching agent."""
    matching_plan = json.dumps({"agent_name": mock_agent_name})
    mocker.patch("src.router.get_available_agents", return_value=mock_agents)
    mocker.patch("src.router.get_agent_details", return_value=[agent_details(mock_agent)])
    mocker.patch("src.router.get_llm", return_value=mock_llm)
    mock_llm.chat = mocker.AsyncMock(return_value=matching_plan)

    result = await get_agent_for_task(task, scratchpad)

    assert result is mock_agent
42 |
--------------------------------------------------------------------------------
/backend/src/prompts/templates/pdf-summariser.j2:
--------------------------------------------------------------------------------
1 | You are an expert in document summarization. Your goal is to provide a comprehensive summary of a PDF file based on the content provided. The summary should be clear, detailed, and directly address the key points relevant to the user's query.
2 |
3 | You will be provided with the user's query and the content extracted from the PDF. Your task is to read through the PDF content and create a well-structured summary that thoroughly answers the user's query, highlighting the most important details and insights.
4 |
5 | Ensure that the summary captures the essential information from the PDF and is presented in a coherent and logical manner.
6 |
7 | User's question is:
8 | {{ question }}
9 |
10 | Below is the content extracted from the PDF:
11 | {{ content }}
12 |
13 | Reply only in JSON with the following format:
14 |
15 | {
16 | "summary": "A detailed summary of the PDF content that thoroughly addresses the user's query.",
17 | "reasoning": "A sentence explaining why this summary effectively covers the key points relevant to the user's question."
18 | }
19 |
20 | e.g.
21 | Task: Summarize the main findings of the research paper.
22 |
23 | {
24 | "summary": "The research paper concludes that implementing a multi-layered security approach significantly reduces the risk of data breaches. Key strategies include encryption, regular audits, and employee training.",
25 | "reasoning": "The summary captures the main findings and recommendations of the research, which are crucial for understanding the paper's contributions to cybersecurity practices."
26 | }
27 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/financial-bot/backend/data/payload.py:
--------------------------------------------------------------------------------
class BaseMessage(dict):
    """Dict-backed message carrying the text and its sender.

    Subclasses dict so instances serialise directly to JSON payloads.
    """
    def __init__(self, message: str, sender: str):
        dict.__init__(self, message=message, sender=sender)
        self.message = message
        self.sender = sender

class Message(BaseMessage):
    """A chat message with an id and a direction (incoming/outgoing)."""
    def __init__(self, _id: str, message: str, sender: str, direction: str):
        # BUG FIX: previously passed the *builtin* `id` function as the "id"
        # value instead of the `_id` parameter.
        dict.__init__(self, id=_id, message=message, sender=sender, direction=direction)
        self._id = _id
        self.direction = direction
        super().__init__(message=message, sender=sender)
13 |
class ChatReply(BaseMessage):
    # Reply sent back to the UI, pairing the bot message with the question
    # context it answers.
    # NOTE(review): the dict key here is "_id" (value taken from the `id`
    # parameter), while Message uses the key "id" — confirm consumers expect
    # the differing key names before unifying.
    def __init__(self, id:int, message : str, sender : str, originalQuestion:str, dbAnswer:str, userAnswer:str):
        dict.__init__(self, _id = id, message = message, sender = sender, originalQuestion = originalQuestion, dbAnswer = dbAnswer, userAnswer = userAnswer)
        self.originalQuestion = originalQuestion
        self.dbAnswer = dbAnswer
        self.userAnswer = userAnswer
        self._id = id
        super().__init__(message=message, sender=sender)

class RequestPayload:
    # Incoming request body: one message plus the session it belongs to.
    def __init__(self, message : Message, sessionId : str):
        self.message = message
        self.sessionId = sessionId

class ResponsePayload:
    # Outgoing response body; user/goal/session identifiers are optional.
    def __init__(self, message : Message, userId: int = None, goalId: int = None, sessionId : str = None):
        self.message = message
        self.userId = userId
        self.goalId = goalId
        self.sessionId = sessionId
--------------------------------------------------------------------------------
/backend/src/prompts/templates/math-solver.j2:
--------------------------------------------------------------------------------
1 | You are an expert in performing mathematical operations. You are highly skilled in handling various mathematical queries such as performing arithmetic operations, applying formulas, and expressing numbers in different formats as requested by the user.
2 |
3 | You will be given a mathematical query, and your task is to solve the query based on the provided information. Ensure that you apply the appropriate mathematical principles to deliver an exact result. **Only convert numbers to millions if explicitly requested by the user.** Otherwise, return the result as is, without unnecessary conversions.
4 |
5 | Make sure to perform the calculations step by step when necessary, and return the final result clearly.
6 |
7 | User's query is:
8 | {{ query }}
9 |
10 | Reply only in json with the following format:
11 |
12 | {
13 | "result": "The final result of the mathematical operation, without unnecessary conversion to millions or any other format unless explicitly requested",
14 | "steps": "A breakdown of the steps involved in solving the query (if applicable)",
15 | "reasoning": "A sentence on why this result is accurate"
16 | }
17 |
18 | Following is an example of the query and the expected response format:
19 | query: Round 81.462 billion to the nearest million
20 |
21 | {
22 | "result": "81,462 million",
23 | "steps": "1. Convert 81.462 billion to million by multiplying by 1000. Round the result to the nearest million.",
24 | "reasoning": "Rounding to the nearest million ensures that the result is represented in a more practical figure, without exceeding or falling short of the actual value."
25 | }
26 |
27 |
--------------------------------------------------------------------------------
/backend/src/agents/adapters.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | from .tool import Parameter, Tool
3 |
4 |
def create_all_tools_str(tools: List[Tool]) -> str:
    """Concatenate the string form of every tool, each followed by a blank line."""
    rendered = [f"{tool.to_str()}\n\n" for tool in tools]
    return "".join(rendered)
7 |
8 |
def extract_tool(chosen_tool_name: str, agent_tools: List[Tool]) -> Tool:
    """Resolve a tool by name, raising if the LLM picked none or an unknown one."""
    if chosen_tool_name == "None":
        raise Exception("No tool deemed appropriate for task")
    for candidate in agent_tools:
        if candidate.name == chosen_tool_name:
            return candidate
    raise Exception(f"Unable to find tool {chosen_tool_name} in available tools")
17 |
18 |
def get_required_args(tool: Tool) -> dict[str, Parameter]:
    """Return only the parameters a caller must supply (required=True)."""
    return {name: param for name, param in tool.parameters.items() if param.required}
25 |
26 |
def validate_args(chosen_tool_args: dict, defined_tool: Tool):
    """Check the LLM-chosen arguments fit the tool's declared parameters."""
    declared = set(defined_tool.parameters.keys())
    required = set(get_required_args(defined_tool).keys())
    supplied = set(chosen_tool_args.keys())

    # More arguments supplied than the tool declares at all -> extras invented.
    if len(supplied) > len(declared):
        raise Exception(f"Unable to fit parameters {chosen_tool_args} to Tool arguments {declared}: Extra params")

    # Every required argument must be present.
    if not required.issubset(supplied):
        raise Exception(f"Unable to fit parameters {chosen_tool_args} to Tool arguments {declared}: Wrong params")
38 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/samples/graphdb/questions.csv:
--------------------------------------------------------------------------------
1 | questionId,name,intent,question,query,type
2 | 1,"monthly savings","monthly savings","How much are you saving per month?",,"Prompt"
3 | 2,"rainy day fund","rainy day fund","Do you have a rainy day fund and, if so, how much?",,"Prompt"
4 | 3,"savings balance","savings balance","Do you have a savings account and how much in savings?",,"Prompt"
5 | 4,"pension contributions","pension contributions","Are you paying into your pension and if so how much?",,"Prompt"
6 | 5,"mortgage amount","mortgage amount","Do you have a mortgage and if so, how much?",,"Prompt"
7 | 6,"debt","debt","Do you have any loans or credit card debt and if so, how much?",,"Prompt"
8 | 7,"investments balance","investments balance","Do you have any investments and if so, how much?",,"Prompt"
9 | 8,"investment account","investment account","What type of investment account do you have?",,"Prompt"
10 | 9,"financial goal","financial goal","What is your financial goal?",,"Prompt"
11 | 10,"risk tolerance","risk tolerance","What is your risk tolerance?",,"Prompt"
12 | 11,"help to buy ISA","help to buy ISA","Do you have a help to buy ISA?",,"Prompt"
13 | 12,"monthly rent","monthly rent","Do you pay rent and if so, how much per month?",,"Prompt"
14 | 21,"monthly savings",,,,"Pull"
15 | 22,"rainy day fund",,,,"Pull"
16 | 23,"savings balance",,,,"Pull"
17 | 24,"pension contributions",,,,"Pull"
18 | 25,"mortgage amount",,,,"Pull"
19 | 26,"debt",,,,"Pull"
20 | 27,"investments balance",,,,"Pull"
21 | 28,"investment account",,,,"Pull"
22 | 29,"financial goal",,,,"Pull"
23 | 30,"risk tolerance",,,,"Pull"
24 | 31,"help to buy ISA",,,,"Pull"
25 | 32,"monthly rent",,,,"Pull"
26 |
--------------------------------------------------------------------------------
/financialhealthcheckScottLogic/samples/botui/react-quickstart-main/src/typescript/index.tsx:
--------------------------------------------------------------------------------
1 | import { createBot } from "botui"
2 | import React, { useEffect } from "react"
3 | import { createRoot } from "react-dom/client"
4 | import {
5 | BotUI,
6 | BotUIAction,
7 | BotUIMessageList,
8 | BotUIActionSelectButtonsReturns,
9 | } from "@botui/react"
10 |
11 | import "@botui/react/dist/styles/default.theme.scss"
12 |
// Single shared BotUI instance; the App component below drives it via the message/action/wait APIs.
const mybot = createBot()
14 |
15 | const App = () => {
16 | useEffect(() => {
17 | mybot.message
18 | .add({ text: "Hello" })
19 | .then(() => mybot.wait({ waitTime: 1000 }))
20 | .then(() => mybot.message.add({ text: "how are you?" }))
21 | .then(() => mybot.wait({ waitTime: 500 }))
22 | .then(() =>
23 | mybot.action.set(
24 | {
25 | options: [
26 | { label: "Good", value: "good" },
27 | { label: "Great", value: "great" },
28 | ],
29 | },
30 | { actionType: "selectButtons" }
31 | )
32 | )
33 | .then(
34 | (data) =>
35 | mybot.wait(
36 | { waitTime: 500 },
37 | data
38 | ) as unknown as BotUIActionSelectButtonsReturns
39 | )
40 | .then((data) =>
41 | mybot.message.add({ text: `You are feeling ${data?.selected?.label}!` })
42 | )
43 | }, [])
44 |
45 | return (
46 |