├── lg_pentest
│   ├── pentest_agent
│   │   ├── __init__.py
│   │   ├── utils
│   │   │   ├── __init__.py
│   │   │   ├── tools.py
│   │   │   ├── model.py
│   │   │   ├── state.py
│   │   │   └── nodes.py
│   │   ├── requirements.txt
│   │   ├── agent.py
│   │   ├── sup_agent.py
│   │   ├── recorder.py
│   │   ├── supervisor.py
│   │   ├── evaluator.py
│   │   ├── pentester.py
│   │   └── tools.py
│   ├── .gitignore
│   ├── static
│   │   └── agent_ui.png
│   ├── langgraph.json
│   └── README.md
├── ui
│   ├── .eslintrc.json
│   ├── app
│   │   ├── .page.jsx.swp
│   │   ├── playground
│   │   │   ├── layout.jsx
│   │   │   ├── StreamComponent.jsx
│   │   │   └── page.jsx
│   │   ├── chat
│   │   │   ├── layout.jsx
│   │   │   └── page.jsx
│   │   ├── layout.jsx
│   │   └── page.jsx
│   ├── public
│   │   ├── fonts
│   │   │   ├── Roboto-Bold.ttf
│   │   │   ├── Roboto-Black.ttf
│   │   │   ├── Roboto-Italic.ttf
│   │   │   └── Roboto-Regular.ttf
│   │   ├── vercel.svg
│   │   ├── next.svg
│   │   └── icon.svg
│   ├── jsconfig.json
│   ├── next.config.mjs
│   ├── postcss.config.mjs
│   ├── components
│   │   ├── chat
│   │   │   ├── UserChatBubble.jsx
│   │   │   ├── ChatInputField.jsx
│   │   │   └── BotChatBubble.jsx
│   │   ├── common
│   │   │   ├── SideBarItem.jsx
│   │   │   ├── StatIndicator.jsx
│   │   │   ├── BgBlur.jsx
│   │   │   ├── ModelStatus.jsx
│   │   │   └── SideBar.jsx
│   │   └── buttons
│   │       ├── GradientBtn.jsx
│   │       └── SideBarBtn.jsx
│   ├── .gitignore
│   ├── tsconfig.json
│   ├── package.json
│   ├── hooks
│   │   ├── useCookie.js
│   │   ├── ThemeContext.js
│   │   └── ModelContext.js
│   ├── tailwind.config.js
│   ├── README.md
│   └── styles
│       └── globals.css
├── images
│   ├── Graph.png
│   ├── WebUI_Dark.png
│   ├── WebUI_Light.png
│   ├── BreachSeek (2).png
│   └── Untitled drawing.png
├── .gitignore
├── requirements.txt
├── README.md
└── main.py
/lg_pentest/pentest_agent/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/lg_pentest/pentest_agent/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ui/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "next/core-web-vitals"
3 | }
4 |
--------------------------------------------------------------------------------
/images/Graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/snow10100/pena/HEAD/images/Graph.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | venv/
2 | .env
3 | *.env
4 | __pycache__/
5 | pyvenv.cfg
6 | .DS_Store
7 |
--------------------------------------------------------------------------------
/images/WebUI_Dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/snow10100/pena/HEAD/images/WebUI_Dark.png
--------------------------------------------------------------------------------
/ui/app/.page.jsx.swp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/snow10100/pena/HEAD/ui/app/.page.jsx.swp
--------------------------------------------------------------------------------
/images/WebUI_Light.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/snow10100/pena/HEAD/images/WebUI_Light.png
--------------------------------------------------------------------------------
/images/BreachSeek (2).png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/snow10100/pena/HEAD/images/BreachSeek (2).png
--------------------------------------------------------------------------------
/lg_pentest/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 | .ipynb_checkpoints
3 | .langgraph-data
4 | .DS_Store
5 | *__pycache__
--------------------------------------------------------------------------------
/images/Untitled drawing.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/snow10100/pena/HEAD/images/Untitled drawing.png
--------------------------------------------------------------------------------
/lg_pentest/static/agent_ui.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/snow10100/pena/HEAD/lg_pentest/static/agent_ui.png
--------------------------------------------------------------------------------
/ui/public/fonts/Roboto-Bold.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/snow10100/pena/HEAD/ui/public/fonts/Roboto-Bold.ttf
--------------------------------------------------------------------------------
/ui/jsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "paths": {
4 | "@/*": ["./*"]
5 | }
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/ui/public/fonts/Roboto-Black.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/snow10100/pena/HEAD/ui/public/fonts/Roboto-Black.ttf
--------------------------------------------------------------------------------
/ui/public/fonts/Roboto-Italic.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/snow10100/pena/HEAD/ui/public/fonts/Roboto-Italic.ttf
--------------------------------------------------------------------------------
/ui/public/fonts/Roboto-Regular.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/snow10100/pena/HEAD/ui/public/fonts/Roboto-Regular.ttf
--------------------------------------------------------------------------------
/ui/next.config.mjs:
--------------------------------------------------------------------------------
1 | /** @type {import('next').NextConfig} */
2 | const nextConfig = {};
3 |
4 | export default nextConfig;
5 |
--------------------------------------------------------------------------------
/lg_pentest/pentest_agent/requirements.txt:
--------------------------------------------------------------------------------
1 | langgraph
2 | langchain_anthropic
3 | tavily-python
4 | langchain_community
5 | langchain_openai
6 | langchain
7 | langchain_core
8 |
--------------------------------------------------------------------------------
/ui/postcss.config.mjs:
--------------------------------------------------------------------------------
1 | /** @type {import('postcss-load-config').Config} */
2 | const config = {
3 | plugins: {
4 | tailwindcss: {},
5 | },
6 | };
7 |
8 | export default config;
9 |
--------------------------------------------------------------------------------
/lg_pentest/langgraph.json:
--------------------------------------------------------------------------------
1 | {
2 | "dependencies": [
3 | "./pentest_agent"
4 | ],
5 | "graphs": {
6 | "agent": "./pentest_agent/agent.py:pentest_graph"
7 | },
8 | "env": "./.env"
9 | }
10 |
--------------------------------------------------------------------------------
/lg_pentest/pentest_agent/utils/tools.py:
--------------------------------------------------------------------------------
1 | from langchain_community.tools.tavily_search import TavilySearchResults
2 | from langchain_community.tools import ShellTool
3 |
4 | tools = [TavilySearchResults(max_results=1), ShellTool()]
5 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | rich
2 | fastapi
3 | uvicorn
4 | langgraph==0.2.13
5 | langchain_core==0.2.34
6 | langchain_community==0.2.12
7 | langchain_experimental==0.0.64
8 | langchain_anthropic==0.1.23
9 | langserve==0.2.2
10 | python-dotenv
11 | sse_starlette
12 |
--------------------------------------------------------------------------------
/ui/app/playground/layout.jsx:
--------------------------------------------------------------------------------
1 | import SideBar from "../../components/common/SideBar";
2 |
3 | export default function RootLayout({ children }) {
4 | return (
5 |
6 |
7 | {children}
8 |
9 | );
10 | }
11 |
--------------------------------------------------------------------------------
/lg_pentest/README.md:
--------------------------------------------------------------------------------
1 | # LangGraph Cloud Example
2 |
3 | 
4 |
5 | > Add logic to nodes and routes
6 | Check out [LangGraph Example](https://github.com/langchain-ai/langgraph-example) and [LangGraph Engineer](https://github.com/hwchase17/langgraph-engineer) for how to structure the code.
7 |
8 |
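The graph exposed here is the `pentest_graph` registered in `langgraph.json`. Outside of LangGraph Cloud it can also be streamed directly from Python, the same way the top-level `main.py` does. The snippet below is a minimal sketch; it assumes the repository root is the working directory and that the required model API keys are set in the environment.

```python
from lg_pentest.pentest_agent.agent import pentest_graph

# Seed the state with a user message and print each node's update as it
# streams back, mirroring the event loop in main.py.
initial_state = {"messages": [{"role": "user", "content": "Scan the target host"}]}
for update in pentest_graph.stream(initial_state, stream_mode="updates"):
    print(update)
```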
--------------------------------------------------------------------------------
/ui/app/chat/layout.jsx:
--------------------------------------------------------------------------------
1 | import SideBar from "../../components/common/SideBar";
2 |
3 | export default function RootLayout({ children }) {
4 | // TODO context: model status
5 | // TODO context: summary report
6 | return (
7 |
8 |
9 | {children}
10 |
11 | );
12 | }
13 |
--------------------------------------------------------------------------------
/ui/components/chat/UserChatBubble.jsx:
--------------------------------------------------------------------------------
1 | import React from "react";
2 |
3 | export default function UserChatBubble({ children }) {
4 | return (
5 |
8 | );
9 | }
10 |
--------------------------------------------------------------------------------
/ui/components/common/SideBarItem.jsx:
--------------------------------------------------------------------------------
1 |
2 | function SideBarItem({ text, icon, link = "#", className = "" }) {
3 | return (
4 |
8 | {icon}
9 | {text}
10 |
11 | )
12 | }
13 |
14 | export default SideBarItem
--------------------------------------------------------------------------------
/ui/components/buttons/GradientBtn.jsx:
--------------------------------------------------------------------------------
1 | const GradientBtn = ({ text, onClick = null }) => {
2 | const buttonStyle = {
3 | border: 'none',
4 | color: '#fff',
5 | padding: '10px 20px',
6 | borderRadius: '5px',
7 | cursor: 'pointer',
8 | };
9 |
10 | return (
11 |
14 | );
15 | };
16 |
17 | export default GradientBtn;
--------------------------------------------------------------------------------
/lg_pentest/pentest_agent/utils/model.py:
--------------------------------------------------------------------------------
1 | from langchain_openai import ChatOpenAI
2 | from langchain_anthropic import ChatAnthropic
3 |
4 | def _get_model(model_name: str = "openai"):
5 | if model_name == "anthropic":
6 | model = ChatAnthropic(temperature=0, model_name="claude-3-5-sonnet-20240620")
7 | elif model_name == "openai":
8 | model = ChatOpenAI(temperature=0, model_name="gpt-4")
9 | else:
10 | raise ValueError(f"Unsupported model type: {model_name}")
11 |
12 | return model
13 |
--------------------------------------------------------------------------------
/ui/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 | /.pnp
6 | .pnp.js
7 | .yarn/install-state.gz
8 |
9 | # testing
10 | /coverage
11 |
12 | # next.js
13 | /.next/
14 | /out/
15 |
16 | # production
17 | /build
18 |
19 | # misc
20 | .DS_Store
21 | *.pem
22 |
23 | # debug
24 | npm-debug.log*
25 | yarn-debug.log*
26 | yarn-error.log*
27 |
28 | # local env files
29 | .env*.local
30 |
31 | # vercel
32 | .vercel
33 |
34 | # typescript
35 | *.tsbuildinfo
36 | next-env.d.ts
37 |
--------------------------------------------------------------------------------
/ui/public/vercel.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ui/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "lib": [
4 | "dom",
5 | "dom.iterable",
6 | "esnext"
7 | ],
8 | "allowJs": true,
9 | "skipLibCheck": true,
10 | "strict": false,
11 | "noEmit": true,
12 | "incremental": true,
13 | "module": "esnext",
14 | "esModuleInterop": true,
15 | "moduleResolution": "node",
16 | "resolveJsonModule": true,
17 | "isolatedModules": true,
18 | "jsx": "preserve",
19 | "plugins": [
20 | {
21 | "name": "next"
22 | }
23 | ]
24 | },
25 | "include": [
26 | "next-env.d.ts",
27 | ".next/types/**/*.ts",
28 | "**/*.ts",
29 | "**/*.tsx"
30 | ],
31 | "exclude": [
32 | "node_modules"
33 | ]
34 | }
35 |
--------------------------------------------------------------------------------
/ui/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "ui",
3 | "version": "0.1.0",
4 | "private": true,
5 | "scripts": {
6 | "dev": "next dev",
7 | "build": "next build",
8 | "start": "next start",
9 | "lint": "next lint"
10 | },
11 | "dependencies": {
12 | "@microsoft/fetch-event-source": "^2.0.1",
13 | "axios": "^1.7.4",
14 | "langchain": "^0.2.16",
15 | "next": "14.2.5",
16 | "react": "^18",
17 | "react-dom": "^18",
18 | "react-icons": "^5.3.0",
19 | "react-markdown": "^9.0.1",
20 | "react-syntax-highlighter": "^15.5.0",
21 | "remark-gfm": "^4.0.0",
22 | "swr": "^2.2.5"
23 | },
24 | "devDependencies": {
25 | "@types/node": "22.2.0",
26 | "@types/react": "18.3.3",
27 | "eslint": "^8",
28 | "eslint-config-next": "14.2.5",
29 | "postcss": "^8",
30 | "tailwindcss": "^3.4.1"
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/ui/hooks/useCookie.js:
--------------------------------------------------------------------------------
1 | import { useState } from 'react';
2 |
3 | export const useCookie = (cookieName, defaultValue = null) => {
4 | const getCookie = (name) => {
5 | const value = `; ${document.cookie}`;
6 | const parts = value.split(`; ${name}=`);
7 | if (parts.length === 2) return parts.pop().split(';').shift();
8 | return null;
9 | };
10 |
11 | const [cookie, setCookieState] = useState(() => getCookie(cookieName) ?? defaultValue);
12 |
13 | const setCookie = (value, days) => {
14 | const expires = new Date(Date.now() + days * 86400000).toUTCString();
15 | document.cookie = `${cookieName}=${value}; expires=${expires}; path=/`;
16 | setCookieState(value);
17 | };
18 |
19 | const deleteCookie = () => {
20 | document.cookie = `${cookieName}=; Max-Age=-99999999; path=/`;
21 | setCookieState(null);
22 | };
23 |
24 | return [cookie, setCookie, deleteCookie];
25 | };
26 |
--------------------------------------------------------------------------------
/ui/components/buttons/SideBarBtn.jsx:
--------------------------------------------------------------------------------
1 | const RoundedButton = ({ icon, onClick, disabled = false }) => {
2 | return (
3 |
16 | );
17 | };
18 |
19 | export default RoundedButton;
20 |
--------------------------------------------------------------------------------
/ui/tailwind.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('tailwindcss').Config} */
2 | module.exports = {
3 | darkMode: 'selector',
4 | content: [
5 | "./pages/**/*.{js,ts,jsx,tsx,mdx}",
6 | "./components/**/*.{js,ts,jsx,tsx,mdx}",
7 | "./app/**/*.{js,ts,jsx,tsx,mdx}",
8 | ],
9 | theme: {
10 | extend: {
11 | colors: {
12 | 'deep-dark': '#0C131A',
13 | 'dark-background': '#111827',
14 | 'light-background': '#f5f5f5',
15 | },
16 | backgroundImage: {
17 | "gradient-radial": "radial-gradient(var(--tw-gradient-stops))",
18 | "gradient-conic":
19 | "conic-gradient(from 180deg at 50% 50%, var(--tw-gradient-stops))",
20 | "custom-gradient": "linear-gradient(to right, #5976F9, #04A5D3)",
21 | 'custom-gradient-dark': 'linear-gradient(to right, #111827, #001f35)',
22 | },
23 | },
24 | },
25 | plugins: [],
26 | };
27 |
--------------------------------------------------------------------------------
/ui/components/common/StatIndicator.jsx:
--------------------------------------------------------------------------------
1 | const StatIndicator = ({ criticality, text, number = null, className = "" }) => {
2 | let colorClass;
3 |
4 | // TODO: edit these keys and values
5 | switch (criticality) {
6 | case 'low':
7 | colorClass = 'bg-gray-50 text-gray-800';
8 | break;
9 | case 'medium':
10 | colorClass = 'bg-yellow-100 text-yellow-800';
11 | break;
12 | case 'high':
13 | colorClass = 'dark:bg-[#F44336] bg-red-100 text-red-800';
14 | break;
15 | case 'critical':
16 | colorClass = 'bg-red-200 text-red-800';
17 | break;
18 | default:
19 | colorClass = 'bg-gray-500';
20 | break;
21 | }
22 |
23 | return (
24 |
25 | {(number || '') + " " + text}
26 |
27 | );
28 | };
29 |
30 | export default StatIndicator;
--------------------------------------------------------------------------------
/ui/components/common/BgBlur.jsx:
--------------------------------------------------------------------------------
1 |
2 | const BokehBackground = ({ baseColor = '#0a0a0c', spotColor = '0,0,255', blur = 10 }) => {
3 | // Base styles for the background (tailwind)
4 | const baseStyles = `
5 | fixed inset-0
6 | -z-10
7 | bg-no-repeat
8 | bg-cover
9 | `;
10 |
11 | // Function that creates gradient spots
12 | const createSpot = (position, size) =>
13 | `radial-gradient(circle at ${position}, rgba(${spotColor},0.2) 0%, rgba(${spotColor},0) ${size}%)`;
14 |
15 | // Combine multiple spots
16 | const gradientString = [
17 | createSpot('20% 30%', 20),
18 | createSpot('70% 60%', 25),
19 | createSpot('40% 80%', 15)
20 | ].join(',');
21 |
22 | // Styles that need to be applied inline
23 | const inlineStyles = {
24 | backgroundColor: baseColor,
25 | backgroundImage: gradientString,
26 | filter: `blur(${blur}px)`
27 | };
28 |
29 | return (
30 |
31 | );
32 | };
33 |
34 | export default BokehBackground;
--------------------------------------------------------------------------------
/ui/app/playground/StreamComponent.jsx:
--------------------------------------------------------------------------------
1 | // StreamComponent.jsx
2 |
3 | import React, { useEffect, useState } from "react";
4 | import { fetchEventSource } from "@microsoft/fetch-event-source";
5 | import BotChatBubble from "../../components/chat/BotChatBubble.jsx"
6 |
7 | function StreamComponent({message}) {
8 | const [messages, setMessages] = useState([]);
9 | useEffect(() => {
10 | const fetchData = async () => {
11 | await fetchEventSource("http://localhost:8000/stream", {
12 | headers: {
13 | "Content-Type": "application/json",
14 | },
15 | method: "POST",
16 | body: JSON.stringify({ query: message }),
17 | onmessage(ev) {
18 | console.log(`Received event: ${ev.data}`); // for debugging purposes
19 | setMessages((prev) => [...prev, ev.data]);
20 | },
21 | });
22 | };
23 | fetchData();
24 | }, [message]);
25 |
26 | return (
27 |
28 |
29 | {messages.join(" ")}
30 |
31 |
32 | );
33 | }
34 |
35 | export default StreamComponent;
36 |
--------------------------------------------------------------------------------
/ui/hooks/ThemeContext.js:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import React, { createContext, useContext, useEffect, useState } from 'react';
4 | import { useCookie } from '../hooks/useCookie';
5 |
6 | const ThemeContext = createContext();
7 |
8 | export const ThemeProvider = ({ children }) => {
9 | const [cookieTheme, setCookieTheme] = useCookie('theme', 'light');
10 | const [theme, setTheme] = useState(cookieTheme);
11 |
12 | useEffect(() => {
13 | document.documentElement.classList.toggle('dark', theme === 'dark');
14 | setCookieTheme(theme, 365);
15 | }, [theme, setCookieTheme]);
16 |
17 | const toggleTheme = () => {
18 | setTheme(prevTheme => prevTheme === 'light' ? 'dark' : 'light');
19 | };
20 |
21 | return (
22 |
23 | {children}
24 |
25 | );
26 | };
27 |
28 | export const useTheme = () => {
29 | const context = useContext(ThemeContext);
30 | if (context === undefined) {
31 | throw new Error('useTheme must be used within a ThemeProvider');
32 | }
33 | return context;
34 | };
--------------------------------------------------------------------------------
/lg_pentest/pentest_agent/utils/state.py:
--------------------------------------------------------------------------------
1 | from langgraph.graph import add_messages
2 | from langchain_core.messages import BaseMessage, AnyMessage
3 | from typing_extensions import TypedDict, Annotated, Sequence, List, Literal, Optional, Dict
4 | import operator
5 |
6 | class AgentState(TypedDict):
7 | messages: Annotated[Sequence[BaseMessage], add_messages]
8 |
9 | class RecorderOptions(TypedDict):
10 | report: str
11 | generate_final_report: bool
12 | file_names: List[str]
13 |
14 | # Define the state structure
15 | class PentestState(TypedDict):
16 | messages: Annotated[Sequence[AnyMessage], operator.add]
17 | current_step: Literal['pentester', 'evaluator', 'recorder', '__end__']
18 | pentest_results: dict
19 | pentest_tasks: list
20 | task: str
21 | evaluation: str
22 | model_status: str
23 | findings: Dict[str, List[str]]
24 | command: str
25 | tool_name: str
26 | tool_results: str
27 | recorder_options: Optional[RecorderOptions]
28 |
29 | # Define the config
30 | class GraphConfig(TypedDict):
31 | model_name: Literal["anthropic", "openai"]
32 | pentester_model: Literal["ollama", "anthropic", "openai"]
33 |
--------------------------------------------------------------------------------
/ui/app/layout.jsx:
--------------------------------------------------------------------------------
1 | 'use client'
2 | import { ThemeProvider } from '../hooks/ThemeContext'; // Adjust the path if necessary
3 | import { ModelProvider } from '../hooks/ModelContext'; // Adjust the import path as necessary
4 | import "../styles/globals.css";
5 |
6 | // export const metadata = {
7 | // title: "BreachSeek",
8 | // description: "AI powered pentesting Agent",
9 | // icons: {
10 | // icon: [
11 | // { url: '/icon.svg', sizes: '32x32', type: 'image/svg+xml' },
12 | // { url: '/icon.svg', sizes: '64x64', type: 'image/svg+xml' },
13 | // { url: '/icon.svg', sizes: '128x128', type: 'image/svg+xml' },
14 | // { url: '/icon.svg', sizes: '256x256', type: 'image/svg+xml' }
15 | // ],
16 | // },
17 | // };
18 |
19 | export default function RootLayout({ children }) {
20 | return (
21 |
22 |
27 |
28 |
29 | {children}
30 |
31 |
32 |
33 |
34 | );
35 | }
--------------------------------------------------------------------------------
/ui/hooks/ModelContext.js:
--------------------------------------------------------------------------------
1 | import React, { createContext, useState, useContext } from "react";
2 |
3 | // Create the context
4 | const ModelContext = createContext();
5 |
6 | // Create a provider component
7 | export function ModelProvider({ children }) {
8 | const [modelStatus, setModelStatus] = useState("idle");
9 | const [modelSummary, setModelSummary] = useState([]);
10 | const [modelFindings, setModelFindings] = useState([]);
11 | const [modelCommands, setModelCommands] = useState([]);
12 |
13 | // Value object to be provided to consuming components
14 | const value = {
15 | modelStatus,
16 | setModelStatus,
17 | modelSummary,
18 | setModelSummary,
19 | modelFindings,
20 | setModelFindings,
21 | modelCommands,
22 | setModelCommands,
23 | };
24 |
25 | return (
26 | <ModelContext.Provider value={value}>{children}</ModelContext.Provider>
27 | );
28 | }
29 |
30 | // Custom hook for using the context
31 | export function useModelContext() {
32 | const context = useContext(ModelContext);
33 | if (context === undefined) {
34 | throw new Error("useModel must be used within a ModelProvider");
35 | }
36 | return context;
37 | }
38 |
--------------------------------------------------------------------------------
/ui/components/common/ModelStatus.jsx:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect } from 'react';
2 |
3 | const statusConfig = new Map([
4 | ['supervisor', { colorClass: 'bg-yellow-100 text-yellow-800', text: 'Supervising' }],
5 | ['pentester', { colorClass: 'bg-blue-100 text-blue-800', text: 'Pentesting' }],
6 | ['evaluator', { colorClass: 'bg-purple-100 text-purple-800', text: 'Evaluating' }],
7 | ['tools_node', { colorClass: 'bg-purple-100 text-purple-800', text: 'Evaluating' }],
8 | ['Done', { colorClass: 'bg-green-100 text-green-800', text: 'Done' }]
9 | ]);
10 |
11 | const ModelStatus = ({ status }) => {
12 | const [dots, setDots] = useState('');
13 | console.log('status:', status);
14 |
15 | useEffect(() => {
16 | const interval = setInterval(() => {
17 | setDots(prev => (prev.length < 3 ? prev + '.' : ''));
18 | }, 500);
19 |
20 | return () => clearInterval(interval);
21 | }, []);
22 |
23 | const { colorClass, text } = statusConfig.get(status) || { colorClass: 'dark:bg-slate-400 bg-slate-400 text-white', text: 'idle' };
24 |
25 | return (
26 |
27 | {text}{(status === 'Done' || status === 'idle') ? '' : dots}
28 |
29 | );
30 | };
31 |
32 | export default ModelStatus;
--------------------------------------------------------------------------------
/ui/public/next.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # BreachSeek
2 |
3 | A multi-agent AI model designed to automate penetration testing
4 |
5 | ## Poster
6 | .png)
7 |
8 | ## Screenshots
9 | Dark theme:
10 | 
11 |
12 | Light theme:
13 | 
14 |
15 | ## Model Architecture
16 | The model utilizes LangGraph and implements the following architecture/workflow:
17 |
18 | 
19 |
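For a text view of the same workflow, the compiled graph can print its own diagram. The snippet below is a minimal sketch: it assumes the dependencies from `requirements.txt` are installed and that it is run from the repository root (as `main.py` is).

```python
# Render the LangGraph workflow defined in lg_pentest/pentest_agent/agent.py.
# API keys are generally not needed just to build and inspect the graph.
from lg_pentest.pentest_agent.agent import pentest_graph

# draw_mermaid() comes from langchain_core's graph helpers and returns a
# Mermaid diagram of the nodes (supervisor, pentester, tools_node, evaluator,
# recorder) and their edges.
print(pentest_graph.get_graph().draw_mermaid())
```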
20 | ## Installation
21 |
22 | We recommend creating a virtual environment:
23 |
24 | ```console
25 | python -m venv venv
26 | ```
27 |
28 | Activate the venv:
29 | ```console
30 | source venv/bin/activate
31 | ```
32 |
33 | Then install the required libraries. Make sure to install the chat model integration you prefer; you can uncomment lines inside the requirements file or add your own.
34 |
35 | ```console
36 | pip install -r requirements.txt
37 | ```
38 |
39 | ## Running the code
40 | First, ensure that the `ANTHROPIC_API_KEY` environment variable has been set (a `.env` file at the project root is also loaded).
41 |
42 | Run the following to start the backend:
43 | ```console
44 | python main.py
45 | ```
46 |
47 | Change directory into the `ui` folder:
48 | ```console
49 | cd ui
50 | ```
51 |
52 | Run the following:
53 | ```console
54 | npm install
55 | ```
56 |
57 | ```console
58 | npm run dev
59 | ```
60 |
61 | You can access the web UI at `localhost:3000`
62 |
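As a quick smoke test of the backend without the web UI, you can consume the `/stream` endpoint directly. The snippet below is a minimal sketch; it assumes the backend from `main.py` is running on `localhost:8000` and that the `requests` package is installed (it is not listed in `requirements.txt`).

```python
import requests

# main.py exposes POST /stream, expects a JSON body of the form {"query": "..."}
# and replies with a text/event-stream of per-node graph updates.
resp = requests.post(
    "http://localhost:8000/stream",
    json={"query": "Scan 192.168.100.231 and summarize the open ports"},
    stream=True,
)
for line in resp.iter_lines(decode_unicode=True):
    if line:  # skip the blank separator lines between SSE events
        print(line)
```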
--------------------------------------------------------------------------------
/ui/components/chat/ChatInputField.jsx:
--------------------------------------------------------------------------------
1 | "use client";
2 | import { useState } from "react";
3 | import { FaArrowUpLong } from "react-icons/fa6";
4 |
5 | export default function ChatInputField({ handleSubmit }) {
6 | const [userPrompt, setUserPrompt] = useState("");
7 | const [disabledSubmit, setDisabledSubmit] = useState(false);
8 |
9 | const handleChange = (e) => {
10 | setUserPrompt(e.target.value);
11 | // console.log("writing message", userPrompt);
12 | };
13 |
14 | const _handleSubmit = (message) => {
15 | if (message) {
16 | setUserPrompt("");
17 | handleSubmit(message);
18 | }
19 | };
20 |
21 | return (
22 |
23 | {
29 | if (e.key === "Enter") {
30 | _handleSubmit(userPrompt);
31 | }
32 | }}
33 | value={userPrompt}
34 | />
35 |
42 |
43 | );
44 | }
45 |
--------------------------------------------------------------------------------
/ui/README.md:
--------------------------------------------------------------------------------
1 | This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app).
2 |
3 | ## Getting Started
4 |
5 | First, run the development server:
6 |
7 | ```bash
8 | npm run dev
9 | # or
10 | yarn dev
11 | # or
12 | pnpm dev
13 | # or
14 | bun dev
15 | ```
16 |
17 | Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
18 |
19 | You can start editing the page by modifying `app/page.js`. The page auto-updates as you edit the file.
20 |
21 | This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font.
22 |
23 | ## Learn More
24 |
25 | To learn more about Next.js, take a look at the following resources:
26 |
27 | - [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.
28 | - [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial.
29 |
30 | You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js/) - your feedback and contributions are welcome!
31 |
32 | ## Deploy on Vercel
33 |
34 | The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js.
35 |
36 | Check out our [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details.
37 |
--------------------------------------------------------------------------------
/ui/styles/globals.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 |
5 | /* TODO: Find font */
6 | * {
7 | /* font-family: "Roboto", sans-serif; */
8 | }
9 |
10 | @layer base {
11 | h1 {
12 | @apply text-4xl font-bold;
13 | }
14 | h2 {
15 | @apply text-3xl font-bold;
16 | }
17 |
18 | h3 {
19 | @apply text-2xl font-bold;
20 | }
21 |
22 | h4 {
23 | @apply text-xl font-bold;
24 | }
25 |
26 | h5 {
27 | @apply text-lg font-bold;
28 | }
29 |
30 | }
31 |
32 | @font-face {
33 | font-family: 'Roboto';
34 | src: url('/fonts/Roboto-Regular.ttf') format('truetype');
35 | font-weight: 400;
36 | }
37 |
38 | @font-face {
39 | font-family: 'Roboto';
40 | src: url('/fonts/Roboto-Bold.ttf') format('truetype');
41 | font-weight: 700;
42 | }
43 |
44 | @font-face {
45 | font-family: 'Roboto';
46 | src: url('/fonts/Roboto-Black.ttf') format('truetype');
47 | font-weight: 800;
48 | }
49 |
50 | @font-face {
51 | font-family: 'Roboto';
52 | src: url('/fonts/Roboto-Light.ttf') format('truetype');
53 | font-weight: 300;
54 | }
55 |
56 | * {
57 | font-family: 'Roboto';
58 | }
59 |
60 | :root {
61 | --foreground-rgb: 0, 0, 0;
62 | --background-start-rgb: 214, 219, 220;
63 | --background-end-rgb: 255, 255, 255;
64 | }
65 |
66 | @media (prefers-color-scheme: dark) {
67 | :root {
68 | --foreground-rgb: 255, 255, 255;
69 | --background-start-rgb: 0, 0, 0;
70 | --background-end-rgb: 0, 0, 0;
71 | }
72 | }
73 |
74 |
75 | @layer utilities {
76 | .text-balance {
77 | text-wrap: balance;
78 | }
79 | }
80 |
81 | .content {
82 | position: relative;
83 | z-index: 1;
84 | }
--------------------------------------------------------------------------------
/lg_pentest/pentest_agent/agent.py:
--------------------------------------------------------------------------------
1 | from typing import Annotated, TypedDict, Literal
2 |
3 | from langgraph.graph import StateGraph, END
4 | from langchain_core.messages import HumanMessage, AIMessage
5 |
6 |
7 | from lg_pentest.pentest_agent.supervisor import supervisor
8 | from lg_pentest.pentest_agent.pentester import pentester
9 | from lg_pentest.pentest_agent.tools import tools_node
10 | from lg_pentest.pentest_agent.evaluator import evaluator
11 | from lg_pentest.pentest_agent.utils.state import PentestState, GraphConfig
12 |
13 |
14 |
15 | # def tools(state: PentestState) -> PentestState:
16 | # # Logic for tools node
17 | # return {'pentest_results': {'vulnerability': 'SQL Injection found'}, 'current_step': 'pentester'}
18 |
19 | def recorder(state: PentestState) -> PentestState:
20 | # Logic for recorder node (placeholder)
21 | return {'messages': [{'role': 'assistant', 'content': 'Pentest Report: Critical SQL Injection vulnerability found'}], 'current_step': '__end__'}
22 |
23 | def route_supervisor(state: PentestState) -> Literal['pentester', 'evaluator', 'recorder', '__end__']:
24 | return state['current_step']
25 |
26 | def route_pentester(state: PentestState) -> Literal['tools_node', 'evaluator']:
27 | return 'tools_node' if state['current_step'] == 'pentester' else 'evaluator'
28 |
29 |
30 | # Create the graph
31 | workflow = StateGraph(PentestState, config_schema=GraphConfig)
32 |
33 | # Add nodes
34 | workflow.add_node('supervisor', supervisor)
35 | workflow.add_node('pentester', pentester)
36 | workflow.add_node('tools_node', tools_node)
37 | workflow.add_node('evaluator', evaluator)
38 | workflow.add_node('recorder', recorder)
39 |
40 | # Add edges
41 | workflow.set_entry_point('supervisor')
42 | workflow.add_conditional_edges('supervisor', route_supervisor)
43 | workflow.add_conditional_edges('pentester', route_pentester)
44 | workflow.add_edge('tools_node', 'pentester')
45 | workflow.add_edge('evaluator', 'supervisor')
46 | workflow.add_edge('recorder', 'supervisor')
47 |
48 | # Compile the graph
49 | pentest_graph = workflow.compile()
50 |
--------------------------------------------------------------------------------
/lg_pentest/pentest_agent/utils/nodes.py:
--------------------------------------------------------------------------------
1 | from functools import lru_cache
2 | from langchain_anthropic import ChatAnthropic
3 | from langchain_openai import ChatOpenAI
4 | from lg_pentest.pentest_agent.utils.tools import tools
5 | from langgraph.prebuilt import ToolNode
6 |
7 |
8 | @lru_cache(maxsize=4)
9 | def _get_model(model_name: str):
10 | if model_name == "anthropic":
11 | model = ChatAnthropic(temperature=0, model_name="claude-3-haiku-20240307")
12 | # model = ChatAnthropic(temperature=0, model_name="claude-3-5-sonnet-20240620")
13 | elif model_name == "openai":
14 | model = ChatOpenAI(temperature=0, model_name="gpt-4o")
15 | else:
16 | raise ValueError(f"Unsupported model type: {model_name}")
17 |
18 | model = model.bind_tools(tools)
19 | return model
20 |
21 | # Define the function that determines whether to continue or not
22 | def should_continue(state):
23 | messages = state["messages"]
24 | last_message = messages[-1]
25 | # If there are no tool calls, then we finish
26 | if not last_message.tool_calls:
27 | return "end"
28 | # Otherwise if there is, we continue
29 | else:
30 | return "continue"
31 |
32 |
33 | system_prompt = """
34 | Be a helpful agent: you can search the web and you can run shell commands.
35 | If you receive the output from a tool, show the output of the commands in
36 | markdown for debugging purposes, and prefix your response after the tool call
37 | with the DEBUG: keyword.
38 |
39 | """
40 |
41 | # Define the function that calls the model
42 | def call_model(state, config):
43 | messages = state["messages"]
44 | messages = [{"role": "system", "content": system_prompt}] + messages
45 | model_name = config.get('configurable', {}).get("model_name", "anthropic")
46 | model = _get_model(model_name)
47 | response = model.invoke(messages)
48 | # We return a list, because this will get added to the existing list
49 | return {"messages": [response]}
50 |
51 | # Define the function to execute tools
52 | tool_node = ToolNode(tools)
53 |
--------------------------------------------------------------------------------
/lg_pentest/pentest_agent/sup_agent.py:
--------------------------------------------------------------------------------
1 | from typing import Annotated, TypedDict, Literal
2 |
3 | from langgraph.graph import StateGraph, END
4 | from langchain_core.messages import HumanMessage, AIMessage
5 |
6 | import operator
7 | from lg_pentest.pentest_agent.utils.nodes import call_model, should_continue, tool_node
8 | from lg_pentest.pentest_agent.utils.state import AgentState
9 | import os
10 |
11 |
12 | # Define the config
13 | class GraphConfig(TypedDict):
14 | model_name: Literal["anthropic", "openai"]
15 |
16 |
17 | # Define a new graph
18 | workflow = StateGraph(AgentState, config_schema=GraphConfig)
19 |
20 | # Define the two nodes we will cycle between
21 | workflow.add_node("agent", call_model)
22 | workflow.add_node("action", tool_node)
23 |
24 | # Set the entrypoint as `agent`
25 | # This means that this node is the first one called
26 | workflow.set_entry_point("agent")
27 |
28 | # We now add a conditional edge
29 | workflow.add_conditional_edges(
30 | # First, we define the start node. We use `agent`.
31 | # This means these are the edges taken after the `agent` node is called.
32 | "agent",
33 | # Next, we pass in the function that will determine which node is called next.
34 | should_continue,
35 | # Finally we pass in a mapping.
36 | # The keys are strings, and the values are other nodes.
37 | # END is a special node marking that the graph should finish.
38 | # What will happen is we will call `should_continue`, and then the output of that
39 | # will be matched against the keys in this mapping.
40 | # Based on which one it matches, that node will then be called.
41 | {
42 | # If `tools`, then we call the tool node.
43 | "continue": "action",
44 | # Otherwise we finish.
45 | "end": END,
46 | },
47 | )
48 |
49 | # We now add a normal edge from `tools` to `agent`.
50 | # This means that after `tools` is called, `agent` node is called next.
51 | workflow.add_edge("action", "agent")
52 |
53 | # Finally, we compile it!
54 | # This compiles it into a LangChain Runnable,
55 | # meaning you can use it as you would any other runnable
56 | graph = workflow.compile()
57 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os
4 | from dotenv import load_dotenv
5 | load_dotenv()
6 |
7 |
8 | import json
9 | from rich import print
10 | from typing_extensions import Annotated, TypedDict
11 |
12 | from fastapi import FastAPI, Body
13 | from fastapi.responses import StreamingResponse
14 | from fastapi.middleware.cors import CORSMiddleware
15 | from langchain_core.messages import HumanMessage, ToolMessage
16 |
17 | from lg_pentest.pentest_agent.agent import pentest_graph
18 |
19 |
20 | app = FastAPI(
21 | title="BreachSeek",
22 | version="1.0",
23 | description="Pentester AI Agents",
24 | )
25 |
26 | # so that we can run in our browser
27 | app.add_middleware(
28 | CORSMiddleware,
29 | allow_origins=["*"],
30 | allow_credentials=True,
31 | allow_methods=["*"],
32 | allow_headers=["*"],
33 | expose_headers=["*"],
34 | )
35 |
36 |
37 | def event_stream(query: str):
38 | # Stream graph updates to the client as server-sent events
39 | initial_state = {
40 | "messages": [{"role": "user", "content": query}],
41 | # "findings": {
42 | # "critical": ['4 vulnerabilities'],
43 | # "medium": ['11 open ports'],
44 | # },
45 | # "command": "nmap -sV -sC -p- -oN metasploitable2_scan.txt 192.168.100.231",
46 | # "model_status": "scanning",
47 | # "evaluation": "wow",
48 | }
49 | for chunk in pentest_graph.stream(initial_state, stream_mode="updates"):
50 | for node_name, node_results in chunk.items():
51 | print(f'{node_name = }')
52 | node_results['agent'] = node_name
53 | print(node_results)
54 | chunk_messages = node_results.get("messages", [])
55 | model_status = node_results.get("model_status", [])
56 | for message in chunk_messages:
57 | if not message['content']:
58 | continue
59 | if isinstance(message, ToolMessage):
60 | continue
61 | if message['role'] == 'tool_calls':
62 | event_str = "event: tool_event"
63 | else:
64 | event_str = "event: ai_event"
65 | # data_str = f"data: {message.content}"
66 | data_str = 'data: ' + json.dumps(node_results)
67 | yield f"{event_str}\n{data_str}\n\n"
68 |
69 |
70 | @app.post("/stream")
71 | async def stream(query: Annotated[str, Body(embed=True)]):
72 | return StreamingResponse(event_stream(query), media_type="text/event-stream")
73 |
74 |
75 | if __name__ == "__main__":
76 | import uvicorn
77 |
78 | uvicorn.run(app, host="localhost", port=8000)
79 |
--------------------------------------------------------------------------------
/lg_pentest/pentest_agent/recorder.py:
--------------------------------------------------------------------------------
1 | from lg_pentest.pentest_agent.utils.state import PentestState
2 | from lg_pentest.pentest_agent.utils.model import _get_model
3 |
4 | from langchain_core.pydantic_v1 import BaseModel, Field
5 |
6 |
7 | class Report(BaseModel):
8 | report: str = Field(description='This is the summary written in Latex')
9 | file_name: str = Field(description='This is the filename of the Latex report')
10 |
11 |
12 | def recorder(state: PentestState):
13 | model = _get_model().with_structured_output(Report)
14 | generate_final_report = state['recorder_options']['generate_final_report']
15 |
16 | if generate_final_report:
17 | return recorder_final(state, model)
18 | else:
19 | state = recorder_summary(state, model)
20 | return recorder_final(state, model)
21 |
22 | def recorder_summary(state: PentestState, model):
23 | prompt = '''You are tasked with recording and summarizing information of a chat between a human and an LLM in \
24 | which the LLM is utilizing the command line to execute tasks. You have as input the history of the last command that ran, \
25 | which includes the output logs and the message prompt for previous commands:
26 |
27 |
28 | {history}
29 |
30 |
31 | Generate a summary for this using latex.
32 | '''
33 | formats = {
34 | 'history': {'user_prompts': state['messages']} | state['pentest_results'],
35 | }
36 | prompt = prompt.format(**formats)
37 |
38 | response = model.invoke(prompt)
39 |
40 | state['recorder_options']['file_names'].append(response.file_name)
41 |
42 | with open(response.file_name, 'w') as f:
43 | f.write(response.report)
44 |
45 | return state
46 |
47 | def recorder_final(state: PentestState, model):
48 | prompt = '''You are tasked with summarizing and reporting information of a chat between a human and an LLM in \
49 | which the LLM is utilizing the command line to execute tasks. You have as input the history of summaries of all previous \
50 | outputs and interactions between the human and the LLM:
51 |
52 |
53 | {history}
54 |
55 |
56 | You are tasked with generating a final report in a Latex format. Also return the file path as a relative path.
57 | '''
58 |
59 | summaries = []
60 | file_names = state['recorder_options']['file_names']
61 | for file_name in file_names:
62 | with open(file_name) as f:
63 | summary = f.read()
64 | summaries.append(summary)
65 |
66 | formats = {
67 | 'history': summaries
68 | }
69 | prompt = prompt.format(**formats)
70 |
71 | response = model.invoke(prompt)
72 | state['recorder_options']['report'] = response.report
73 | with open(response.file_name, 'w') as f:
74 | f.write(response.report)
75 |
76 | return state
77 |
78 |
--------------------------------------------------------------------------------
/lg_pentest/pentest_agent/supervisor.py:
--------------------------------------------------------------------------------
1 | from typing_extensions import Annotated, TypedDict, Literal, Sequence, Dict
2 | import operator
3 | from langchain_core.messages import BaseMessage, AnyMessage, HumanMessage
4 | from lg_pentest.pentest_agent.utils.model import _get_model
5 | from lg_pentest.pentest_agent.utils.state import PentestState
6 | from langchain_core.messages import SystemMessage
7 |
8 | system_prompt = SystemMessage("""
9 | You are the supervisor of AI agents, responsible for overseeing their performance.
10 | You send the pentesting tasks to the pentester agent and receive the response
11 | from an evaluator agent. If the evaluator says everything is done, end the program.
12 |
13 | Try to end as soon as possible.
14 | """)
15 |
16 | class Task(TypedDict):
17 | supervisor_thought: Annotated[str, ..., "Supervisor thoughts, including decision making based on the evaluator response if it has responded"]
18 | tasks: list
19 | next_agent: Literal['pentester', 'evaluator', 'recorder', '__end__']
20 | done: bool
21 |
22 | def _swap_messages(messages):
23 | new_messages = []
24 | for m in messages:
25 | if m['role'] == 'assistant':
26 | new_messages.append({"role": "user", "content": m['content']})
27 | else:
28 | new_messages.append({"role": "assistant", "content": m['content']})
29 | return new_messages
30 |
31 | # Define node functions
32 | def supervisor(state: PentestState) -> PentestState:
33 | if state['evaluation'] and 'end' in state['evaluation']:
34 | return {
35 | 'messages': [{'role': 'assistant', 'content': 'done, bye!'}],
36 | 'current_step': '__end__',
37 | }
38 | messages = [system_prompt] + state['messages']
39 | if state['evaluation']:
40 | messages = [system_prompt] + [HumanMessage(content=f"based on {state['evaluation']}, what should we do now to finish the tasks: {state['pentest_tasks']}")]
41 | # messages = [system_prompt] + state['messages']
42 | model = _get_model().with_structured_output(Task)
43 | response = model.invoke(messages) # error here
44 | if response['done']:
45 | return {
46 | 'messages': [{'role': 'assistant', 'content': 'done, bye!'}],
47 | 'current_step': '__end__',
48 | }
49 | state['current_step'] = response['next_agent']
50 | if response['next_agent'] == 'pentester':
51 | return {
52 | 'messages': [{'role': 'assistant',
53 | 'content': response['supervisor_thought']}],
54 | 'current_step': 'pentester',
55 | 'pentest_tasks': response['tasks'],
56 | }
57 | return {
58 | 'messages': [{'role': 'assistant', 'content': 'done, bye!'}],
59 | 'current_step': '__end__',
60 | }
61 |
--------------------------------------------------------------------------------
/lg_pentest/pentest_agent/evaluator.py:
--------------------------------------------------------------------------------
1 | from typing_extensions import Annotated, TypedDict, List, Optional, Dict, Literal
2 | from lg_pentest.pentest_agent.utils.model import _get_model
3 | from lg_pentest.pentest_agent.utils.state import PentestState
4 | from langchain_core.messages import SystemMessage, HumanMessage, ToolMessage
5 |
6 | class Task(TypedDict):
7 | """Task evaluation"""
8 | message: Annotated[str, ..., "The message to respond to the user"]
9 | evaluation: Annotated[str, ..., "Evaluate the pentester response, see if he finished the task"]
10 | done: Annotated[bool, ..., "True if the pentester finished the task"]
11 | fail: Annotated[bool, ..., "True if the pentester can't finish the task, this will end the program"]
12 | findings: Annotated[Dict[Literal['critical', 'high', 'medium', 'low'], List[str]], ..., "denotes findings worthy of mention"]
13 | # message: str
14 | # phase: Annotated[str, ... , "Current phase of pentesting"]
15 | # task: Annotated[str, ... , "Task to perform so that the tool agent succeeds."]
16 | # hints: Annotated[Optional[List[str]], ... , "May contain hints about the task"]
17 | # constraints: Annotated[Optional[List[str]], ... , "Contains constraints that the tool user agent should be aware of"]
18 | # program_name: Annotated[str, ... , "Program name that the tool uses to finish a task"]
19 | # args: Annotated[Optional[Dict], ... , "Args that might help the tool user agent"]
20 | # results: Annotated[Dict, ... , "Title of findings as keys, and a description as their value"]
21 | # results: str
22 |
23 | def _swap_messages(messages):
24 | new_messages = []
25 | for m in messages:
26 | if m['role'] == 'assistant':
27 | new_messages.append({"role": "user", "content": m['content']})
28 | else:
29 | new_messages.append({"role": "assistant", "content": m['content']})
30 | return new_messages
31 |
32 | def evaluator(state: PentestState) -> PentestState:
33 | system_message = SystemMessage("""
34 | You are evaluating the response from a pentester agent.
35 | If the pentester brings back some results, give it a pass.
36 | If it did very badly and you think it can do better, point
37 | out its mistake and make it redo the task. If the pentester
38 | can't do the task, end the program and let the supervisor know.
39 | """)
40 |
41 | model = _get_model().with_structured_output(Task)
42 | messages = [system_message] + [HumanMessage(content=f"evaluate pentester results {state['tool_results']}")]
43 | # messages = [system_message] \
44 | # + _swap_messages(state['messages']) \
45 | # + [HumanMessage(content=f"evaluate pentester results")]
46 | response = model.invoke(messages)
47 |
48 | if response['done'] or response['fail']:
49 | response['evaluation'] += 'end program now'
50 | return {
51 | 'messages': [{'role': 'assistant',
52 | 'content': response['message']}],
53 | 'evaluation': response['evaluation'],
54 | 'current_step': 'supervisor' if response['done'] or response['fail'] else 'pentester',
55 | # "findings": {
56 | # "critical": ['4 vulnerabilities'],
57 | # "medium": ['11 open ports'],
58 | # },
59 | 'findings': response['findings'],
60 | }
61 |
62 |
--------------------------------------------------------------------------------
/ui/components/chat/BotChatBubble.jsx:
--------------------------------------------------------------------------------
1 | import ReactMarkdown from "react-markdown";
2 | import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";
3 | import {
4 | nightOwl,
5 | oneLight,
6 | } from "react-syntax-highlighter/dist/esm/styles/prism";
7 | import remarkGfm from "remark-gfm";
8 | import { useEffect, useState } from "react";
9 | import { FaRobot, FaClipboardList, FaHatCowboy, FaCode } from "react-icons/fa";
10 | import { useCookie } from "../../hooks/useCookie";
11 | import { useTheme } from "../../hooks/ThemeContext";
12 | export default function BotChatBubble({ children, bot_name = "" }) {
13 | const [codeStyle, setCodeStyle] = useState(oneLight); // Default to light theme
14 |
15 | const { theme } = useTheme();
16 |
17 | useEffect(() => {
18 | console.log("Theme changed:", theme); // For debugging
19 | setCodeStyle(theme === "dark" ? nightOwl : oneLight);
20 | }, [theme]);
21 |
22 | const agents_props = {
23 | supervisor: {
24 | icon: ,
25 | },
26 | pentester: {
27 | icon: ,
28 | },
29 | evaluator: {
30 | icon: ,
31 | },
32 | tools_node: {
33 | icon: ,
34 | },
35 | };
36 |
37 | return (
38 |
39 |
40 |
41 | {agents_props[bot_name]?.icon || }
42 |
43 | {bot_name}
44 |
45 |
46 |
59 | {String(children).replace(/\n$/, "")}
60 |
61 | ) : (
62 |
63 | {children}
64 |
65 | );
66 | },
67 | table({ node, ...props }) {
68 | return (
69 |
73 | );
74 | },
75 | th({ node, ...props }) {
76 | return (
77 | |
85 | );
86 | },
87 | td({ node, ...props }) {
88 | return (
89 | |
93 | );
94 | },
95 | }}
96 | >
97 | {children}
98 |
99 |
100 |
101 | );
102 | }
103 |
--------------------------------------------------------------------------------
/lg_pentest/pentest_agent/pentester.py:
--------------------------------------------------------------------------------
1 | from lg_pentest.pentest_agent.utils.model import _get_model
2 | from lg_pentest.pentest_agent.utils.state import PentestState
3 | from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
4 | from langchain_core.pydantic_v1 import BaseModel, Field
5 | from typing_extensions import Annotated, TypedDict, List, Literal, Optional
6 |
7 | class Results(TypedDict):
8 | """The results that the pentester needs to work"""
9 | message: Annotated[str, ..., "Pentester thought process"]
10 | phase: Annotated[str, ..., "Current phase of pentesting, such as scanning or exploitation"]
11 | tasks: Annotated[List[str], ..., "Tasks to perform to complete the current phase"]
12 | results: Annotated[List[str], ..., "Summary of results"]
13 | tool_use: Annotated[bool, ..., "Indicates if tools are still needed to complete a task"]
14 | software: Annotated[List[str], ..., "program names that will be used to perform the tasks"]
15 | command: Annotated[str, ..., "possible command that will be run"]
16 |
17 | # class Results(BaseModel):
18 | # """The results that the pentester needs to work"""
19 | # message: str = Field(description="Pentester though process")
20 | # phase: str = Field(description="Current phase of pentesting, such as scanning or exploitation")
21 | # tasks: List[dict] = Field(description="Task to perform to be done with the current phase")
22 | # results: List[str] = Field(description="Summary of results")
23 | # tool_use: bool = Field(description="If we still need to call tools to perform finish a task")
24 | # software: List[str] = Field(description="program names that we will use to do the tasks")
25 |
26 |
27 |
28 | system_prompt = SystemMessage("""
29 | You are a pentester working with a team of AI agents. You will receive tasks from
30 | your supervisor, and you will have to use tools by calling a tool agent that
31 | runs the software on your behalf. Once everything is done, you pass your results
32 | to the evaluator. Don't complicate things: if a simple command works, use it.
33 | """)
34 |
35 | def _swap_messages(messages):
36 | new_messages = []
37 | for m in messages:
38 | if m['role'] == 'assistant':
39 | new_messages.append({"role": "user", "content": m['content']})
40 | else:
41 | new_messages.append({"role": "assistant", "content": m['content']})
42 | return new_messages
43 |
44 | def pentester(state: PentestState) -> PentestState:
45 | # print(state)
46 | tasks = f"""
47 | Do the following tasks:
48 |
49 | {state['pentest_tasks']}
50 | """
51 | # print(f'{tasks = }')
52 | if state['tool_results']: # remove later, for debugging
53 | return {'current_step': 'evaluator'}
54 | # messages = [system_prompt, HumanMessage(content=tasks)]
55 | messages = [system_prompt] + [{'role': 'user', 'content': tasks}] +_swap_messages(state['messages'])
56 | model = _get_model().with_structured_output(Results)
57 | response = model.invoke(messages)
58 | print(f'pentester:\n{response = }')
59 | if response['tool_use']:
60 | return {
61 | 'messages': [{'role': 'assistant',
62 | 'content': response['message']}],
63 | # 'tool_calls': response['software']}],
64 | 'current_step': 'pentester',
65 | 'tool_name': response['software'][0],
66 | 'task': response['tasks'][0],
67 | # "command": "nmap -sV -sC -p- -oN metasploitable2_scan.txt 192.168.100.231",
68 | "command": response['command'],
69 | }
70 | return {'current_step': 'evaluator'}
71 |
--------------------------------------------------------------------------------
/ui/app/page.jsx:
--------------------------------------------------------------------------------
1 | "use client";
2 | import React, { useState } from "react";
3 | import Link from "next/link";
4 | import { useRouter } from 'next/navigation';
5 |
6 | export default function Page() {
7 | const router = useRouter();
8 | const [url, setUrl] = useState("");
9 | const [consent, setConsent] = useState(false);
10 | const [task, setTask] = useState("");
11 |
12 |
13 | const handleSubmit = (e) => {
14 | e.preventDefault();
15 | if (task && consent) {
16 | const queryParams = {
17 | target: url,
18 | task: task,
19 | };
20 | router.push(`/chat?target=${queryParams.target}&task=${queryParams.task}`);
21 | }
22 | };
23 |
24 | return (
25 |
26 |
27 |
28 | Welcome to BreachSeek
29 |
30 |
AI-powered pentesting agent
31 |
32 |
33 |
101 |
102 | );
103 | }
104 |
--------------------------------------------------------------------------------
/ui/app/playground/page.jsx:
--------------------------------------------------------------------------------
1 | "use client";
2 | import { useEffect, useState } from "react";
3 | import useSWR from "swr";
4 | import axios from "axios";
5 | import ChatInputField from "../../components/chat/ChatInputField";
6 | import UserChatBubble from "../../components/chat/UserChatBubble";
7 | import BotChatBubble from "../../components/chat/BotChatBubble";
8 | import { RemoteRunnable } from "@langchain/core/runnables/remote";
9 | import { useSearchParams } from "next/navigation";
10 | import StreamComponent from "./StreamComponent.jsx";
11 |
12 | // const fetcher = url => axios.post(url).then(res => res.data);
13 |
14 | const chain = new RemoteRunnable({
15 | url: `http://localhost:8000/graph/`,
16 | });
17 | const markdownContent = `
18 | # Hello World
19 |
20 |
21 | # h1 Heading
22 | **This is bold text**
23 | __This is bold text__
24 | *This is italic text*
25 | _This is italic text_
26 | ~~Strikethrough~~
27 | This is a code block:
28 | \`\`\`cli
29 | ls
30 | \`\`\`
31 | ## Tables
32 |
33 | | Syntax | Description |
34 | | ----------- | ----------- |
35 | | Header | Title |
36 | | Paragraph | Text |
37 |
38 | `;
39 |
40 |
41 |
42 | export default function Home() {
43 | const [messages, setMessages] = useState([]);
44 | const [tempMessage, setTempMessage] = useState("");
45 | const [runningCommands, setRunningCommands] = useState([]);
46 | const searchParams = useSearchParams();
47 | const target = searchParams.get('target');
48 | const task = searchParams.get('task');
49 |
50 | useEffect(() => {
51 | if (target || task) {
52 | let message = `Perform this task: ${task}`;
53 | if (target) {
54 | message += ` on this target: ${target}`;
55 | }
56 | handleSubmit(message);
57 | }
58 | }, [target, task]);
59 |
60 | useEffect(
61 | function doNothing(params) {
62 | return;
63 | }
64 | , [tempMessage])
65 |
66 | const handleSubmit = async (message) => {
67 | if (!message) {
68 | return;
69 | }
70 | const messageHistory = [...messages, { message: message, sender: "user" }];
71 | setMessages(messageHistory);
72 | try {
73 | const response = await chain.stream({ task: message });
74 | var bot_message = '';
75 | for await (const chunk of response) {
76 |         // Round-trip through JSON so the streamed chunk becomes a plain, inspectable object.
77 |         const json_chunk = JSON.stringify(chunk);
78 |         const obj = JSON.parse(json_chunk);
79 | if (typeof obj.agent?.kwargs?.messages[0]?.kwargs?.content == 'string' || obj.agent?.kwargs?.messages[0]?.kwargs?.content instanceof String) {
80 | bot_message += obj.agent?.kwargs?.messages[0]?.kwargs?.content
81 | setTempMessage(bot_message);
82 | console.log("temp: ", tempMessage);
83 | }
84 | }
85 |       const regex = /```.*```/s;
86 |       const matches = bot_message.match(regex); // tempMessage state is stale inside this closure
87 |       console.log("matches: ", matches);
88 | setTempMessage('');
89 | setMessages([...messageHistory, { message: bot_message, sender: "agent" }]);
90 | } catch (error) {
91 | console.error("Error sending message:", error);
92 | }
93 | };
94 |
95 | return (
96 |     {/* JSX markup was stripped from this dump. The page maps `messages` to chat
97 |        bubbles (presumably the imported UserChatBubble for sender === "user" and
98 |        BotChatBubble otherwise), shows `tempMessage` while a reply is still
99 |        streaming, renders the markdownContent demo, and ends with a ChatInputField
100 |        wired to handleSubmit. */}
121 | );
122 | }
123 |
--------------------------------------------------------------------------------
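
For reference, the `/graph/` endpoint that the playground page streams from can also be exercised outside the browser. This is a hedged sketch using langserve's Python `RemoteRunnable` client, assuming the backend at `http://localhost:8000/graph/` is a standard LangServe route that accepts the same `{"task": ...}` input the JS code sends.

```python
# Rough Python counterpart of the page's streaming call, assuming the backend
# at http://localhost:8000/graph/ is a standard LangServe deployment.
from langserve import RemoteRunnable

chain = RemoteRunnable("http://localhost:8000/graph/")

for chunk in chain.stream({"task": "Perform this task: scan on this target: 192.168.100.231"}):
    # Each chunk mirrors what the JS client sees: nested agent/messages structures.
    print(chunk)
```
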
/lg_pentest/pentest_agent/tools.py:
--------------------------------------------------------------------------------
1 | import json
2 | from typing_extensions import Annotated, TypedDict, List, Optional, Dict, Literal
3 | from lg_pentest.pentest_agent.utils.model import _get_model
4 | from lg_pentest.pentest_agent.utils.state import PentestState
5 | from langchain_core.messages import SystemMessage, HumanMessage, ToolMessage
6 | # from langchain_core.pydantic_v1 import BaseModel, Field
7 | from langchain_community.tools import ShellTool
8 | # from langgraph.prebuilt import ToolNode
9 |
10 | shell_tool = ShellTool()
11 | tools = [shell_tool]
12 | # class Task(BaseModel):
13 | #     """The task that the tool needs to do"""
14 | #     phase: str = Field(description="Current phase of pentesting")
15 | #     task: str = Field(description="Task to perform so that the tool agent succeeds.")
16 | #     hints: Optional[List[str]] = Field(description="May contain hints about the task")
17 | #     constraints: Optional[List[str]] = Field(description="Contains constraints that the tool user agent should be aware of")
18 | #     program_name: str = Field(description="program name that the tool uses to finish a task")
19 | #     args: Optional[dict] = Field(description="args that might help the tool user agent")
20 | # results: dict = Field(description="Title of findings as keys, and a description as their value")
21 |
22 | # class ToolNode:
23 | # """A node that runs the tools requested in the last AIMessage."""
24 | #
25 | # def __init__(self, tools: list) -> None:
26 | # self.tools_by_name = {tool.name: tool for tool in tools}
27 | #
28 | # def __call__(self, inputs: dict):
29 | # if messages := inputs.get("messages", []):
30 | # message = messages[-1]
31 | # else:
32 | # raise ValueError("No message found in input")
33 | # outputs = []
34 | # for tool_call in message['tool_calls']:
35 | # tool_result = self.tools_by_name[tool_call["name"]].invoke(
36 | # tool_call["args"]
37 | # )
38 | # outputs.append(
39 | # # ToolMessage(
40 | # {
41 | # 'role': 'tool_call',
42 | # 'content': json.dumps(tool_result),
43 | # # 'name': tool_call["name"],
44 | # # 'tool_call_id': tool_call["id"],
45 | # }
46 | # )
47 | # return {"messages": outputs}
48 |
49 | class Task(TypedDict):
50 | """The task that the tool needs to do"""
51 | message: str
52 | # phase: Annotated[str, ... , "Current phase of pentesting"]
53 | # task: Annotated[str, ... , "Task to perform so that the tool agent succeeds."]
54 | # hints: Annotated[Optional[List[str]], ... , "May contain hints about the task"]
55 | # constraints: Annotated[Optional[List[str]], ... , "Contains constraints that the tool user agent should be aware of"]
56 | # program_name: Annotated[str, ... , "Program name that the tool uses to finish a task"]
57 | # args: Annotated[Optional[Dict], ... , "Args that might help the tool user agent"]
58 | # results: Annotated[Dict, ... , "Title of findings as keys, and a description as their value"]
59 | results: str
60 |
61 |
62 | def _swap_messages(messages):
63 | new_messages = []
64 | for m in messages:
65 | if m['role'] == 'assistant':
66 | new_messages.append({"role": "user", "content": m['content']})
67 | else:
68 | new_messages.append({"role": "assistant", "content": m['content']})
69 | return new_messages
70 |
71 | # tools = ToolNode(tools=[shell_tool])
72 |
73 | def tools_node(state: PentestState) -> PentestState:
74 |     system_prompt = SystemMessage(f"""
75 |     You are an AI agent who is an expert in using a tool named {state['tool_name']}
76 |     to do {state['pentest_tasks']}. You are working with a team of AI agents;
77 |     you will receive a task, and possibly hints, constraints, and args.
78 |     You will have to use your expertise in {state['tool_name']} to finish
79 |     the task. When you are done, pass the results to the calling agent.
80 |
81 |     You have access to a bash shell through the shell_tool.
82 |     """)
83 | print(f'{system_prompt = }')
84 | human_msg = HumanMessage(content=f"Use {state['tool_name']} to do {state['task']}")
85 | messages = [system_prompt] + [human_msg]
86 | model = _get_model().bind_tools(tools)
87 | ai_msg = model.invoke(messages)
88 | messages.append(ai_msg)
89 | for tool_call in ai_msg.tool_calls:
90 | selected_tool = {tool.name: tool for tool in tools}[tool_call["name"].lower()]
91 | tool_msg = selected_tool.invoke(tool_call)
92 | # tool_msg = selected_tool.invoke(tool_call)
93 | messages.append(tool_msg)
94 | # tool_msg = shell_tool.invoke(ai_msg.tool_calls)
95 | # messages.append(tool_msg)
96 | messages.append(HumanMessage(content='summarize before parsing'))
97 | model = _get_model().with_structured_output(Task)
98 | response = model.invoke(messages)
99 | print(f'tools:\n{response = }')
100 | return {
101 | 'messages': [{'role': 'assistant',
102 | 'content': '\n\n'.join(response.values())}],
103 | 'tool_results': response['results'],
104 | }
105 |
--------------------------------------------------------------------------------
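
To see the bind-and-replay loop from `tools_node` in isolation, here is a minimal, self-contained sketch. `ChatOpenAI` is an assumed stand-in for `_get_model()` and the prompts are placeholders; the tool-call handling mirrors the loop above.

```python
# Minimal sketch of the ShellTool bind/replay loop used in tools_node.
# ChatOpenAI is an assumed stand-in for _get_model(); swap in your provider.
from langchain_community.tools import ShellTool
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

shell_tool = ShellTool()
tools = [shell_tool]
model = ChatOpenAI(model="gpt-4o").bind_tools(tools)

messages = [
    SystemMessage(content="You can run shell commands with the terminal tool."),
    HumanMessage(content="List the files in the current directory."),
]
ai_msg = model.invoke(messages)
messages.append(ai_msg)

# Run every tool call the model asked for and append the resulting ToolMessages,
# exactly as tools_node does before its final summarization pass.
for tool_call in ai_msg.tool_calls:
    selected_tool = {t.name: t for t in tools}[tool_call["name"].lower()]
    messages.append(selected_tool.invoke(tool_call))

summary = ChatOpenAI(model="gpt-4o").invoke(messages)
print(summary.content)
```
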
/ui/app/chat/page.jsx:
--------------------------------------------------------------------------------
1 | "use client";
2 | import { useEffect, useState } from "react";
3 | import useSWR from "swr";
4 | import axios from "axios";
5 | import ChatInputField from "../../components/chat/ChatInputField";
6 | import UserChatBubble from "../../components/chat/UserChatBubble";
7 | import BotChatBubble from "../../components/chat/BotChatBubble";
8 | import { RemoteRunnable } from "@langchain/core/runnables/remote";
9 | import { useSearchParams } from "next/navigation";
10 | import { fetchEventSource } from "@microsoft/fetch-event-source";
11 | import { useModelContext } from "../../hooks/ModelContext";
12 |
13 | // const fetcher = url => axios.post(url).then(res => res.data);
14 |
15 | const chain = new RemoteRunnable({
16 | url: `http://localhost:8000/graph/`,
17 | });
18 | const markdownContent = `
19 | # Hello World
20 |
21 |
22 | # h1 Heading
23 | **This is bold text**
24 | __This is bold text__
25 | *This is italic text*
26 | _This is italic text_
27 | ~~Strikethrough~~
28 | This is a code block:
29 | \`\`\`cli
30 | ls
31 | \`\`\`
32 | ## Tables
33 |
34 | | Syntax | Description |
35 | | ----------- | ----------- |
36 | | Header | Title |
37 | | Paragraph | Text |
38 |
39 | `;
40 |
41 | export default function Home() {
42 | const [messages, setMessages] = useState([]);
43 | const [tempMessage, setTempMessage] = useState("");
44 | const [tempMessages, setTempMessages] = useState([]);
45 | const [runningCommands, setRunningCommands] = useState([]);
46 | const searchParams = useSearchParams();
47 | const target = searchParams.get("target");
48 | const task = searchParams.get("task");
49 |
50 | const {
51 | modelStatus,
52 | setModelStatus,
53 | modelSummary,
54 | setModelSummary,
55 | setModelFindings,
56 | modelCommands,
57 | setModelCommands,
58 | } = useModelContext();
59 |
60 | // TODO: set model status as a context
61 |
62 | const fetchData = async (message) => {
63 | await fetchEventSource("http://localhost:8000/stream", {
64 | headers: {
65 | "Content-Type": "application/json",
66 | },
67 | method: "POST",
68 | body: JSON.stringify({ query: message }),
69 | onmessage: (ev) => {
70 | console.log(ev.event);
71 | console.log(ev.data);
72 | const obj = JSON.parse(ev.data);
73 |       console.log("ev:", ev);
74 |       console.log("ev stringified:", obj.messages);
75 | console.log(`Received event: ${ev.data}`); // for debugging purposes
76 | console.log(`Received event: ${JSON.stringify(obj.messages)}`); // for debugging purposes
77 | console.log(
78 | `Received content: ${obj.messages[obj.messages.length - 1].content}`
79 | ); // for debugging purposes
80 |         const nextStatus = obj.current_step || obj.agent;
81 |         setModelStatus(nextStatus === "__end__" ? "Done" : nextStatus);
82 |
83 | // TODO:
84 | // setModelSummary();
85 |
86 | // get the findings from the model
87 | const findings = obj.findings;
88 | if (findings) {
89 | // TODO: reset the findings? or append new findings? or update changed findings?
90 | setModelFindings(findings);
91 | }
92 |
93 | // get the command that are running
94 | const runningCommand = obj.command;
95 | if (runningCommand) {
96 | setModelCommands([runningCommand]);
97 | }
98 |
99 | if (obj.agent == "tools_node") {
100 | obj.agent = "pentester";
101 | }
102 | setMessages((prev) => [...prev, obj]);
103 | },
104 | });
105 | };
106 |
107 | useEffect(() => {
108 | if (target || task) {
109 | let message = `Perform this task: ${task}`;
110 | if (target) {
111 | message += ` on this target: ${target}`;
112 | }
113 | handleSubmit(message);
114 | }
115 | }, [target, task]);
116 |
117 | useEffect(() => {}, [tempMessages]);
118 |
119 | // const handleSubmit = async (message) => {
120 | // if (!message) {
121 | // return;
122 | // }
123 | // const messageHistory = [...messages, { message: message, sender: "user" }];
124 | // setMessages(messageHistory);
125 | // try {
126 | // fetchData(message);
127 | // const bot_message = tempMessages.join(" ")
128 | // setTempMessages([]);
129 | // setMessages([...messageHistory, { message: bot_message, sender: "agent" }]);
130 | // } catch (error) {
131 | // console.error("Error sending message:", error);
132 | // }
133 | const handleSubmit = async (message) => {
134 | if (!message) {
135 | return;
136 | }
137 |
138 | // Update the chat window with the new message
139 | const newMessage = { type: "human", content: message };
140 | setMessages((prevMessages) => [...prevMessages, newMessage]);
141 | // send the message to the server
142 | fetchData(message)
143 | .then((response) => {
144 | // TODO: logic with response if needed
145 |
146 | console.log("response", response);
147 | })
148 | .catch((error) => {
149 | console.error("Error sending message:", error);
150 | });
151 | };
152 |
153 | // scroll to the bottom of the chat window when a new message is added
154 | useEffect(() => {
155 |     const chatWindow = document.getElementById("chat-window");
156 |     if (chatWindow) chatWindow.scrollTop = chatWindow.scrollHeight;
157 | }, [messages]);
158 |
159 | return (
160 |     {/* JSX markup was stripped from this dump (lines 160-185 of the original
161 |        file). The page renders a scrollable chat window with id="chat-window"
162 |        that maps `messages` to chat bubbles: presumably the imported
163 |        UserChatBubble for entries with type === "human" and BotChatBubble for
164 |        agent entries, where, per the author's note, `message` is the whole state
165 |        object and the last item of `message.messages` is displayed. A
166 |        ChatInputField wired to handleSubmit follows the chat window. */}
186 | );
187 | }
188 |
--------------------------------------------------------------------------------
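
The chat page above drives everything through a POST to `http://localhost:8000/stream` and treats each SSE `data:` payload as a JSON state snapshot (`current_step`/`agent`, `command`, `findings`, `messages`). Below is a rough Python consumer for the same endpoint, under the assumption that the server emits one JSON object per `data:` line, as the UI code implies.

```python
# Hedged sketch of a Python consumer for the /stream endpoint the chat page uses.
# Assumes the server sends SSE frames whose data: payload is a JSON state snapshot.
import json
import httpx

def stream_agent(query: str, url: str = "http://localhost:8000/stream") -> None:
    with httpx.stream("POST", url, json={"query": query}, timeout=None) as resp:
        for line in resp.iter_lines():
            if not line.startswith("data:"):
                continue  # skip event names, comments, and keep-alives
            snapshot = json.loads(line[len("data:"):].strip())
            step = snapshot.get("current_step") or snapshot.get("agent")
            command = snapshot.get("command")
            print(step, command or "")

# stream_agent("Perform this task: scan on this target: 192.168.100.231")
```
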
/ui/components/common/SideBar.jsx:
--------------------------------------------------------------------------------
1 | "use client";
2 | import { useState, useEffect } from "react";
3 | import SideBarItem from "./SideBarItem";
4 | import StatIndicator from "./StatIndicator";
5 | import GradientBtn from "../buttons/GradientBtn";
6 | import SideBarBtn from "../buttons/SideBarBtn";
7 | import { FiMoon, FiSun, FiSettings, FiEye, FiEyeOff } from "react-icons/fi";
8 | import { HiMiniCommandLine } from "react-icons/hi2";
9 | import { useTheme } from "../../hooks/ThemeContext";
10 | import Image from "next/image";
11 | import { useModelContext } from "../../hooks/ModelContext";
12 | import ModelStatus from "./ModelStatus";
13 |
14 | function SideBar() {
15 | const [showSidebar, setShowSidebar] = useState(true);
16 | const [showCommands, setShowCommands] = useState(false);
17 | const { theme, toggleTheme } = useTheme();
18 | const toggleSidebar = () => setShowSidebar(!showSidebar);
19 | const {
20 | modelStatus,
21 | setModelStatus,
22 | modelSummary,
23 | setModelSummary,
24 | modelFindings,
25 | modelCommands,
26 | } = useModelContext();
27 |
28 | return (
29 |     {/* JSX markup was stripped from this dump (roughly lines 29-160 of the
30 |        original file). The sidebar renders toggle controls for the theme, for
31 |        showing/hiding the panel, and for the running commands, and, when
32 |        showSidebar is true, status panels presumably built from the imported
33 |        SideBarItem, StatIndicator, ModelStatus, GradientBtn and SideBarBtn
34 |        components, driven by the model-context values destructured above. */}
161 | );
162 | }
163 |
164 | export default SideBar;
165 |
--------------------------------------------------------------------------------
/ui/public/icon.svg:
--------------------------------------------------------------------------------
1 | <!-- SVG markup not preserved in this dump -->
--------------------------------------------------------------------------------