44 | /* Make sure nested text elements (p, pre, etc.) wrap inside the message */
45 | .notification p,
46 | .notification pre {
47 | margin: 0;
48 | word-wrap: break-word;
49 | white-space: pre-wrap; /* Allow preformatted text to wrap */
50 | }
51 | .message-content {
52 | max-width: 100%;
53 | overflow: hidden;
54 | word-break: break-word;
55 | line-height: 1.4;
56 | }
57 | /* Optional: Add word-breaking for URLs */
58 | .notification a {
59 | word-wrap: break-word;
60 | word-break: break-word;
61 | text-decoration: underline;
62 | }
63 | /*Notification message styles end*/
64 |
65 | .task-progress {
66 | height: 40vh;
67 | overflow-y: auto;
68 | background-color: white;
69 | border-radius: var(--bulma-radius);
70 | }
71 |
72 | @media (min-height: 1200px) {
73 | .task-progress {
74 | height: 50vh;
75 | }
76 | }
77 |
78 | @media (min-height: 1400px) {
79 | .task-progress {
80 | height: 60vh;
81 | }
82 | }
83 |
84 | .task-progress .notification {
85 | padding: 0.5rem 1rem;
86 | display: block;
87 | max-width: 100%;
88 | word-wrap: break-word;
89 | box-sizing: border-box;
90 | overflow-wrap: break-word;
91 | }
92 |
93 | .menu-list .menu-item,
94 | .menu-list a,
95 | .menu-list button {
96 | background-color: transparent;
97 | }
98 |
99 | .menu-list ul.menu-stages {
100 | border-inline-start: 3px solid var(--bulma-border);
101 | padding-inline-start: 0;
102 | }
103 |
104 | .menu-list ul.menu-stages li {
105 | margin-left: calc(-1.4rem - 5px);
106 | }
107 |
108 | .menu-list a.menu-stage {
109 | display: flex;
110 | align-items: center;
111 | position: relative;
112 | padding: 0.5em 0 0.5rem 0.75em;
113 | width: calc(100% + 1.4rem - 5px);
114 | }
115 |
116 | .menu-list a.menu-stage > i {
117 | font-size: 1.4rem;
118 | margin-top: 3px;
119 | border-radius: 50%;
120 | background-color: rgb(247, 249, 251);
121 | padding: 5px;
122 | }
123 |
124 | .menu-list a.menu-stage span {
125 | flex: 1;
126 |   word-break: break-word; /* lets long stage labels wrap without breaking alignment */
127 | }
128 |
129 | .menu-list a.menu-stage.rejected span {
130 | text-decoration: line-through;
131 | opacity: 0.5;
132 | }
133 |
134 | .menu-list a.menu-stage.action_requested span {
135 | font-weight: 500;
136 | }
137 |
138 | .menu-list a.menu-stage div {
139 | display: flex;
140 | align-items: center;
141 | }
142 |
143 | .menu-stage-actions i {
144 | font-size: 1.4rem;
145 | }
146 |
147 | .business-animation {
148 | position: relative;
149 | border-radius: var(--bulma-radius-large);
150 | }
151 |
152 | #taskLoader {
153 | display: flex;
154 | flex-direction: column;
155 | justify-content: center;
156 | align-items: center;
157 | position: absolute;
158 | inset: 0;
159 | color: black;
160 | background-color: rgb(247, 249, 251);
161 | z-index: 1000;
162 | font-weight: 500;
163 | }
164 |
165 | #taskLoader span::before {
166 | content: "Getting task plan...";
167 | animation: taskLoaderAnimation infinite 3s linear;
168 | }
169 |
170 | #taskLoader.is-hidden {
171 | display: none;
172 | }
173 |
174 | @keyframes taskLoaderAnimation {
175 | 0% {
176 | content: "Getting task plan...";
177 | }
178 |
179 | 50% {
180 | content: "Contacting agents...";
181 | }
182 |
183 | 75% {
184 | content: "Loading conversations...";
185 | }
186 | }
187 |
188 | #taskLoader i {
189 | font-size: 3rem;
190 | }
191 |
192 | .task-stage-divider {
193 | text-align: center;
194 | margin: 1rem 0;
195 | font-size: 0.85rem;
196 | font-weight: 500;
197 | border: 1px solid rgb(71, 80, 235);
198 | border-left-width: 0;
199 | border-right-width: 0;
200 | border-bottom-width: 0;
201 | }
202 |
203 | .task-stage-divider legend {
204 | color: rgb(71, 80, 235);
205 |   padding-inline-start: 1rem;
206 |   padding-inline-end: 1rem;
207 | background: transparent;
208 | }
209 |
210 | .text-input-container {
211 | position: relative;
212 | border: 1px solid #ccc;
213 | border-radius: 8px;
214 | background-color: white;
215 | }
216 |
217 | textarea {
218 | width: 98%;
219 | padding: 16px 0px 0px 0px;
220 | border: none;
221 | border-radius: 8px 8px 0 0;
222 | font-size: 16px;
223 | line-height: 1.5;
224 | resize: none;
225 | outline: none;
226 | overflow: hidden;
227 | margin: 0 10px;
228 | align-items: center;
229 | background-color: white;
230 | }
231 |
232 | .star-icon {
233 | margin-right: 10px;
234 | cursor: pointer;
235 | }
236 |
237 | .char-count {
238 | font-size: 14px;
239 | color: #888;
240 | }
241 |
242 | .middle-bar {
243 | display: flex;
244 | justify-content: space-between;
245 |   align-items: flex-start;
246 | padding: 0px 5px;
247 | /* background-color: white; */
248 | }
249 |
250 | .bottom-bar {
251 | display: flex;
252 | justify-content: space-between;
253 | align-items: center;
254 | padding: 3px 10px;
255 | border-top: none;
256 | border-bottom: 4px solid #0f6cbd;
257 | /* background-color: white; */
258 | }
259 |
260 | .send-button {
261 | border: none;
262 | background: none;
263 | font-size: 18px;
264 | cursor: pointer;
265 | color: #007bff;
266 | padding: 4px;
267 | outline: none;
268 | }
269 |
270 | .send-button:hover {
271 | color: #0056b3;
272 | }
273 |
274 | .menu.task-menu {
275 | position: sticky;
276 | top: 0;
277 | }
--------------------------------------------------------------------------------
/src/frontend/wwwroot/app.css:
--------------------------------------------------------------------------------
1 | @import "https://cdn.jsdelivr.net/npm/bulma@1.0.2/css/bulma.min.css";
2 | @import "https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.6.0/css/all.min.css";
3 | @import "assets/theme.css";
4 |
5 | /* App global */
6 |
7 | html,
8 | body {
9 | overflow-x: hidden;
10 | overflow-y: auto;
11 | height: 100%;
12 | }
13 |
14 | body {
15 | position: relative;
16 | background: rgb(247, 249, 251);
17 | min-height: 100vh;
18 | }
19 |
20 | .border-right {
21 | border-right: 1px solid hsl(221, 14%, calc(86% + 0%));
22 | }
23 |
24 | /* App template */
25 |
26 | #app .columns {
27 | min-height: 100vh;
28 | height: 100%;
29 | }
30 | #app .modal,
31 | #app .menu {
32 | overflow: hidden; /* Prevent scrolling within modals and menus */
33 | }
34 | #app .asside {
35 | background: rgba(231, 236, 243, 0.7);
36 | }
37 | ul#tasksStats.menu-list {
38 | min-height: 100px;
39 | }
40 | @media (min-width: 1800px) {
41 | #app .asside {
42 | max-width: 400px;
43 | }
44 | }
45 |
46 | #app .menu-logo {
47 | font-size: 1.25rem;
48 | font-weight: 700;
49 | cursor: pointer;
50 | }
51 |
52 | #app .menu-logo img {
53 | width: 30px;
54 | }
55 |
56 | #app .asside .menu-list a {
57 | background-color: transparent;
58 | }
59 |
60 | #app .asside .menu-list a.is-active {
61 | background-color: rgb(71, 80, 235);
62 | }
63 |
64 | #app .asside .menu-list a.is-active i {
65 | color: white !important;
66 | }
67 |
68 | #app .asside .menu-list a.is-active:hover {
69 | background-color: rgb(71, 80, 235);
70 | }
71 |
72 | #app .asside .menu-list a.menu-task {
73 | display: flex;
74 | align-items: center;
75 | }
76 |
77 | #app .asside .menu-list a.menu-task span {
78 | flex: 1;
79 | }
80 |
81 | #app .asside .menu-list a:hover {
82 | background-color: rgba(0, 0, 0, 0.1);
83 | }
84 |
85 | #app .iframe {
86 | width: 100%;
87 | background-color: transparent;
88 | }
89 |
90 | #app .context-switch {
91 | position: fixed;
92 | bottom: 50px;
93 | right: calc(50% - 220px);
94 | z-index: 3;
95 | }
96 |
97 | .is-avatar.is-rounded {
98 | border-radius: var(--bulma-radius-rounded);
99 | }
100 |
101 | .is-avatar.is-agent {
102 | display: flex;
103 | /* background-color: rgba(231, 236, 243, 0.7); */
104 | background-color: rgba(70, 79, 235, 0.25);
105 | }
106 |
107 | .is-avatar.is-agent img {
108 | width: 75%;
109 | height: 75%;
110 | margin: 13%;
111 | }
112 |
113 | @keyframes moveImage {
114 | 0% {
115 | transform: rotate(0deg);
116 | }
117 |
118 | 50% {
119 | transform: rotate(-3deg);
120 | }
121 |
122 | 100% {
123 | transform: rotate(3deg);
124 | }
125 | }
126 |
127 | .is-avatar.is-agent img.manager {
128 | background-color: rgba(220, 56, 72, 0.35);
129 | box-shadow: 0 0 0 4px rgba(220, 56, 72, 0.35);
130 | animation: moveImage 0.3s infinite alternate;
131 | }
132 |
133 | .is-avatar.is-agent img.hr_agent {
134 | background-color: rgba(0, 209, 178, 0.35);
135 | box-shadow: 0 0 0 4px rgba(0, 209, 178, 0.35);
136 | animation: moveImage 0.5s infinite alternate;
137 | }
138 |
139 | .is-avatar.is-agent img.procurement_agent {
140 | background-color: rgba(255, 183, 15, 0.35);
141 | box-shadow: 0 0 0 4px rgba(255, 183, 15, 0.35);
142 | animation: moveImage 0.1s infinite alternate;
143 | }
144 |
145 | .is-avatar.is-agent img.tech_agent {
146 | background-color: rgba(178, 222, 39, 0.35);
147 | box-shadow: 0 0 0 4px rgba(178, 222, 39, 0.35);
148 | animation: moveImage 0.7s infinite alternate;
149 | }
150 |
151 | .is-avatar.is-agent img.unknown {
152 | background-color: rgba(39, 57, 222, 0.35);
153 | box-shadow: 0 0 0 4px rgba(39, 57, 222, 0.35);
154 | animation: moveImage 0.7s infinite alternate;
155 | }
156 |
157 | .is-avatar.has-status::after {
158 | content: "";
159 | position: absolute;
160 | bottom: 0;
161 | right: 0;
162 | width: 30%;
163 | height: 30%;
164 | border-radius: 50%;
165 | background-color: rgb(255, 255, 255);
166 | border: 2px solid rgb(255, 255, 255);
167 | }
168 |
169 | .is-avatar.has-status.has-status-active::after {
170 | background-color: hsl(
171 | var(--bulma-success-h),
172 | var(--bulma-success-s),
173 | var(--bulma-success-l)
174 | );
175 | }
176 |
177 | .is-avatar.has-status.has-status-busy::after {
178 | background-color: hsl(
179 | var(--bulma-danger-h),
180 | var(--bulma-danger-s),
181 | var(--bulma-danger-l)
182 | );
183 | }
184 |
185 | .is-avatar.has-status.has-status-paused::after {
186 | background-color: hsl(
187 | var(--bulma-dark-h),
188 | var(--bulma-dark-s),
189 | var(--bulma-dark-l)
190 | );
191 | }
192 |
193 | .button.is-greyed-out {
194 | background-color: #e0e0e0;
195 | color: lightgrey;
196 | cursor: not-allowed;
197 | }
198 |
199 | .button.is-selected {
200 | background-color: #d3d3d3;
201 | color: #000;
202 | }
203 |
204 | .notyf__toast {
205 | max-width: 100% !important;
206 | border-radius: var(--bulma-control-radius) !important;
207 | }
208 |
209 | .notyf__wrapper {
210 | padding: 0.75rem 0.5rem !important;
211 | }
212 | /* Menu list scroll style start*/
213 | #app .asside .menu-list {
214 | max-height: 400px;
215 | overflow-y: scroll;
216 | padding-right: 2px;
217 | transition: all 0.3s ease;
218 | box-sizing: border-box;
219 | }
220 | /* Hide the scrollbar initially (before hover) */
221 | #app .asside .menu-list::-webkit-scrollbar {
222 | width: 8px;
223 | opacity: 0;
224 | visibility: hidden;
225 | transition: opacity 0.3s ease, visibility 0s 0.3s;
226 | }
227 | /* Style the scrollbar thumb (the draggable part) */
228 | #app .asside .menu-list::-webkit-scrollbar-thumb {
229 | border-radius: 10px;
230 | transition: background-color 0.3s ease;
231 | }
232 | /* Show the scrollbar and thumb when hovering */
233 | #app .asside .menu-list:hover::-webkit-scrollbar {
234 | opacity: 1;
235 | visibility: visible;
236 | transition: opacity 0.3s ease, visibility 0s;
237 | }
238 | /* Style the thumb when hovering */
239 | #app .asside .menu-list:hover::-webkit-scrollbar-thumb {
240 | background-color: rgba(0, 0, 0, 0.2);
241 | }
242 | /* Menu list scroll style end*/
243 |
--------------------------------------------------------------------------------
/src/backend/agents/base_agent.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from typing import Any, List, Mapping
3 |
4 | from autogen_core import AgentId, MessageContext
5 | from autogen_core import RoutedAgent, message_handler
6 | from autogen_ext.models.openai import AzureOpenAIChatCompletionClient
7 | from autogen_core.models import (AssistantMessage, LLMMessage, SystemMessage,
8 | UserMessage)
9 | from autogen_core.tool_agent import tool_agent_caller_loop
10 | from autogen_core.tools import Tool
11 |
12 | from context.cosmos_memory import CosmosBufferedChatCompletionContext
13 | from models.messages import (ActionRequest, ActionResponse,
14 | AgentMessage, Step, StepStatus)
15 | from event_utils import track_event_if_configured
16 |
17 | class BaseAgent(RoutedAgent):
18 | def __init__(
19 | self,
20 | agent_name: str,
21 | model_client: AzureOpenAIChatCompletionClient,
22 | session_id: str,
23 | user_id: str,
24 | model_context: CosmosBufferedChatCompletionContext,
25 | tools: List[Tool],
26 | tool_agent_id: AgentId,
27 | system_message: str,
28 | ):
29 | super().__init__(agent_name)
30 | self._agent_name = agent_name
31 | self._model_client = model_client
32 | self._session_id = session_id
33 | self._user_id = user_id
34 | self._model_context = model_context
35 | self._tools = tools
36 | self._tool_schema = [tool.schema for tool in tools]
37 | self._tool_agent_id = tool_agent_id
38 | self._chat_history: List[LLMMessage] = [SystemMessage(content=system_message)]
39 |
40 | @message_handler
41 | async def handle_action_request(
42 | self, message: ActionRequest, ctx: MessageContext
43 | ) -> ActionResponse:
44 | step: Step = await self._model_context.get_step(
45 | message.step_id, message.session_id
46 | )
47 | # TODO: Agent verbosity
48 | # await self._model_context.add_item(
49 | # AgentMessage(
50 | # session_id=message.session_id,
51 | # plan_id=message.plan_id,
52 | # content=f"{self._agent_name} received action request: {message.action}",
53 | # source=self._agent_name,
54 | # step_id=message.step_id,
55 | # )
56 | # )
57 | if not step:
58 | return ActionResponse(
59 | step_id=message.step_id,
60 | status=StepStatus.failed,
61 | message="Step not found in memory.",
62 | )
63 | # TODO - here we use the action message as the source of the action, rather than step.action, as we have added a temporary conversation history to the agent, as a mechanism to give it visibility of the replies of other agents. The logic/approach needs to be thought through further to make it more consistent.
64 | self._chat_history.extend(
65 | [
66 | AssistantMessage(content=message.action, source="GroupChatManager"),
67 | UserMessage(
68 | content=f"{step.human_feedback}. Now make the function call",
69 | source="HumanAgent",
70 | ),
71 | ]
72 | )
73 | try:
74 | messages: List[LLMMessage] = await tool_agent_caller_loop(
75 | caller=self,
76 | tool_agent_id=self._tool_agent_id,
77 | model_client=self._model_client,
78 | input_messages=self._chat_history,
79 | tool_schema=self._tools,
80 | cancellation_token=ctx.cancellation_token,
81 | )
82 | logging.info("*" * 12)
83 | logging.info(f"LLM call completed: {messages}")
84 | final_message = messages[-1]
85 | assert isinstance(final_message.content, str)
86 | result = final_message.content
87 | await self._model_context.add_item(
88 | AgentMessage(
89 | session_id=message.session_id,
90 | user_id=self._user_id,
91 | plan_id=message.plan_id,
92 | content=f"{result}",
93 | source=self._agent_name,
94 | step_id=message.step_id,
95 | )
96 | )
97 |
98 | track_event_if_configured(
99 | "Base agent - Added into the cosmos",
100 | {
101 | "session_id": message.session_id,
102 | "user_id": self._user_id,
103 | "plan_id": message.plan_id,
104 | "content": f"{result}",
105 | "source": self._agent_name,
106 | "step_id": message.step_id,
107 | },
108 | )
109 |         except Exception as e:
110 |             logging.exception(f"Error during LLM call: {e}")
111 |             return ActionResponse(step_id=message.step_id, status=StepStatus.failed, message=str(e))
112 |         logging.info(f"Task completed: {result}")
113 |
114 | step.status = StepStatus.completed
115 | step.agent_reply = result
116 | await self._model_context.update_step(step)
117 |
118 | track_event_if_configured(
119 | "Base agent - Updated step and updated into the cosmos",
120 | {
121 | "status": StepStatus.completed,
122 | "session_id": message.session_id,
123 | "agent_reply": f"{result}",
124 | "user_id": self._user_id,
125 | "plan_id": message.plan_id,
126 | "content": f"{result}",
127 | "source": self._agent_name,
128 | "step_id": message.step_id,
129 | },
130 | )
131 |
132 | action_response = ActionResponse(
133 | step_id=step.id,
134 | plan_id=step.plan_id,
135 | session_id=message.session_id,
136 | result=result,
137 | status=StepStatus.completed,
138 | )
139 |
140 | group_chat_manager_id = AgentId("group_chat_manager", self._session_id)
141 | await self.publish_message(action_response, group_chat_manager_id)
142 | # TODO: Agent verbosity
143 | # await self._model_context.add_item(
144 | # AgentMessage(
145 | # session_id=message.session_id,
146 | # plan_id=message.plan_id,
147 | # content=f"{self._agent_name} sending update to GroupChatManager",
148 | # source=self._agent_name,
149 | # step_id=message.step_id,
150 | # )
151 | # )
152 | return action_response
153 |
154 | def save_state(self) -> Mapping[str, Any]:
155 | print("Saving state:")
156 | return {"memory": self._model_context.save_state()}
157 |
158 | def load_state(self, state: Mapping[str, Any]) -> None:
159 | self._model_context.load_state(state["memory"])
160 |
--------------------------------------------------------------------------------
/src/backend/agents/earningcalls_analyst.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from autogen_core import AgentId
4 | from autogen_core import default_subscription
5 | from autogen_ext.models.openai import AzureOpenAIChatCompletionClient
6 | from autogen_core.tools import FunctionTool, Tool
7 | from typing_extensions import Annotated
8 |
9 | from agents.base_agent import BaseAgent
10 | from context.cosmos_memory import CosmosBufferedChatCompletionContext
11 | from helpers.fmputils import *
12 | from helpers.yfutils import *
13 | from datetime import date, timedelta, datetime
14 | from helpers.summarizeutils import summarize, summarizeTopic
15 | from helpers.dcfutils import DcfUtils
16 |
17 | formatting_instructions = "Instructions: return the output of this function call verbatim to the user in markdown."
18 | latestEarnings = None
19 |
20 | # Define earnings call analyst tools (functions)
21 | async def get_earning_calls_transcript(ticker_symbol: str, year:str) -> str:
22 | global latestEarnings
23 | print("Calling get_earning_calls_transcript")
24 | if year is None or year == "latest":
25 | year = datetime.now().year
26 | if datetime.now().month < 3:
27 | year = int(year) - 1
28 |
29 | if latestEarnings is None or len(latestEarnings) == 0:
30 | #latestEarnings = fmpUtils.get_earning_calls(ticker_symbol, year)
31 | latestEarnings = DcfUtils.get_earning_calls(ticker_symbol)
32 | return (
33 | f"##### Get Earning Calls\n"
34 | f"{formatting_instructions}"
35 | )
36 |
37 | async def summarize_transcripts(ticker_symbol:str, year:str) -> str:
38 | global latestEarnings
39 | if latestEarnings is None or len(latestEarnings) == 0:
40 | #latestEarnings = fmpUtils.get_earning_calls(ticker_symbol, year)
41 | latestEarnings = DcfUtils.get_earning_calls(ticker_symbol)
42 | print("*"*35)
43 | print("Calling summarize_transcripts")
44 | summarized = summarize(latestEarnings)
45 | print("*"*35)
46 | return (
47 | f"##### Summarized transcripts\n"
48 | f"**Company Name:** {ticker_symbol}\n"
49 | f"**Summary:** {summarized}\n"
50 | f"{formatting_instructions}"
51 | )
52 |
53 | async def management_positive_outlook(ticker_symbol: str, year:str) -> str:
54 | global latestEarnings
55 | if latestEarnings is None or len(latestEarnings) == 0:
56 | #latestEarnings = fmpUtils.get_earning_calls(ticker_symbol, year)
57 | latestEarnings = DcfUtils.get_earning_calls(ticker_symbol)
58 | print("*"*35)
59 | print("Calling management_positive_outlook")
60 | positiveOutlook = summarizeTopic(latestEarnings, 'Management Positive Outlook')
61 | print("*"*35)
62 | return (
63 | f"##### Management Positive Outlook\n"
64 | f"**Company Name:** {ticker_symbol}\n"
65 | f"**Topic Summary:** {positiveOutlook}\n"
66 | f"{formatting_instructions}"
67 | )
68 |
69 | async def management_negative_outlook(ticker_symbol: str, year:str) -> str:
70 | global latestEarnings
71 | if latestEarnings is None or len(latestEarnings) == 0:
72 | #latestEarnings = fmpUtils.get_earning_calls(ticker_symbol, year)
73 | latestEarnings = DcfUtils.get_earning_calls(ticker_symbol)
74 | print("*"*35)
75 | print("Calling management_negative_outlook")
76 | negativeOutlook = summarizeTopic(latestEarnings, 'Management Negative Outlook')
77 | print("*"*35)
78 |
79 | return (
80 | f"##### Management Negative Outlook\n"
81 | f"**Company Name:** {ticker_symbol}\n"
82 | f"**Topic Summary:** {negativeOutlook}\n"
83 | f"{formatting_instructions}"
84 | )
85 |
86 | async def future_growth_opportunity(ticker_symbol: str, year:str) -> str:
87 | global latestEarnings
88 | if latestEarnings is None or len(latestEarnings) == 0:
89 | #latestEarnings = fmpUtils.get_earning_calls(ticker_symbol, year)
90 | latestEarnings = DcfUtils.get_earning_calls(ticker_symbol)
91 | print("*"*35)
92 | print("Calling management_negative_outlook")
93 | futureGrowth = summarizeTopic(latestEarnings, 'Future Growth Opportunities')
94 | print("*"*35)
95 | return (
96 | f"##### Future Growth and Opportunities\n"
97 | f"**Company Name:** {ticker_symbol}\n\n"
98 | f"**Topic Summary:** {futureGrowth}\n"
99 | f"{formatting_instructions}"
100 | )
101 |
102 | # async def analyze_predict_transcript(ticker_symbol: str) -> str:
103 | # return (
104 | # f"##### Transcription Analyze and Prediction\n"
105 | # f"**Company Name:** {ticker_symbol}\n\n"
106 | # f"{formatting_instructions}"
107 | # )
108 |
109 | # Create the Company Analyst Tools list
110 | def get_earning_calls_analyst_tools() -> List[Tool]:
111 | return [
112 | FunctionTool(
113 | get_earning_calls_transcript,
114 | description="get a earning call's transcript for a company",
115 | ),
116 | FunctionTool(
117 | summarize_transcripts,
118 | description="summarize the earning call's transcript for a company",
119 | ),
120 | FunctionTool(
121 | management_positive_outlook,
122 | description="From the extracted earning call's transcript, identify the management's positive outlook for a company",
123 | ),
124 | FunctionTool(
125 | management_negative_outlook,
126 | description="From the extracted earning call's transcript, identify the management's negative outlook for a company",
127 | ),
128 | FunctionTool(
129 | future_growth_opportunity,
130 | description="From the extracted earning call's transcript, identify the future growth and opportunities for a company",
131 | ),
132 | # FunctionTool(
133 | # analyze_predict_transcript,
134 | # description="Analyze and predict the future of a designated company based on the information from the earning call's transcript",
135 | # ),
136 | ]
137 |
138 |
139 | @default_subscription
140 | class EarningCallsAnalystAgent(BaseAgent):
141 | def __init__(
142 | self,
143 | model_client: AzureOpenAIChatCompletionClient,
144 | session_id: str,
145 | user_id: str,
146 | memory: CosmosBufferedChatCompletionContext,
147 | earning_calls_analyst_tools: List[Tool],
148 | earning_calls_analyst_tool_agent_id: AgentId,
149 | ):
150 | super().__init__(
151 | "EarningCallsAnalystAgent",
152 | model_client,
153 | session_id,
154 | user_id,
155 | memory,
156 | earning_calls_analyst_tools,
157 | earning_calls_analyst_tool_agent_id,
158 | system_message="You are an AI Agent. You have knowledge about the management positive and negative outlook, future growths and opportunities based on the earning call transcripts."
159 | )
160 |
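
The year-defaulting rule in `get_earning_calls_transcript` (treat `None` or `"latest"` as the current year, stepping back one year before March, when the newest transcripts may not be posted yet) is easy to get wrong. Below is a minimal, self-contained sketch of that rule pulled into a testable helper; the name `resolve_transcript_year` is illustrative and not part of the repo.

```python
# Standalone sketch of the year-defaulting rule used in
# get_earning_calls_transcript; the helper name is illustrative only.
from datetime import datetime


def resolve_transcript_year(year: str | None, today: datetime | None = None) -> int:
    """Resolve None/'latest' to the current year, stepping back one year
    before March, when the newest transcript may not be published yet."""
    today = today or datetime.now()
    if year is None or year == "latest":
        resolved = today.year
        if today.month < 3:
            resolved -= 1
        return resolved
    return int(year)


if __name__ == "__main__":
    assert resolve_transcript_year("2023") == 2023
    assert resolve_transcript_year("latest", datetime(2025, 2, 1)) == 2024
    assert resolve_transcript_year(None, datetime(2025, 6, 1)) == 2025
```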
--------------------------------------------------------------------------------
/src/backend/helpers/dcfutils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import requests
3 | import numpy as np
4 | import pandas as pd
5 | from datetime import datetime, timedelta
6 | import random
7 | from helpers.dutils import decorate_all_methods
8 | from helpers.summarizeutils import get_next_weekday
9 | import re
10 | from tenacity import RetryError
11 | from tenacity import retry, stop_after_attempt, wait_random_exponential
12 | from langchain.schema import Document
13 | import json
14 | from typing import List
15 | import ast
16 |
17 | # from finrobot.utils import decorate_all_methods, get_next_weekday
18 | from functools import wraps
19 | from typing import Annotated, List
20 |
21 | def init_dcf_api(func):
22 | @wraps(func)
23 | def wrapper(*args, **kwargs):
24 | global dcf_api_key
25 | if os.environ.get("DCF_API_KEY") is None:
26 | print("Please set the environment variable DCF_API_KEY to use the DCF API.")
27 | return None
28 | else:
29 | dcf_api_key = os.environ["DCF_API_KEY"]
30 | print("DCF api key found successfully.")
31 | return func(*args, **kwargs)
32 |
33 | return wrapper
34 |
35 | @decorate_all_methods(init_dcf_api)
36 | class DcfUtils:
37 |
38 | def correct_date(yr, dt):
39 | """Some transcripts have incorrect date, correcting it
40 |
41 | Args:
42 |             yr (int): actual year
43 | dt (datetime): given date
44 |
45 | Returns:
46 | datetime: corrected date
47 | """
48 | dt = datetime.strptime(dt, "%Y-%m-%d %H:%M:%S")
49 | if dt.year != yr:
50 | dt = dt.replace(year=yr)
51 | return dt.strftime("%Y-%m-%d %H:%M:%S")
52 |
53 | def extract_speakers(cont: str) -> List[str]:
54 | """Extract the list of speakers
55 |
56 | Args:
57 | cont (str): transcript content
58 |
59 | Returns:
60 | List[str]: list of speakers
61 | """
62 | pattern = re.compile(r"\n(.*?):")
63 | matches = pattern.findall(cont)
64 |
65 | return list(set(matches))
66 |
67 | def clean_speakers(speaker):
68 | speaker = re.sub("\n", "", speaker)
69 | speaker = re.sub(":", "", speaker)
70 | return speaker
71 |
72 | def get_earnings_transcript(quarter: str, ticker: str, year: int):
73 | """Get the earnings transcripts
74 |
75 | Args:
76 | quarter (str)
77 | ticker (str)
78 | year (int)
79 | """
80 | response = requests.get(
81 | f"https://discountingcashflows.com/api/transcript/?ticker={ticker}&quarter={quarter}&year={year}&key={dcf_api_key}"
82 | )
83 |
84 | resp_text = json.loads(response.text)
85 | # speakers_list = extract_speakers(resp_text[0]["content"])
86 | corrected_date = DcfUtils.correct_date(resp_text[0]["year"], resp_text[0]["date"])
87 | resp_text[0]["date"] = corrected_date
88 | return resp_text[0]
89 |
90 | def get_earnings_all_quarters_data(quarter: str, ticker: str, year: int):
91 | docs = []
92 | resp_dict = DcfUtils.get_earnings_transcript(quarter, ticker, year)
93 |
94 | content = resp_dict["content"]
95 | pattern = re.compile(r"\n(.*?):")
96 | matches = pattern.finditer(content)
97 |
98 | speakers_list = []
99 | ranges = []
100 | for match_ in matches:
101 | # print(match.span())
102 | span_range = match_.span()
103 | # first_idx = span_range[0]
104 | # last_idx = span_range[1]
105 | ranges.append(span_range)
106 | speakers_list.append(match_.group())
107 | speakers_list = [DcfUtils.clean_speakers(sl) for sl in speakers_list]
108 |
109 | for idx, speaker in enumerate(speakers_list[:-1]):
110 | start_range = ranges[idx][1]
111 | end_range = ranges[idx + 1][0]
112 | speaker_text = content[start_range + 1 : end_range]
113 |
114 | docs.append(
115 | Document(
116 | page_content=speaker_text,
117 | metadata={"speaker": speaker, "quarter": quarter},
118 | )
119 | )
120 |
121 | docs.append(
122 | Document(
123 | page_content=content[ranges[-1][1] :],
124 | metadata={"speaker": speakers_list[-1], "quarter": quarter},
125 | )
126 | )
127 | return docs, speakers_list
128 |
129 | def get_earning_calls(ticker: str) -> str:
130 |
131 | url = f"https://discountingcashflows.com/api/transcript/list/?ticker={ticker}&key={dcf_api_key}"
132 |
133 | response = requests.get(url)
134 |
135 | if response.status_code == 200:
136 | data = ast.literal_eval(response.text)
137 | quarter, year = data[0][0], data[0][1]
138 |
139 | resp_dict = DcfUtils.get_earnings_transcript("Q" + str(quarter), ticker, year)
140 |
141 | transcripts = resp_dict["content"]
142 | return transcripts
143 | else:
144 | return f"Failed to retrieve data: {response.status_code}"
145 |
146 | def get_earnings_all_docs(ticker: str, year: int):
147 | earnings_docs = []
148 | earnings_call_quarter_vals = []
149 | print("Earnings Call Q1")
150 | try:
151 | docs, speakers_list_1 = DcfUtils.get_earnings_all_quarters_data("Q1", ticker, year)
152 | earnings_call_quarter_vals.append("Q1")
153 | earnings_docs.extend(docs)
154 | except RetryError:
155 | print(f"Don't have the data for Q1")
156 | speakers_list_1 = []
157 |
158 | print("Earnings Call Q2")
159 | try:
160 | docs, speakers_list_2 = DcfUtils.get_earnings_all_quarters_data("Q2", ticker, year)
161 | earnings_call_quarter_vals.append("Q2")
162 | earnings_docs.extend(docs)
163 | except RetryError:
164 | print(f"Don't have the data for Q2")
165 | speakers_list_2 = []
166 | print("Earnings Call Q3")
167 | try:
168 | docs, speakers_list_3 = DcfUtils.get_earnings_all_quarters_data("Q3", ticker, year)
169 | earnings_call_quarter_vals.append("Q3")
170 | earnings_docs.extend(docs)
171 | except RetryError:
172 | print(f"Don't have the data for Q3")
173 | speakers_list_3 = []
174 | print("Earnings Call Q4")
175 | try:
176 | docs, speakers_list_4 = DcfUtils.get_earnings_all_quarters_data("Q4", ticker, year)
177 | earnings_call_quarter_vals.append("Q4")
178 | earnings_docs.extend(docs)
179 | except RetryError:
180 | print(f"Don't have the data for Q4")
181 | speakers_list_4 = []
182 | return (
183 | earnings_docs,
184 | earnings_call_quarter_vals,
185 | speakers_list_1,
186 | speakers_list_2,
187 | speakers_list_3,
188 | speakers_list_4,
189 | )
190 |
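
`get_earnings_all_quarters_data` splits a transcript into per-speaker chunks by matching `\n<speaker>:` markers and slicing the text between consecutive match spans. Here is a minimal sketch of that slicing on a toy transcript, with no API key or langchain dependency; the helper name `split_by_speaker` is illustrative.

```python
# Minimal, self-contained sketch of the speaker-splitting slicing used in
# get_earnings_all_quarters_data, run against a toy transcript.
import re
from typing import List, Tuple


def split_by_speaker(content: str) -> List[Tuple[str, str]]:
    """Return (speaker, text) pairs by slicing between '\\n<speaker>:' markers."""
    matches = list(re.finditer(r"\n(.*?):", content))
    segments = []
    for idx, match_ in enumerate(matches):
        speaker = match_.group(1).strip()
        start = match_.span()[1]
        end = matches[idx + 1].span()[0] if idx + 1 < len(matches) else len(content)
        segments.append((speaker, content[start:end].strip()))
    return segments


if __name__ == "__main__":
    toy = (
        "\nOperator: Welcome to the Q1 call."
        "\nCEO: Revenue grew 12% year over year."
        "\nAnalyst: What is the outlook for margins?"
    )
    for speaker, text in split_by_speaker(toy):
        print(f"{speaker}: {text}")
```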
--------------------------------------------------------------------------------
/src/frontend/wwwroot/home/home.js:
--------------------------------------------------------------------------------
1 | (() => {
2 | const notyf = new Notyf({
3 | position: { x: "right", y: "top" },
4 | ripple: false,
5 | duration: 3000,
6 | });
7 | const apiEndpoint = sessionStorage.getItem("apiEndpoint");
8 | const newTaskPrompt = document.getElementById("newTaskPrompt");
9 | const startTaskButton = document.getElementById("startTaskButton");
10 | const startTaskButtonContainer = document.querySelector(".send-button");
11 | const startTaskButtonImg = startTaskButtonContainer
12 | ? startTaskButtonContainer.querySelector("img")
13 | : null;
14 |
15 | newTaskPrompt.focus();
16 |
17 | // Create spinner element
18 | const createSpinner = () => {
19 | if (!document.getElementById("spinnerContainer")) {
20 | const spinnerContainer = document.createElement("div");
21 | spinnerContainer.id = "spinnerContainer";
22 | spinnerContainer.innerHTML = `
23 |
24 |
25 |
26 |
27 |
28 | `;
29 | document.body.appendChild(spinnerContainer);
30 | }
31 | };
32 |
33 | // Function to create and add the overlay
34 | const createOverlay = () => {
35 | let overlay = document.getElementById("overlay");
36 | if (!overlay) {
37 | overlay = document.createElement("div");
38 | overlay.id = "overlay";
39 | document.body.appendChild(overlay);
40 | }
41 | };
42 |
43 | const showOverlay = () => {
44 | const overlay = document.getElementById("overlay");
45 | if (overlay) {
46 | overlay.style.display = "block";
47 | }
48 | createSpinner();
49 | };
50 |
51 | const hideOverlay = () => {
52 | const overlay = document.getElementById("overlay");
53 | if (overlay) {
54 | overlay.style.display = "none";
55 | }
56 | removeSpinner();
57 | };
58 |
59 | // Remove spinner element
60 | const removeSpinner = () => {
61 | const spinnerContainer = document.getElementById("spinnerContainer");
62 | if (spinnerContainer) {
63 | spinnerContainer.remove();
64 | }
65 | };
66 |
67 | // Function to update button image based on textarea content
68 | const updateButtonImage = () => {
69 | if (startTaskButtonImg) {
70 | if (newTaskPrompt.value.trim() === "") {
71 | startTaskButtonImg.src = "../assets/images/air-button.svg";
72 | startTaskButton.disabled = true;
73 | } else {
74 | startTaskButtonImg.src = "/assets/Send.svg";
75 | startTaskButtonImg.style.width = "16px";
76 | startTaskButtonImg.style.height = "16px";
77 | startTaskButton.disabled = false;
78 | }
79 | }
80 | };
81 |
82 | const startTask = () => {
83 | startTaskButton.addEventListener("click", (event) => {
84 | if (startTaskButton.disabled) {
85 | return;
86 | }
87 | const sessionId =
88 | "sid_" + new Date().getTime() + "_" + Math.floor(Math.random() * 10000);
89 |
90 | newTaskPrompt.disabled = true;
91 | startTaskButton.disabled = true;
92 | startTaskButton.classList.add("is-loading");
93 | createOverlay();
94 | showOverlay();
95 | window.headers.then((headers) => {
96 | fetch(apiEndpoint + "/input_task", {
97 | method: "POST",
98 | headers: headers,
99 | body: JSON.stringify({
100 | session_id: sessionId,
101 | description: newTaskPrompt.value,
102 | }),
103 | })
104 | .then((response) => response.json())
105 | .then((data) => {
106 |           if (data.status === "Plan not created") {
107 | notyf.error("Unable to create plan for this task.");
108 | newTaskPrompt.disabled = false;
109 | startTaskButton.disabled = false;
110 |               startTaskButton.classList.remove("is-loading"); hideOverlay(); return;
111 | }
112 |
113 | console.log("startTaskButton", data);
114 |
115 | newTaskPrompt.disabled = false;
116 | startTaskButton.disabled = false;
117 | startTaskButton.classList.remove("is-loading");
118 |
119 | window.parent.postMessage(
120 | {
121 | action: "taskStarted",
122 | session_id: data.session_id,
123 | task_id: data.plan_id,
124 | task_name: newTaskPrompt.value,
125 | },
126 | "*"
127 | );
128 |
129 | newTaskPrompt.value = "";
130 |
131 | // Reset character count to 0
132 | const charCount = document.getElementById("charCount");
133 | if (charCount) {
134 | charCount.textContent = "0";
135 | }
136 | updateButtonImage();
137 | notyf.success("Task created successfully. AI agents are on it!");
138 |
139 | // Remove spinner and hide overlay
140 | removeSpinner();
141 | hideOverlay();
142 | })
143 | .catch((error) => {
144 | console.error("Error:", error);
145 | newTaskPrompt.disabled = false;
146 | startTaskButton.disabled = false;
147 | startTaskButton.classList.remove("is-loading");
148 |
149 | // Remove spinner and hide overlay
150 | removeSpinner();
151 | hideOverlay();
152 | });
153 | });
154 | });
155 | };
156 |
157 | const quickTasks = () => {
158 | document.querySelectorAll(".quick-task").forEach((task) => {
159 | task.addEventListener("click", (event) => {
160 | const quickTaskPrompt =
161 | task.querySelector(".quick-task-prompt").innerHTML;
162 | newTaskPrompt.value = quickTaskPrompt.trim().replace(/\s+/g, " ");
163 | const charCount = document.getElementById("charCount");
164 | // Update character count
165 | charCount.textContent = newTaskPrompt.value.length;
166 | updateButtonImage();
167 | newTaskPrompt.focus();
168 | });
169 | });
170 | };
171 | const handleTextAreaTyping = () => {
172 | const newTaskPrompt = document.getElementById("newTaskPrompt");
173 | newTaskPrompt.addEventListener("input", () => {
174 | // const textInput = document.getElementById("newTaskPrompt");
175 | const charCount = document.getElementById("charCount");
176 |
177 | // Update character count
178 | charCount.textContent = newTaskPrompt.value.length;
179 |
180 | // Dynamically adjust height
181 | newTaskPrompt.style.height = "auto";
182 | newTaskPrompt.style.height = newTaskPrompt.scrollHeight + "px";
183 |
184 | updateButtonImage();
185 | });
186 |
187 | newTaskPrompt.addEventListener("keydown", (event) => {
188 | const textValue = newTaskPrompt.value.trim();
189 | // If Enter is pressed without Shift, and the textarea is empty, prevent default behavior
190 | if (event.key === "Enter" && !event.shiftKey) {
191 | if (textValue === "") {
192 | event.preventDefault(); // Disable Enter when textarea is empty
193 | } else {
194 |           // If there's content, prevent the newline and submit the task instead
195 |           event.preventDefault(); startTaskButton.click();
196 | }
197 | } else if (event.key === "Enter" && event.shiftKey) {
198 | return;
199 | }
200 | });
201 | };
202 |
203 | updateButtonImage();
204 | startTask();
205 | quickTasks();
206 | handleTextAreaTyping();
207 | })();
208 |
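
For reference, this is the `/input_task` exchange as home.js drives it: the frontend POSTs `session_id` and `description`, then reads `status`, `session_id`, and `plan_id` from the response. A hedged sketch in Python; the endpoint URL and the omission of auth headers (home.js sends `window.headers`) are placeholder assumptions.

```python
# Hedged sketch of the /input_task request/response shape as seen from home.js;
# the base URL is a placeholder and real deployments attach auth headers.
import requests

payload = {
    "session_id": "sid_demo_0001",  # home.js builds this from a timestamp plus a random suffix
    "description": "Analyze MSFT's latest earnings call",
}
resp = requests.post("http://localhost:8000/input_task", json=payload, timeout=30)
data = resp.json()

if data.get("status") == "Plan not created":
    print("Unable to create a plan for this task.")
else:
    print("Plan created:", data.get("plan_id"), "session:", data.get("session_id"))
```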
--------------------------------------------------------------------------------
/src/backend/agents/forecaster.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from autogen_core import AgentId
4 | from autogen_core import default_subscription
5 | from autogen_ext.models.openai import AzureOpenAIChatCompletionClient
6 | from autogen_core.tools import FunctionTool, Tool
7 |
8 | from agents.base_agent import BaseAgent
9 | from context.cosmos_memory import CosmosBufferedChatCompletionContext
10 | from helpers.fmputils import *
11 | from helpers.yfutils import *
12 | from helpers.analyzer import *
13 | from typing import List, Dict, Any
14 | import json
15 | from textwrap import dedent
16 | formatting_instructions = "Instructions: return the output of this function call verbatim to the user in markdown."
17 |
18 | async def analyze_and_predict(analysis_result: Dict[str, Any]) -> str:
19 | """
20 | Takes the JSON output from ExtendedCombinedAnalysisAgent (technical indicators,
21 | candlestick patterns, fundamentals, news sentiment, final decision),
22 | and uses an LLM to produce a structured forecast with:
23 | 1) A multi-section format (Introduction, Technical, Fundamental, etc.)
24 | 2) An explanation of probability/score as confidence (e.g., 70% => "moderately strong")
25 | 3) A final recommendation
26 | 4) Legal disclaimers
27 |
28 | Returns a markdown or text response with these structured sections.
29 | """
30 | # Convert analysis_result into a JSON string
31 | analysis_json_str = json.dumps(analysis_result, indent=2)
32 |
33 | # Extract the final probability from the JSON for prompt usage
34 | final_decision = analysis_result.get("final_decision", {})
35 | probability_value = final_decision.get("probability", None)
36 | rating_value = final_decision.get("rating", "hold")
37 |
38 | # We can provide instructions to interpret the confidence level:
39 | # e.g., 0.0-0.33 => "low confidence", 0.33-0.66 => "moderate confidence", 0.66-1.0 => "high confidence"
40 | # We'll do a bit of logic to embed in the prompt. Alternatively, let the LLM do it entirely.
41 | confidence_descriptor = "moderate"
42 | if probability_value is not None:
43 | if probability_value <= 0.33:
44 | confidence_descriptor = "low"
45 | elif probability_value >= 0.66:
46 | confidence_descriptor = "high"
47 | else:
48 | confidence_descriptor = "moderate"
49 |
50 | # Construct a detailed prompt with strict output structure
51 | prompt = f"""
52 | You are a specialized financial analysis LLM. You have received a JSON structure that
53 | represents an extended analysis of a stock, including:
54 | - Technical signals (RSI, MACD, Bollinger, EMA crossover, Stochastics, ADX)
55 | - Candlestick pattern detections (TA-Lib)
56 | - Basic fundamentals (P/E ratios, etc.)
57 | - News sentiment
58 | - A final numeric probability (score) and rating (Buy/Sell/Hold).
59 |
60 | The JSON data is:
61 |
62 | ```
63 | {analysis_json_str}
64 | ```
65 |
66 | **Please return your answer in the following sections:**
67 |
68 | 1) **Introduction**
69 | - Briefly introduce the analysis.
70 |
71 | 2) **Technical Overview**
72 | - Summarize the key technical indicators and any candlestick patterns.
73 | - Explain whether they are bullish, bearish, or neutral.
74 |
75 | 3) **Fundamental Overview**
76 | - Mention any notable fundamental data (like forwardPE, trailingPE, etc.)
77 | and how it influences the outlook.
78 |
79 | 4) **News & Sentiment**
80 | - Highlight the sentiment score (range: -1.0 to +1.0).
81 | Explain if it's a tailwind (positive) or headwind (negative).
82 |
83 | 5) **Probability & Confidence**
84 | - The system’s final probability is **{probability_value}** (range: 0.0 to 1.0).
85 | - Interpret it as **{confidence_descriptor}** confidence
86 | (e.g., <=0.33 => "low", 0.33-0.66 => "moderate", >=0.66 => "high").
87 | - Elaborate how confident or uncertain this rating might be based on
88 | conflicting signals, volatility, etc.
89 |
90 | 6) **Final Recommendation**
91 | - Based on the system’s final rating: **{rating_value}**.
92 | - Explain briefly why you agree or disagree, or how you interpret it.
93 |
94 | 7) **Disclaimers**
95 | - Include disclaimers such as "Past performance is not indicative of future results."
96 | - Remind the user that this is not guaranteed investment advice.
97 | - Encourage further research before making any decisions.
98 |
99 | Please format your response in **Markdown**, with headings for each section
100 | and bullet points where appropriate.
101 | """
102 |
103 | return prompt
104 |     # The prompt is returned as the tool result; the agent's own LLM loop turns it into the forecast.
105 |     # To call a model directly with this prompt instead, you'd do something like:
106 | # response = await model_client.get_chat_completion(
107 | # system_message="You are a financial analysis LLM.",
108 | # user_message=prompt,
109 | # temperature=0.7,
110 | # max_tokens=1200,
111 | # )
112 | #
113 |
114 | # Create the Company Analyst Tools list
115 | def get_forecaster_tools() -> List[Tool]:
116 | return [
117 | FunctionTool(
118 | analyze_and_predict,
119 | description=(
120 | "Interprets the JSON output from ExtendedCombinedAnalysisAgent. "
121 | "Generates a final Buy/Sell/Hold recommendation with a structured rationale, "
122 | "risk factors, disclaimers, and an explanation of the probability or confidence."
123 | ),
124 | ),
125 | ]
126 |
127 |
128 | @default_subscription
129 | class ForecasterAgent(BaseAgent):
130 | def __init__(
131 | self,
132 | model_client: AzureOpenAIChatCompletionClient,
133 | session_id: str,
134 | user_id: str,
135 | memory: CosmosBufferedChatCompletionContext,
136 | forecaster_tools: List[Tool],
137 | forecaster_tool_agent_id: AgentId,
138 | ):
139 | super().__init__(
140 | "ForecasterAgent",
141 | model_client,
142 | session_id,
143 | user_id,
144 | memory,
145 | forecaster_tools,
146 | forecaster_tool_agent_id,
147 | #system_message="You are an AI Agent. You have knowledge about the SEC annual (10-K) and quarterly (10-Q) reports. SEC reports includes the information about income statement, balance sheet, cash flow, risk assessment, competitor analysis, business highlights and business information."
148 | system_message=dedent(
149 | f"""
150 | You are a Forecaster and Analysis Agent.
151 | Your role is to interpret the output of an extended technical & fundamental analysis pipeline
152 |                 and additional data from one or more of the following:
153 | - Business Overview
154 | - Risk Assessment
155 | - Market Position
156 | - Income Statement
157 | - Segment Statement
158 | - Income Summarization
159 | - Competitor Analysis
160 | - Business Highlights
161 | - Business Information
162 | - Earnings Call Transcripts
163 | - SEC Reports
164 | - Analyst Reports
165 | - News
166 | - Stock Price Data
167 | Produce a final recommendation (Buy, Sell, or Hold) with
168 | a structured format and thorough, bullet-pointed explanation.
169 | You must mention the final probability, interpret it as confidence level,
170 |                 and provide disclaimers like "Past performance is not indicative of future results."
171 | """
172 | )
173 | )
174 |
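
`analyze_and_predict` folds the final probability into a coarse confidence label before embedding it in the prompt. Below is a standalone version of that mapping with the same thresholds, so it can be sanity-checked in isolation; `describe_confidence` is an illustrative name, not part of the repo.

```python
# Standalone sketch of the probability -> confidence mapping embedded in
# analyze_and_predict; describe_confidence is an illustrative name only.
from typing import Optional


def describe_confidence(probability: Optional[float]) -> str:
    """Map a 0.0-1.0 probability to the same low/moderate/high buckets
    used when building the forecasting prompt."""
    if probability is None:
        return "moderate"
    if probability <= 0.33:
        return "low"
    if probability >= 0.66:
        return "high"
    return "moderate"


if __name__ == "__main__":
    assert describe_confidence(None) == "moderate"
    assert describe_confidence(0.2) == "low"
    assert describe_confidence(0.5) == "moderate"
    assert describe_confidence(0.9) == "high"
```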
--------------------------------------------------------------------------------
/src/backend/helpers/yfutils.py:
--------------------------------------------------------------------------------
1 | import yfinance as yf
2 | from typing import Annotated, Callable, Any, Optional
3 | from pandas import DataFrame
4 | from functools import wraps
5 | from helpers.dutils import decorate_all_methods
6 | from helpers.summarizeutils import get_next_weekday, save_output, SavePathType
7 | import random
8 | from datetime import datetime
9 |
10 | def init_ticker(func: Callable) -> Callable:
11 | """Decorator to initialize yf.Ticker and pass it to the function."""
12 |
13 | @wraps(func)
14 | def wrapper(symbol: Annotated[str, "ticker symbol"], *args, **kwargs) -> Any:
15 | ticker = yf.Ticker(symbol)
16 | return func(ticker, *args, **kwargs)
17 |
18 | return wrapper
19 |
20 |
21 | @decorate_all_methods(init_ticker)
22 | class yfUtils:
23 |
24 | def get_stock_data(
25 | symbol: Annotated[str, "ticker symbol"],
26 | start_date: Annotated[
27 | str, "start date for retrieving stock price data, YYYY-mm-dd"
28 | ],
29 | end_date: Annotated[
30 | str, "end date for retrieving stock price data, YYYY-mm-dd"
31 | ],
32 | save_path: SavePathType = None,
33 | ) -> DataFrame:
34 | """retrieve stock price data for designated ticker symbol"""
35 |         ticker = symbol  # init_ticker has already replaced the symbol string with a yf.Ticker
36 | stock_data = ticker.history(start=start_date, end=end_date)
37 | save_output(stock_data, f"Stock data for {ticker.ticker}", save_path)
38 | return stock_data
39 |
40 | def get_stock_info(
41 | symbol: Annotated[str, "ticker symbol"],
42 | ) -> dict:
43 | """Fetches and returns latest stock information."""
44 | ticker = symbol
45 | stock_info = ticker.info
46 | return stock_info
47 |
48 | def get_company_info(
49 | symbol: Annotated[str, "ticker symbol"],
50 | save_path: Optional[str] = None,
51 | ) -> DataFrame:
52 | """Fetches and returns company information as a DataFrame."""
53 | ticker = symbol
54 | info = ticker.info
55 | company_info = {
56 | "Company Name": info.get("shortName", "N/A"),
57 | "Industry": info.get("industry", "N/A"),
58 | "Sector": info.get("sector", "N/A"),
59 | "Country": info.get("country", "N/A"),
60 | "Website": info.get("website", "N/A"),
61 | }
62 | company_info_df = DataFrame([company_info])
63 | if save_path:
64 | company_info_df.to_csv(save_path)
65 | print(f"Company info for {ticker.ticker} saved to {save_path}")
66 | return company_info_df
67 |
68 | def get_stock_dividends(
69 | symbol: Annotated[str, "ticker symbol"],
70 | save_path: Optional[str] = None,
71 | ) -> DataFrame:
72 | """Fetches and returns the latest dividends data as a DataFrame."""
73 | ticker = symbol
74 | dividends = ticker.dividends
75 | if save_path:
76 | dividends.to_csv(save_path)
77 | print(f"Dividends for {ticker.ticker} saved to {save_path}")
78 | return dividends
79 |
80 | def get_income_stmt(symbol: Annotated[str, "ticker symbol"]) -> DataFrame:
81 | """Fetches and returns the latest income statement of the company as a DataFrame."""
82 | ticker = symbol
83 | income_stmt = ticker.financials
84 | return income_stmt
85 |
86 | def get_balance_sheet(symbol: Annotated[str, "ticker symbol"]) -> DataFrame:
87 | """Fetches and returns the latest balance sheet of the company as a DataFrame."""
88 | ticker = symbol
89 | balance_sheet = ticker.balance_sheet
90 | return balance_sheet
91 |
92 | def get_cash_flow(symbol: Annotated[str, "ticker symbol"]) -> DataFrame:
93 | """Fetches and returns the latest cash flow statement of the company as a DataFrame."""
94 | ticker = symbol
95 | cash_flow = ticker.cashflow
96 | return cash_flow
97 |
98 | def get_company_news(
99 | symbol: Annotated[str, "ticker symbol"],
100 | start_date: Annotated[
101 | str,
102 | "start date of the search period for the company's basic financials, yyyy-mm-dd",
103 | ],
104 | end_date: Annotated[
105 | str,
106 | "end date of the search period for the company's basic financials, yyyy-mm-dd",
107 | ],
108 | max_news_num: Annotated[
109 | int, "maximum number of news to return, default to 10"
110 | ] = 25,
111 | ) -> DataFrame:
112 | """Get the url and filing date of the 10-K report for a given stock and year"""
113 |
114 | ticker = symbol
115 | tickerNews = ticker.news
116 |
117 | if tickerNews:
118 | news = [
119 | {
120 | #"date": datetime.fromtimestamp(n["providerPublishTime"]).strftime("%Y-%m-%d %H%M%S"),
121 | "date": n['content']["pubDate"],
122 | "headline": n['content']["title"],
123 | "summary": n['content']["summary"],
124 | }
125 | for n in tickerNews
126 | ]
127 | if len(news) > max_news_num:
128 |             news = random.sample(news, k=max_news_num)
129 | news.sort(key=lambda x: x["date"])
130 | output = DataFrame(news)
131 | return output
132 | else:
133 | return f"Failed to retrieve data: {symbol}"
134 |
135 | def get_analyst_recommendations(symbol: Annotated[str, "ticker symbol"]) -> tuple:
136 | """Fetches the latest analyst recommendations and returns the most common recommendation and its count."""
137 | ticker = symbol
138 | recommendations = ticker.recommendations
139 | if recommendations.empty:
140 | return None, 0 # No recommendations available
141 |
142 | # Assuming 'period' column exists and needs to be excluded
143 | row_0 = recommendations.iloc[0, 1:] # Exclude 'period' column if necessary
144 |
145 | # Find the maximum voting result
146 | max_votes = row_0.max()
147 | majority_voting_result = row_0[row_0 == max_votes].index.tolist()
148 |
149 | return majority_voting_result[0], max_votes
150 |
151 | def get_fundamentals(symbol: Annotated[str, "ticker symbol"]) -> DataFrame:
152 | """Fetches and returns the latest fundamentals data as a DataFrame."""
153 | ticker = symbol
154 | info = ticker.info # yfinance's fundamental data
155 | # Some commonly used fields: 'forwardPE', 'trailingPE', 'priceToBook', 'beta', 'profitMargins', etc.
156 | # Not all fields are guaranteed to exist for every ticker.
157 | fundamentals = {
158 | "forwardPE": info.get("forwardPE", None),
159 | "trailingPE": info.get("trailingPE", None),
160 | "priceToBook": info.get("priceToBook", None),
161 | "beta": info.get("beta", None),
162 | "bookValue": info.get("bookValue", None),
163 | "trailingEps": info.get("trailingEps", None),
164 | "forwardEps": info.get("forwardEps", None),
165 | "enterpriseToRevenue": info.get("enterpriseToRevenue", None),
166 | "enterpriseToEbitda": info.get("enterpriseToEbitda", None),
167 | "debtToEquity": info.get("debtToEquity", None),
168 | "returnOnEquity": info.get("returnOnEquity", None),
169 | "returnOnAssets": info.get("returnOnAssets", None),
170 | "currentRatio": info.get("currentRatio", None),
171 | "quickRatio": info.get("quickRatio", None),
172 | "trailingPegRatio": info.get("trailingPegRatio", None),
173 | }
174 |
175 | fundamentals_df = DataFrame([fundamentals])
176 | return fundamentals_df
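
Because `init_ticker` wraps every method of `yfUtils`, callers pass a plain symbol string and the method body receives a `yf.Ticker`. A usage sketch under the assumption that yfinance is installed and network access is available; the symbol and date range are placeholders.

```python
# Usage sketch for yfUtils, assuming yfinance is installed and network
# access is available; the symbol and dates are placeholders.
from helpers.yfutils import yfUtils

if __name__ == "__main__":
    # init_ticker turns the first argument into a yf.Ticker before the
    # method body runs, so callers only pass the symbol string.
    prices = yfUtils.get_stock_data("MSFT", "2024-01-01", "2024-06-30")
    info = yfUtils.get_company_info("MSFT")
    print(prices.tail())
    print(info.to_string(index=False))
```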
--------------------------------------------------------------------------------
/src/frontend/wwwroot/assets/app-logo.svg:
--------------------------------------------------------------------------------
1 |
65 |
--------------------------------------------------------------------------------
/src/backend/helpers/secutils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import requests
3 | from sec_api import ExtractorApi, QueryApi, RenderApi
4 | from functools import wraps
5 | from typing import Annotated
6 | from helpers.fmputils import fmpUtils
7 | from helpers.dutils import decorate_all_methods
8 | from helpers.summarizeutils import SavePathType
9 |
10 | CACHE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), ".cache")
11 | PDF_GENERATOR_API = "https://api.sec-api.io/filing-reader"
12 |
13 |
14 | def init_sec_api(func):
15 | @wraps(func)
16 | def wrapper(*args, **kwargs):
17 | global extractor_api, query_api, render_api
18 | if os.environ.get("SEC_API_KEY") is None:
19 | print("Please set the environment variable SEC_API_KEY to use sec_api.")
20 | return None
21 | else:
22 | extractor_api = ExtractorApi(os.environ["SEC_API_KEY"])
23 | query_api = QueryApi(os.environ["SEC_API_KEY"])
24 | render_api = RenderApi(os.environ["SEC_API_KEY"])
25 | print("Sec Api initialized")
26 | return func(*args, **kwargs)
27 |
28 | return wrapper
29 |
30 |
31 | @decorate_all_methods(init_sec_api)
32 | class SECUtils:
33 |
34 | def get_10k_metadata(
35 | ticker: Annotated[str, "ticker symbol"],
36 | start_date: Annotated[
37 | str, "start date of the 10-k file search range, in yyyy-mm-dd format"
38 | ],
39 | end_date: Annotated[
40 | str, "end date of the 10-k file search range, in yyyy-mm-dd format"
41 | ],
42 | ):
43 | """
44 | Search for 10-k filings within a given time period, and return the meta data of the latest one
45 | """
46 | query = {
47 | "query": f'ticker:"{ticker}" AND formType:"10-K" AND filedAt:[{start_date} TO {end_date}]',
48 | "from": 0,
49 | "size": 10,
50 | "sort": [{"filedAt": {"order": "desc"}}],
51 | }
52 | response = query_api.get_filings(query)
53 | if response["filings"]:
54 | return response["filings"][0]
55 | return None
56 |
57 | def download_10k_filing(
58 | ticker: Annotated[str, "ticker symbol"],
59 | start_date: Annotated[
60 | str, "start date of the 10-k file search range, in yyyy-mm-dd format"
61 | ],
62 | end_date: Annotated[
63 | str, "end date of the 10-k file search range, in yyyy-mm-dd format"
64 | ],
65 | save_folder: Annotated[
66 | str, "name of the folder to store the downloaded filing"
67 | ],
68 | ) -> str:
69 | """Download the latest 10-K filing as htm for a given ticker within a given time period."""
70 | metadata = SECUtils.get_10k_metadata(ticker, start_date, end_date)
71 | if metadata:
72 | ticker = metadata["ticker"]
73 | url = metadata["linkToFilingDetails"]
74 |
75 | try:
76 | date = metadata["filedAt"][:10]
77 | file_name = date + "_" + metadata["formType"] + "_" + url.split("/")[-1]
78 |
79 | if not os.path.isdir(save_folder):
80 | os.makedirs(save_folder)
81 |
82 | file_content = render_api.get_filing(url)
83 | file_path = os.path.join(save_folder, file_name)
84 | with open(file_path, "w") as f:
85 | f.write(file_content)
86 | return f"{ticker}: download succeeded. Saved to {file_path}"
87 |             except Exception as e:
88 |                 return f"❌ {ticker}: download failed: {url}, {e}"
89 | else:
90 | return f"No 2023 10-K filing found for {ticker}"
91 |
92 | def download_10k_pdf(
93 | ticker: Annotated[str, "ticker symbol"],
94 | start_date: Annotated[
95 | str, "start date of the 10-k file search range, in yyyy-mm-dd format"
96 | ],
97 | end_date: Annotated[
98 | str, "end date of the 10-k file search range, in yyyy-mm-dd format"
99 | ],
100 | save_folder: Annotated[
101 | str, "name of the folder to store the downloaded pdf filing"
102 | ],
103 | ) -> str:
104 | """Download the latest 10-K filing as pdf for a given ticker within a given time period."""
105 | metadata = SECUtils.get_10k_metadata(ticker, start_date, end_date)
106 | if metadata:
107 | ticker = metadata["ticker"]
108 | filing_url = metadata["linkToFilingDetails"]
109 |
110 | try:
111 | date = metadata["filedAt"][:10]
112 | print(filing_url.split("/")[-1])
113 | file_name = (
114 | date
115 | + "_"
116 | + metadata["formType"].replace("/A", "")
117 | + "_"
118 | + filing_url.split("/")[-1]
119 | + ".pdf"
120 | )
121 |
122 | if not os.path.isdir(save_folder):
123 | os.makedirs(save_folder)
124 |
125 | api_url = f"{PDF_GENERATOR_API}?token={os.environ['SEC_API_KEY']}&type=pdf&url={filing_url}"
126 | response = requests.get(api_url, stream=True)
127 | response.raise_for_status()
128 |
129 | file_path = os.path.join(save_folder, file_name)
130 | with open(file_path, "wb") as file:
131 | for chunk in response.iter_content(chunk_size=8192):
132 | file.write(chunk)
133 | return f"{ticker}: download succeeded. Saved to {file_path}"
134 | except Exception as e:
135 | return f"❌ {ticker}: downloaded failed: {filing_url}, {e}"
136 | else:
137 | return f"No 2023 10-K filing found for {ticker}"
138 |
139 | def get_10k_section(
140 | ticker_symbol: Annotated[str, "ticker symbol"],
141 | fyear: Annotated[str, "fiscal year of the 10-K report"],
142 | section: Annotated[
143 | str | int,
144 | "Section of the 10-K report to extract, should be in [1, 1A, 1B, 2, 3, 4, 5, 6, 7, 7A, 8, 9, 9A, 9B, 10, 11, 12, 13, 14, 15]",
145 | ],
146 | report_address: Annotated[
147 | str,
148 | "URL of the 10-K report, if not specified, will get report url from fmp api",
149 | ] = None,
150 | save_path: SavePathType = None,
151 | ) -> str:
152 | """
153 | Get a specific section of a 10-K report from the SEC API.
154 | """
155 | if isinstance(section, int):
156 | section = str(section)
157 | if section not in [
158 | "1A",
159 | "1B",
160 | "7A",
161 | "9A",
162 | "9B",
163 | ] + [str(i) for i in range(1, 16)]:
164 | raise ValueError(
165 | "Section must be in [1, 1A, 1B, 2, 3, 4, 5, 6, 7, 7A, 8, 9, 9A, 9B, 10, 11, 12, 13, 14, 15]"
166 | )
167 |
168 | # os.makedirs(f"{self.project_dir}/10k", exist_ok=True)
169 |
170 | # report_name = f"{self.project_dir}/10k/section_{section}.txt"
171 |
172 | # if USE_CACHE and os.path.exists(report_name):
173 | # with open(report_name, "r") as f:
174 | # section_text = f.read()
175 | # else:
176 | if report_address is None:
177 | report_address = fmpUtils.get_sec_report(ticker_symbol, fyear)
178 | if report_address.startswith("Link: "):
179 |                 report_address = report_address.removeprefix("Link: ").split()[0]
180 | else:
181 | return report_address # debug info
182 |
183 | cache_path = os.path.join(
184 | CACHE_PATH, f"sec_utils/{ticker_symbol}_{fyear}_{section}.txt"
185 | )
186 | if os.path.exists(cache_path):
187 | with open(cache_path, "r") as f:
188 | section_text = f.read()
189 | else:
190 | section_text = extractor_api.get_section(report_address, section, "text")
191 | os.makedirs(os.path.dirname(cache_path), exist_ok=True)
192 | with open(cache_path, "w") as f:
193 | f.write(section_text)
194 |
195 | if save_path:
196 | os.makedirs(os.path.dirname(save_path), exist_ok=True)
197 | with open(save_path, "w") as f:
198 | f.write(section_text)
199 |
200 | return section_text
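
A usage sketch for `SECUtils.get_10k_section`, assuming `SEC_API_KEY` is set (and an FMP key when `report_address` is not supplied, since the URL falls back to `fmpUtils.get_sec_report`). Ticker, fiscal year, and section are placeholders; section 7 is the MD&A item of a 10-K.

```python
# Usage sketch for SECUtils.get_10k_section, assuming SEC_API_KEY is set
# and the FMP fallback for the report URL is available.
from helpers.secutils import SECUtils

if __name__ == "__main__":
    # Section 7 is Management's Discussion and Analysis in a 10-K filing.
    mdna = SECUtils.get_10k_section("MSFT", "2023", "7")
    # Without a valid key the decorated method returns None, so guard the slice.
    print(mdna[:500] if isinstance(mdna, str) else mdna)
```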
--------------------------------------------------------------------------------