Ready to receive inputs.
;
97 | case NodeType.OUTPUT_DISPLAY:
98 | return onMouseDown(e, node.id)}
115 | >
116 |
117 |
118 | {node.data.inputs.map((input) => (
119 |
onHandleMouseDown(e, node.id, input.id, 'input')} />
120 | ))}
121 | {node.data.outputs.map((output) => (
122 | onHandleMouseDown(e, node.id, output.id, 'output')} />
123 | ))}
124 |
125 |
126 | {renderNodeContent()}
127 |
128 |
129 | onResizeMouseDown(e, node.id)}
133 | />
134 |
135 |
136 | );
137 | };
138 |
139 | export default React.memo(NodeComponent);
--------------------------------------------------------------------------------
/App.tsx:
--------------------------------------------------------------------------------
1 |
2 | import React, { useState, useCallback, useRef, useEffect } from 'react';
3 | import type { Node, Edge, Point } from './types';
4 | import { NodeType, NodeStatus } from './types';
5 | import Sidebar from './components/Sidebar';
6 | import NodeComponent from './components/Node';
7 | import EdgeComponent from './components/Edge';
8 | import { PlayIcon } from './components/icons';
9 | import * as geminiService from './services/geminiService';
10 |
11 | const createNode = (type: NodeType, position: Point): Node => {
12 | const id = crypto.randomUUID();
13 | const baseNode = { id, type, position, data: { status: NodeStatus.IDLE, content: null, inputs: [], outputs: [], scale: 1 } };
14 |
15 | switch (type) {
16 | case NodeType.TEXT_INPUT:
17 | return { ...baseNode, data: { ...baseNode.data, label: 'Text Input', outputs: [{ id: `${id}-output`, label: 'Text', type: 'text' }] } };
18 | case NodeType.IMAGE_INPUT:
19 | return { ...baseNode, data: { ...baseNode.data, label: 'Image Input', outputs: [{ id: `${id}-output`, label: 'Image', type: 'image' }] } };
20 | case NodeType.TEXT_GENERATOR:
21 | return { ...baseNode, data: { ...baseNode.data, label: 'Text Generator', inputs: [{ id: `${id}-input`, label: 'Prompt', type: 'text' }], outputs: [{ id: `${id}-output`, label: 'Text', type: 'text' }] } };
22 | case NodeType.IMAGE_EDITOR:
23 | return { ...baseNode, data: { ...baseNode.data, label: 'Image Editor', inputs: [{ id: `${id}-input-image`, label: 'Image', type: 'image' }, { id: `${id}-input-text`, label: 'Prompt', type: 'text' }], outputs: [{ id: `${id}-output-image`, label: 'Image', type: 'image' }, { id: `${id}-output-text`, label: 'Text', type: 'text' }] } };
24 | case NodeType.VIDEO_GENERATOR:
25 | return { ...baseNode, data: { ...baseNode.data, label: 'Video Generator', inputs: [{ id: `${id}-input-image`, label: 'Image (Opt.)', type: 'image' }, { id: `${id}-input-text`, label: 'Prompt', type: 'text' }], outputs: [{ id: `${id}-output`, label: 'Video', type: 'video' }] } };
26 | case NodeType.OUTPUT_DISPLAY:
27 | return { ...baseNode, data: { ...baseNode.data, label: 'Output', inputs: [{ id: `${id}-input`, label: 'Input', type: 'any' }] } };
28 | default:
29 | throw new Error("Unknown node type");
30 | }
31 | };
32 |
33 | const App: React.FC = () => {
34 | const [nodes, setNodes] = useState
>({});
35 | const [edges, setEdges] = useState>({});
36 | const [isProcessing, setIsProcessing] = useState(false);
37 | const [viewTransform, setViewTransform] = useState({ scale: 1, x: 0, y: 0 });
38 | const [connectingEdgeEnd, setConnectingEdgeEnd] = useState(null);
39 |
40 | const draggingNode = useRef<{ id: string; offset: Point } | null>(null);
41 | const connectingEdge = useRef<{ sourceNodeId: string; sourceHandleId: string; } | null>(null);
42 | const panState = useRef<{ startX: number, startY: number } | null>(null);
43 | const resizingNode = useRef<{ id: string, startX: number, startScale: number } | null>(null);
44 | const canvasRef = useRef(null);
45 |
46 | const addNode = useCallback((type: NodeType) => {
47 | const newNode = createNode(type, {
48 | x: (300 - viewTransform.x) / viewTransform.scale,
49 | y: (150 - viewTransform.y) / viewTransform.scale
50 | });
51 | setNodes(prev => ({ ...prev, [newNode.id]: newNode }));
52 | }, [viewTransform]);
53 |
54 | const updateNodeData = useCallback((nodeId: string, data: Partial) => {
55 | setNodes(prev => {
56 | if (!prev[nodeId]) return prev;
57 | return {
58 | ...prev,
59 | [nodeId]: { ...prev[nodeId], data: { ...prev[nodeId].data, ...data } },
60 | }
61 | });
62 | }, []);
63 |
64 | const getHandlePosition = useCallback((nodeId: string, handleId: string): Point => {
65 | const handleElem = document.getElementById(handleId);
66 | if (!handleElem) return { x: 0, y: 0 };
67 |
68 | // getBoundingClientRect provides position relative to the viewport.
69 | const handleRect = handleElem.getBoundingClientRect();
70 | const canvasRect = canvasRef.current!.getBoundingClientRect();
71 |
72 | // Calculate position relative to the canvas container.
73 | const screenX = handleRect.left + handleRect.width / 2 - canvasRect.left;
74 | const screenY = handleRect.top + handleRect.height / 2 - canvasRect.top;
75 |
76 | // Convert screen coordinates to world coordinates by inverting the view transform.
77 | const worldX = (screenX - viewTransform.x) / viewTransform.scale;
78 | const worldY = (screenY - viewTransform.y) / viewTransform.scale;
79 |
80 | return { x: worldX, y: worldY };
81 | }, [viewTransform]);
82 |
83 | const handleMouseDownNode = useCallback((e: React.MouseEvent, nodeId: string) => {
84 | if (e.target instanceof HTMLTextAreaElement || e.target instanceof HTMLInputElement) return;
85 | e.stopPropagation();
86 | const node = nodes[nodeId];
87 | // Convert world position to screen position to calculate offset
88 | const screenX = node.position.x * viewTransform.scale + viewTransform.x;
89 | const screenY = node.position.y * viewTransform.scale + viewTransform.y;
90 | draggingNode.current = {
91 | id: nodeId,
92 | offset: { x: e.clientX - screenX, y: e.clientY - screenY },
93 | };
94 | }, [nodes, viewTransform]);
95 |
96 | const handleMouseDownHandle = useCallback((e: React.MouseEvent, nodeId: string, handleId: string, handleType: 'input' | 'output') => {
97 | e.stopPropagation();
98 | if (handleType === 'output') {
99 | connectingEdge.current = { sourceNodeId: nodeId, sourceHandleId: handleId };
100 | setConnectingEdgeEnd(getHandlePosition(nodeId, handleId));
101 | }
102 | }, [getHandlePosition]);
103 |
104 | const handleResizeMouseDown = useCallback((e: React.MouseEvent, nodeId: string) => {
105 | e.stopPropagation();
106 | resizingNode.current = {
107 | id: nodeId,
108 | startX: e.clientX,
109 | startScale: nodes[nodeId].data.scale || 1,
110 | };
111 | },[nodes]);
112 |
// Window-level mouse handlers driving all drag interactions (resize, pan,
// node drag, edge connect). Attached to `window` so a drag that leaves the
// canvas keeps tracking; re-registered whenever the view transform changes
// because the move handler reads it from this closure.
useEffect(() => {
  const handleMouseMove = (e: MouseEvent) => {
    // At most one gesture is active at a time; the refs act as the mode flag.
    if (resizingNode.current) {
      // Horizontal travel maps to scale, clamped to [0.5, 2.5]; 150px of
      // drag equals one full scale unit.
      const { id, startX, startScale } = resizingNode.current;
      const dx = e.clientX - startX;
      const newScale = Math.max(0.5, Math.min(2.5, startScale + dx / 150));
      updateNodeData(id, { scale: newScale });
    } else if (panState.current) {
      // Pan by the incremental delta, then rebase the start point so the
      // next move event is measured from here.
      const dx = e.clientX - panState.current.startX;
      const dy = e.clientY - panState.current.startY;
      setViewTransform(prev => ({ ...prev, x: prev.x + dx, y: prev.y + dy }));
      panState.current = { startX: e.clientX, startY: e.clientY };
    } else if (draggingNode.current) {
      // Convert the cursor (minus the grab offset) back into world
      // coordinates for the node's stored position.
      const { id, offset } = draggingNode.current;
      const newPos = {
        x: (e.clientX - offset.x - viewTransform.x) / viewTransform.scale,
        y: (e.clientY - offset.y - viewTransform.y) / viewTransform.scale
      };
      setNodes(prev => ({ ...prev, [id]: { ...prev[id], position: newPos } }));
    } else if (connectingEdge.current) {
      // Track the free end of the in-progress edge in world coordinates.
      const canvasRect = canvasRef.current?.getBoundingClientRect();
      if(canvasRect) {
        const worldX = (e.clientX - canvasRect.left - viewTransform.x) / viewTransform.scale;
        const worldY = (e.clientY - canvasRect.top - viewTransform.y) / viewTransform.scale;
        setConnectingEdgeEnd({ x: worldX, y: worldY });
      }
    }
  };

  const handleMouseUp = (e: MouseEvent) => {
    if (connectingEdge.current) {
      // Complete the connection only when released over an input handle
      // (identified via the data-handle-type attribute on the DOM node).
      const target = e.target as HTMLElement;
      const targetHandle = target.closest('[data-handle-type="input"]');

      if (targetHandle) {
        const targetHandleId = targetHandle.id;
        const targetNodeElement = targetHandle.closest('[data-node-id]');
        const targetNodeId = targetNodeElement?.getAttribute('data-node-id');
        const { sourceNodeId, sourceHandleId } = connectingEdge.current;

        // Disallow self-connections; no other validation happens here.
        if (targetNodeId && sourceNodeId !== targetNodeId) {
          const newEdge: Edge = {
            id: crypto.randomUUID(),
            sourceNodeId,
            sourceHandleId,
            targetNodeId,
            targetHandleId,
          };
          setEdges(prev => ({ ...prev, [newEdge.id]: newEdge }));
        }
      }
    }
    if (panState.current && canvasRef.current) canvasRef.current.style.cursor = 'default';

    // End whichever gesture was in flight.
    draggingNode.current = null;
    connectingEdge.current = null;
    panState.current = null;
    resizingNode.current = null;
    setConnectingEdgeEnd(null);
  };

  window.addEventListener('mousemove', handleMouseMove);
  window.addEventListener('mouseup', handleMouseUp);

  return () => {
    window.removeEventListener('mousemove', handleMouseMove);
    window.removeEventListener('mouseup', handleMouseUp);
  };
}, [viewTransform, updateNodeData]);
182 |
183 | const handleWheel = (e: React.WheelEvent) => {
184 | e.preventDefault();
185 | const zoomFactor = 1.1;
186 | const { deltaY } = e;
187 | const { left, top } = canvasRef.current!.getBoundingClientRect();
188 |
189 | const mouseX = e.clientX - left;
190 | const mouseY = e.clientY - top;
191 |
192 | const newScale = deltaY < 0 ? viewTransform.scale * zoomFactor : viewTransform.scale / zoomFactor;
193 | const clampedScale = Math.max(0.2, Math.min(2.5, newScale));
194 |
195 | const worldX = (mouseX - viewTransform.x) / viewTransform.scale;
196 | const worldY = (mouseY - viewTransform.y) / viewTransform.scale;
197 |
198 | const newX = mouseX - worldX * clampedScale;
199 | const newY = mouseY - worldY * clampedScale;
200 |
201 | setViewTransform({ scale: clampedScale, x: newX, y: newY });
202 | };
203 |
204 | const handleCanvasMouseDown = (e: React.MouseEvent) => {
205 | if (e.button === 1 || (e.button === 0 && e.altKey)) {
206 | panState.current = { startX: e.clientX, startY: e.clientY };
207 | canvasRef.current!.style.cursor = 'grabbing';
208 | }
209 | };
210 |
211 |
212 | const runWorkflow = useCallback(async () => {
213 | setIsProcessing(true);
214 |
215 | setNodes(prev => {
216 | const newNodes = {...prev};
217 | Object.keys(newNodes).forEach(id => {
218 | newNodes[id].data.status = NodeStatus.IDLE;
219 | newNodes[id].data.errorMessage = undefined;
220 | if(newNodes[id].type !== NodeType.TEXT_INPUT && newNodes[id].type !== NodeType.IMAGE_INPUT) {
221 | newNodes[id].data.content = null;
222 | }
223 | });
224 | return newNodes;
225 | });
226 |
227 | const nodeIds = Object.keys(nodes);
228 | const adj: Record = nodeIds.reduce((acc, id) => ({ ...acc, [id]: [] }), {});
229 | const inDegree: Record = nodeIds.reduce((acc, id) => ({ ...acc, [id]: 0 }), {});
230 |
231 | Object.values(edges).forEach(edge => {
232 | adj[edge.sourceNodeId].push(edge.targetNodeId);
233 | inDegree[edge.targetNodeId]++;
234 | });
235 |
236 | const queue = nodeIds.filter(id => inDegree[id] === 0);
237 | const executionOrder: string[] = [];
238 | while (queue.length > 0) {
239 | const u = queue.shift()!;
240 | executionOrder.push(u);
241 | adj[u]?.forEach(v => {
242 | inDegree[v]--;
243 | if (inDegree[v] === 0) queue.push(v);
244 | });
245 | }
246 |
247 | const nodeOutputs: Record = {};
248 |
249 | for (const nodeId of executionOrder) {
250 | const node = nodes[nodeId];
251 | updateNodeData(nodeId, { status: NodeStatus.PROCESSING, content: { progress: 'Starting...' } });
252 |
253 | try {
254 | const inputEdges = Object.values(edges).filter(e => e.targetNodeId === nodeId);
255 | const inputs: Record = {};
256 | for (const edge of inputEdges) {
257 | inputs[edge.targetHandleId] = nodeOutputs[edge.sourceHandleId];
258 | }
259 |
260 | let output: any;
261 |
262 | switch (node.type) {
263 | case NodeType.TEXT_INPUT:
264 | case NodeType.IMAGE_INPUT:
265 | output = node.data.content;
266 | break;
267 | case NodeType.TEXT_GENERATOR:
268 | const prompt = inputs[`${nodeId}-input`];
269 | output = await geminiService.generateText(prompt);
270 | break;
271 | case NodeType.IMAGE_EDITOR: {
272 | const imageInput = inputs[`${nodeId}-input-image`];
273 | const textInput = inputs[`${nodeId}-input-text`];
274 |
275 | let imageFile: {data: string, mimeType: string} | null = null;
276 |
277 | if (imageInput instanceof File) {
278 | const part = await geminiService.utils.fileToGenerativePart(imageInput);
279 | imageFile = { data: part.inlineData.data, mimeType: part.inlineData.mimeType };
280 | } else if (typeof imageInput === 'string' && imageInput.startsWith('data:image')) {
281 | const [meta, base64] = imageInput.split(',');
282 | const mimeType = meta.split(':')[1].split(';')[0];
283 | imageFile = { data: base64, mimeType };
284 | }
285 |
286 | if (imageFile && textInput) {
287 | const result = await geminiService.editImage(imageFile.data, imageFile.mimeType, textInput);
288 | if (result && result.newBase64Image) {
289 | output = { image: `data:${imageFile.mimeType};base64,${result.newBase64Image}`, text: result.text };
290 | } else {
291 | throw new Error(result.text || "Image editing failed to produce an image.");
292 | }
293 | } else {
294 | throw new Error("Missing image or prompt for Image Editor.");
295 | }
296 | break;
297 | }
298 | case NodeType.VIDEO_GENERATOR: {
299 | const imageInput = inputs[`${nodeId}-input-image`];
300 | const textInput = inputs[`${nodeId}-input-text`];
301 | let base64: string | null = null;
302 | let mimeType: string | null = null;
303 |
304 | if (imageInput instanceof File) {
305 | const part = await geminiService.utils.fileToGenerativePart(imageInput);
306 | base64 = part.inlineData.data;
307 | mimeType = part.inlineData.mimeType;
308 | } else if (typeof imageInput === 'string' && imageInput.startsWith('data:image')) {
309 | const [meta, b64] = imageInput.split(',');
310 | mimeType = meta.split(':')[1].split(';')[0];
311 | base64 = b64;
312 | }
313 | output = await geminiService.generateVideo(base64, mimeType, textInput, (progress) => {
314 | updateNodeData(nodeId, { content: { progress } });
315 | });
316 | break;
317 | }
318 | case NodeType.OUTPUT_DISPLAY:
319 | output = inputs[`${nodeId}-input`];
320 | break;
321 | }
322 |
323 | updateNodeData(nodeId, { status: NodeStatus.COMPLETED, content: output });
324 |
325 | node.data.outputs.forEach(o => {
326 | if (node.type === NodeType.IMAGE_EDITOR && output && typeof output === 'object' && 'image' in output) {
327 | nodeOutputs[o.id] = o.type === 'image' ? output.image : output.text;
328 | } else {
329 | nodeOutputs[o.id] = output;
330 | }
331 | });
332 |
333 | } catch (error) {
334 | console.error("Workflow error at node", nodeId, error);
335 | const errorMessage = error instanceof Error ? error.message : String(error);
336 | updateNodeData(nodeId, { status: NodeStatus.ERROR, errorMessage });
337 | setIsProcessing(false);
338 | return;
339 | }
340 | }
341 |
342 | setIsProcessing(false);
343 | }, [nodes, edges, updateNodeData]);
344 |
345 | return (
346 |
347 |
348 |
354 |
355 |
372 |
373 |
374 |
381 |
395 |
396 | {Object.values(nodes).map(node => (
397 |
398 |
405 |
406 | ))}
407 |
408 |
409 |
410 | );
411 | };
412 |
413 | export default App;
--------------------------------------------------------------------------------