├── .env.example
├── .gitignore
├── .npmrc
├── .prettierrc
├── .vscode
│   └── launch.json
├── CHANGELOG.md
├── CONTRIBUTING.md
├── LICENSE
├── LICENSE.md
├── README.md
├── app
│   ├── app.tsx
│   ├── assets
│   │   ├── .gitignore
│   │   ├── default-fallback-image.png
│   │   └── era-preview.png
│   ├── components
│   │   ├── ErrorBoundary.tsx
│   │   ├── ErrorFallback.tsx
│   │   ├── Header.tsx
│   │   ├── Layout.tsx
│   │   ├── ThemeToggle.tsx
│   │   ├── chat
│   │   │   ├── AdvancedSetting.tsx
│   │   │   ├── AutocompleteDropdown.tsx
│   │   │   ├── ChatInputArea.tsx
│   │   │   ├── GenerationStatus.tsx
│   │   │   ├── HighlightedText.tsx
│   │   │   ├── ResultsHeader.tsx
│   │   │   ├── SceneCard.tsx
│   │   │   ├── SearchInput.tsx
│   │   │   ├── SearchResults.tsx
│   │   │   ├── SearchResultsArea.tsx
│   │   │   └── SearchSuggestions.tsx
│   │   ├── index
│   │   │   ├── FeatureItem.tsx
│   │   │   ├── FeatureList.tsx
│   │   │   ├── FolderSelection.tsx
│   │   │   ├── IndexingProgress.tsx
│   │   │   └── IndexingSection.tsx
│   │   ├── settings
│   │   │   ├── SettingsInput.tsx
│   │   │   ├── SettingsSection.tsx
│   │   │   └── SettingsSwitch.tsx
│   │   ├── training
│   │   │   ├── KnownFacesGrid.tsx
│   │   │   ├── LabelingForm.tsx
│   │   │   └── UnknownFacesGrid.tsx
│   │   ├── ui
│   │   │   ├── badge.tsx
│   │   │   ├── button.tsx
│   │   │   ├── card.tsx
│   │   │   ├── input.tsx
│   │   │   ├── label.tsx
│   │   │   ├── progress.tsx
│   │   │   ├── select.tsx
│   │   │   ├── switch.tsx
│   │   │   └── tabs.tsx
│   │   └── videos
│   │       ├── FilterBar.tsx
│   │       ├── FilterGroup.tsx
│   │       ├── FilterOptions.tsx
│   │       ├── FilterSidebar.tsx
│   │       ├── VideoCard.tsx
│   │       ├── VideoGrid.tsx
│   │       ├── VideoList.tsx
│   │       ├── VideoMetadata.tsx
│   │       ├── VideoTags.tsx
│   │       └── VideoThumbnail.tsx
│   ├── hooks
│   │   ├── use-conveyor.ts
│   │   ├── useClickOutside.tsx
│   │   ├── useDarkMode.ts
│   │   ├── useFaceExtraction.tsx
│   │   ├── useFilterSidebar.ts
│   │   ├── useFilteredFaces.tsx
│   │   ├── useGeneration.ts
│   │   ├── useSearch.ts
│   │   ├── useSearchSuggestions.ts
│   │   ├── useSettings.ts
│   │   ├── useTraining.ts
│   │   ├── useVideoCard.ts
│   │   ├── useVideoMetadata.ts
│   │   ├── useVideos.ts
│   │   └── useWelcome.ts
│   ├── icons
│   │   ├── AsteriskIcon.tsx
│   │   ├── CodeWindowIcon.tsx
│   │   ├── ColorSchemeIcon.tsx
│   │   ├── ErrorIcon.tsx
│   │   ├── FanIcon.tsx
│   │   ├── FolderCheckIcon.tsx
│   │   ├── FolderIcon.tsx
│   │   ├── GearIcon.tsx
│   │   ├── IndexIcon.tsx
│   │   ├── PlayIcon.tsx
│   │   ├── ThunderIcon.tsx
│   │   └── VideoIcon.tsx
│   ├── index.d.ts
│   ├── index.html
│   ├── pages
│   │   ├── Chat.tsx
│   │   ├── Index.tsx
│   │   ├── Settings.tsx
│   │   ├── Training.tsx
│   │   └── Videos.tsx
│   ├── renderer.tsx
│   ├── styles
│   │   ├── Chat.css
│   │   ├── FilterSidebar.css
│   │   ├── Layout.css
│   │   ├── Videos.css
│   │   ├── Welcome.css
│   │   ├── app.css
│   │   ├── globals.css
│   │   └── window.css
│   └── utils
│       └── search.ts
├── components.json
├── electron-builder.yml
├── electron.vite.config.ts
├── eslint.config.mjs
├── faces.json
├── known_faces.json
├── lib
│   ├── constants
│   │   └── index.ts
│   ├── conveyor
│   │   ├── README.md
│   │   ├── api
│   │   │   ├── app-api.ts
│   │   │   ├── index.ts
│   │   │   └── window-api.ts
│   │   ├── conveyor.d.ts
│   │   ├── handlers
│   │   │   ├── app-handler.ts
│   │   │   └── window-handler.ts
│   │   └── schemas
│   │       ├── app-schema.ts
│   │       ├── index.ts
│   │       ├── progress-schema.ts
│   │       └── window-schema.ts
│   ├── main
│   │   ├── app.ts
│   │   ├── index.d.ts
│   │   ├── main.ts
│   │   ├── protocols.ts
│   │   └── shared.ts
│   ├── preload
│   │   ├── preload.ts
│   │   └── shared.ts
│   ├── services
│   │   ├── gemini.ts
│   │   ├── pythonService.ts
│   │   └── vectorDb.ts
│   ├── types
│   │   ├── analysis.ts
│   │   ├── face.ts
│   │   ├── gopro.ts
│   │   ├── index.ts
│   │   ├── scene.ts
│   │   ├── search.ts
│   │   ├── settings.ts
│   │   ├── transcription.ts
│   │   ├── vector.ts
│   │   └── video.ts
│   ├── utils.ts
│   └── utils
│       ├── embed.ts
│       ├── fcpxml.ts
│       ├── ffmpeg.ts
│       ├── file.ts
│       ├── frameAnalyze.ts
│       ├── gopro.ts
│       ├── location.ts
│       ├── scenes.ts
│       ├── search.ts
│       ├── sticher.ts
│       ├── time.ts
│       ├── transcribe.ts
│       └── videos.ts
├── package-lock.json
├── package.json
├── python
│   ├── PLUGINS.md
│   ├── add_face.py
│   ├── analysis_service.py
│   ├── analyze.py
│   ├── batch_add_faces.py
│   ├── face_recognizer.py
│   ├── plugins
│   │   ├── activity.py
│   │   ├── base.py
│   │   ├── dominant_color.py
│   │   ├── emotion_detection.py
│   │   ├── environment.py
│   │   ├── face_recognition.py
│   │   ├── object_detection.py
│   │   ├── shot_type.py
│   │   └── text_detection.py
│   ├── requirements.txt
│   └── transcribe.py
├── resources
│   ├── build
│   │   ├── entitlements.mac.plist
│   │   ├── icon.icns
│   │   ├── icon.ico
│   │   ├── icon.png
│   │   └── icon.svg
│   └── icons
│       ├── electron.png
│       ├── era.svg
│       ├── react.png
│       ├── shadcn.png
│       ├── tailwind.png
│       └── vite.png
├── settings.json
├── tsconfig.json
├── tsconfig.node.json
└── tsconfig.web.json
/.env.example:
--------------------------------------------------------------------------------
1 | GEMINI_API_KEY=""
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | dist
3 | out
4 | .DS_Store
5 | *.log*
6 | temp
7 | .thumbnails
8 | python/.venv
9 | .results
10 | .env
11 | .faces
12 | analysis_results
13 | *.pt
14 | output-videos
15 | __pycache__
16 | .chroma_db
17 | .locations.json
18 | .venv
--------------------------------------------------------------------------------
/.npmrc:
--------------------------------------------------------------------------------
1 | shamefully-hoist=true
2 |
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
1 | {
2 | "singleQuote": true,
3 | "semi": false,
4 | "printWidth": 120,
5 | "trailingComma": "es5",
6 | "tabWidth": 2,
7 | "endOfLine": "auto",
8 | "proseWrap": "preserve",
9 | "quoteProps": "as-needed",
10 | "useTabs": false
11 | }
12 |
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "0.2.0",
3 | "configurations": [
4 | {
5 | "name": "Debug Main Process",
6 | "type": "node",
7 | "request": "launch",
8 | "cwd": "${workspaceRoot}",
9 | "runtimeExecutable": "${workspaceRoot}/node_modules/.bin/electron-vite",
10 | "windows": {
11 | "runtimeExecutable": "${workspaceRoot}/node_modules/.bin/electron-vite.exe"
12 | },
13 | "runtimeArgs": ["--sourcemap"],
14 | "env": {
15 | "REMOTE_DEBUGGING_PORT": "9222"
16 | }
17 | },
18 | {
19 | "name": "Debug Renderer Process",
20 | "port": 9222,
21 | "request": "attach",
22 | "type": "chrome",
23 | "webRoot": "${workspaceFolder}/app",
24 | "timeout": 60000,
25 | "presentation": {
26 | "hidden": true
27 | }
28 | }
29 | ],
30 | "compounds": [
31 | {
32 | "name": "Debug All",
33 | "configurations": ["Debug Main Process", "Debug Renderer Process"],
34 | "presentation": {
35 | "order": 1
36 | }
37 | }
38 | ]
39 | }
40 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to Edit Mind
2 |
3 | We welcome contributions to Edit Mind! Whether you're fixing bugs, adding new features, improving documentation, or suggesting enhancements, your help is valuable. Please take a moment to review this document to understand how to contribute effectively.
4 |
5 | ## Code of Conduct
6 |
7 | We are committed to fostering an open and welcoming environment. Please review our [Code of Conduct](CODE_OF_CONDUCT.md) (placeholder for now) to understand the expectations for all contributors.
8 |
9 | ## How to Contribute
10 |
11 | ### 1. Reporting Bugs
12 |
13 | If you find a bug, please open an issue on our [GitHub Issues page](https://github.com/your-username/electron-react-app/issues). When reporting a bug, please include:
14 |
15 | - A clear and concise description of the bug.
16 | - Steps to reproduce the behavior.
17 | - Expected behavior.
18 | - Screenshots or video recordings if applicable.
19 | - Your operating system and application version.
20 |
21 | ### 2. Suggesting Enhancements
22 |
23 | We'd love to hear your ideas for improving Edit Mind! You can suggest enhancements by opening an issue on our [GitHub Issues page](https://github.com/your-username/electron-react-app/issues). Please include:
24 |
25 | - A clear and concise description of the proposed enhancement.
26 | - Why this enhancement would be useful.
27 | - Any mockups or examples if applicable.
28 |
29 | ### 3. Setting up Your Development Environment
30 |
31 | To get started with development, please follow the instructions in the [README.md](README.md) file under the "Getting Started" section.
32 |
33 | ### 4. Making Changes
34 |
35 | 1. **Fork the repository** and clone it to your local machine.
36 | 2. **Create a new branch** for your feature or bug fix:
37 | ```bash
38 | git checkout -b feature/your-feature-name
39 | # or
40 | git checkout -b bugfix/issue-description
41 | ```
42 | 3. **Make your changes.** Ensure your code adheres to the project's coding style and conventions.
43 | 4. **Test your changes.** Run existing tests and add new ones if necessary to cover your changes.
44 | 5. **Commit your changes** with a clear and descriptive commit message. Follow the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) specification if possible (e.g., `feat: add new plugin system`, `fix: resolve indexing error`).
45 | 6. **Push your branch** to your forked repository.
46 | 7. **Open a Pull Request** to the `main` branch of the original repository. Provide a detailed description of your changes and reference any related issues.
47 |
48 | ### 5. Coding Style
49 |
50 | - We use **ESLint** for JavaScript/TypeScript linting and **Prettier** for code formatting. Please ensure your code passes linting and is formatted correctly before submitting a pull request.
51 | - For Python code, adhere to **PEP 8** guidelines.
52 |
53 | ### 6. Testing
54 |
55 | - Before submitting a pull request, please ensure all existing tests pass.
56 | - If you're adding new features, please include appropriate unit and/or integration tests.
57 |
58 | ## Plugin Development
59 |
60 | Edit Mind features a powerful plugin system for extending its video analysis capabilities. If you're interested in creating new analyzer plugins:
61 |
62 | 1. Refer to the "Plugin System" section in the [README.md](README.md) for an overview.
63 | 2. Explore the existing plugins in the `python/plugins` directory for examples.
64 | 3. Ensure your plugin adheres to the `AnalyzerPlugin` interface defined in `python/plugins/base.py` (a rough sketch follows below).
65 |
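For orientation, here is a rough, hypothetical sketch of what a new analyzer plugin could look like. The import path and the `name`/`analyze` members shown below are illustrative assumptions, not the actual contract; the `AnalyzerPlugin` interface in `python/plugins/base.py` and the notes in `python/PLUGINS.md` are the source of truth.

```python
# Hypothetical sketch only: member names and signatures are assumptions,
# not the real AnalyzerPlugin contract from python/plugins/base.py.
from plugins.base import AnalyzerPlugin


class BrightnessAnalyzer(AnalyzerPlugin):
    """Toy analyzer that tags a frame as 'bright' or 'dark'."""

    name = "brightness"  # assumed identifier used when reporting results

    def analyze(self, frame):
        # `frame` is assumed to be an OpenCV-style BGR numpy array.
        mean_intensity = float(frame.mean())
        return {"brightness": "bright" if mean_intensity > 127 else "dark"}
```

New plugin modules sit alongside the existing analyzers in `python/plugins/` (for real examples, see `activity.py`, `dominant_color.py`, or `object_detection.py`).
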
66 | Thank you for contributing to Edit Mind!
67 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Guasam
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) [year] [fullname]
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/app/app.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react'
2 | import { HashRouter as Router, Routes, Route } from 'react-router-dom'
3 | import { Chat } from './pages/Chat'
4 | import './styles/app.css'
5 | import { Index } from './pages/Index'
6 | import { Layout } from './components/Layout'
7 |
8 | import { Settings } from './pages/Settings'
9 | import { Training } from './pages/Training'
10 | import { Videos } from './pages/Videos'
11 |
12 | const App: React.FC = () => {
13 |   return (
14 |     <Router>
15 |       <Layout>
16 |         <Routes>
17 |           <Route path="/" element={<Index />} />
18 |           <Route path="/chat" element={<Chat />} />
19 |           <Route path="/videos" element={<Videos />} />
20 |           <Route path="/training" element={<Training />} />
21 |           <Route path="/settings" element={<Settings />} />
22 |         </Routes>
23 |       </Layout>
24 |     </Router>
25 | )
26 | }
27 |
28 | export default App
29 |
--------------------------------------------------------------------------------
/app/assets/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IliasHad/edit-mind/a5be7ff9527f4a85857ec5fd443b5d06ab821007/app/assets/.gitignore
--------------------------------------------------------------------------------
/app/assets/default-fallback-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IliasHad/edit-mind/a5be7ff9527f4a85857ec5fd443b5d06ab821007/app/assets/default-fallback-image.png
--------------------------------------------------------------------------------
/app/assets/era-preview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IliasHad/edit-mind/a5be7ff9527f4a85857ec5fd443b5d06ab821007/app/assets/era-preview.png
--------------------------------------------------------------------------------
/app/components/ErrorBoundary.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { ErrorBoundary as ReactErrorBoundary } from 'react-error-boundary';
3 | import { ErrorFallback } from './ErrorFallback';
4 |
5 | export const ErrorBoundary = ({ children }: { children: React.ReactNode }) => (
6 |   <ReactErrorBoundary FallbackComponent={ErrorFallback}>
7 |     {children}
8 |   </ReactErrorBoundary>
9 | );
--------------------------------------------------------------------------------
/app/components/ErrorFallback.tsx:
--------------------------------------------------------------------------------
1 | import { FallbackProps } from 'react-error-boundary'
2 | import { Button } from './ui/Button'
3 | import { Badge } from './ui/Badge'
4 | import { ErrorIcon } from '../icons/ErrorIcon'
5 |
6 | export const ErrorFallback = ({ error, resetErrorBoundary }: FallbackProps) => {
7 | return (
8 |
9 |
10 |
15 |
16 |
17 |
18 |
Something went wrong
19 |
20 | An unexpected error occurred in the application. Please try refreshing the page or contact support if the
21 | problem persists.
22 |
23 |
24 |
25 | {error && (
26 |
27 |
28 |
29 |
37 | Error Details
38 |
39 |
40 |
41 |
42 |
43 |
44 | Error Message
45 |
46 |
47 | {error.message}
48 |
49 |
50 | {error.stack && (
51 |
52 |
53 | Stack Trace
54 |
55 |
56 | {error.stack}
57 |
58 |
59 | )}
60 |
61 |
62 |
63 | )}
64 |
65 |
66 |
77 |
83 |
84 |
85 |
86 |
87 | )
88 | }
89 |
--------------------------------------------------------------------------------
/app/components/Header.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Link, useLocation } from 'react-router-dom';
3 | import { IndexIcon } from '@/app/icons/IndexIcon';
4 | import { VideoIcon } from '@/app/icons/VideoIcon';
5 | import { GearIcon } from '@/app/icons/GearIcon';
6 |
7 | const navLinks = [
8 | { to: '/settings', icon: , text: 'Settings' },
9 | { to: '/training', icon: , text: 'Training' },
10 | { to: '/videos', icon: , text: 'Videos' },
11 | { to: '/chat', icon: , text: 'Chat', className: 'secondary' },
12 | { to: '/', icon: , text: 'Add Videos' },
13 | ];
14 |
15 | export const Header: React.FC = () => {
16 | const location = useLocation();
17 |
18 | const getNavLinks = () => {
19 | const isChatPage = location.pathname === '/chat';
20 | const isIndexingPage = location.pathname === '/';
21 |
22 | if (isChatPage) {
23 | return navLinks.filter((link) => link.to !== '/chat');
24 | }
25 | if (isIndexingPage) {
26 | return navLinks.filter((link) => link.to !== '/');
27 | }
28 | return navLinks;
29 | };
30 |
31 | return (
32 |
63 | );
64 | };
65 |
--------------------------------------------------------------------------------
/app/components/Layout.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import '@/app/styles/Layout.css';
3 | import { Header } from './Header';
4 |
5 | interface LayoutProps {
6 | children: React.ReactNode;
7 | }
8 |
9 | export const Layout: React.FC<LayoutProps> = ({ children }) => {
10 | return (
11 |
12 |
13 | {children}
14 |
15 | );
16 | };
--------------------------------------------------------------------------------
/app/components/ThemeToggle.tsx:
--------------------------------------------------------------------------------
1 | import { useDarkMode } from '@/app/hooks/useDarkMode';
2 | import { Badge } from './ui/Badge';
3 |
4 | export const ThemeToggle = () => {
5 | const [theme, toggleTheme] = useDarkMode();
6 |
7 | return (
8 |
9 |
14 | {theme === 'dark' ? 'Dark Mode' : 'Light Mode'}
15 |
16 |
17 | );
18 | };
19 |
--------------------------------------------------------------------------------
/app/components/chat/AutocompleteDropdown.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { FaceData } from '@/lib/types/search';
3 |
4 | interface AutocompleteDropdownProps {
5 | filteredFaces: FaceData[];
6 | selectedAutocompleteIndex: number;
7 | onFaceClick: (faceName: string) => () => void;
8 | onFaceHover: (index: number) => () => void;
9 | autocompleteRef: React.RefObject;
10 | }
11 |
12 | export const AutocompleteDropdown: React.FC<AutocompleteDropdownProps> = ({
13 | filteredFaces,
14 | selectedAutocompleteIndex,
15 | onFaceClick,
16 | onFaceHover,
17 | autocompleteRef,
18 | }) => {
19 | return (
20 |
27 | {filteredFaces.map((face, index) => {
28 | const isSelected = index === selectedAutocompleteIndex;
29 | const initial = face.name.charAt(0).toUpperCase();
30 |
31 | return (
32 |
40 | {face.thumbnail ? (
41 |

47 | ) : (
48 |
49 | {initial}
50 |
51 | )}
52 |
53 |
54 | {face.name}
55 |
56 | {face.count} scene{face.count !== 1 ? 's' : ''}
57 |
58 |
59 |
60 | );
61 | })}
62 |
63 | );
64 | };
65 |
--------------------------------------------------------------------------------
/app/components/chat/ChatInputArea.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { SearchSuggestions } from './SearchSuggestions';
3 | import { SearchInput } from './SearchInput';
4 | import { AdvancedSetting } from './AdvancedSetting';
5 | import { Video } from '@/lib/types/video';
6 | import { SearchMetadata, VideoConfig } from '@/lib/types/search';
7 |
8 | interface ChatInputAreaProps {
9 | prompt: string;
10 | setPrompt: (prompt: string) => void;
11 | videos: Video[];
12 | textareaRef: React.RefObject;
13 | suggestions: any[];
14 | loadingSuggestions: boolean;
15 | handleSuggestionClick: (text: string) => void;
16 | showAdvancedSettings: boolean;
17 | setShowAdvancedSettings: (show: boolean) => void;
18 | videoConfig: VideoConfig;
19 | searchMetadata: SearchMetadata;
20 | setVideoConfig: (config: VideoConfig | ((prev: VideoConfig) => VideoConfig)) => void;
21 | handleSearch: () => void;
22 | searchLoading: boolean;
23 | }
24 |
25 | export const ChatInputArea: React.FC<ChatInputAreaProps> = ({
26 | prompt,
27 | setPrompt,
28 | videos,
29 | textareaRef,
30 | suggestions,
31 | loadingSuggestions,
32 | handleSuggestionClick,
33 | showAdvancedSettings,
34 | setShowAdvancedSettings,
35 | videoConfig,
36 | searchMetadata,
37 | setVideoConfig,
38 | handleSearch,
39 | searchLoading,
40 | }) => {
41 | return (
42 |
43 | {!prompt && suggestions.length > 0 && (
44 |
49 | )}
50 |
51 |
52 |
59 |
60 |
70 |
71 | );
72 | };
73 |
--------------------------------------------------------------------------------
/app/components/chat/GenerationStatus.tsx:
--------------------------------------------------------------------------------
1 | import { AnimatePresence, motion } from 'framer-motion'
2 | import confetti from 'canvas-confetti'
3 | import { useEffect } from 'react'
4 | import { GenerationResult } from '@/lib/types/search'
5 |
6 | interface GenerationStatusProps {
7 | generationStatus: string | null
8 | generationResult: GenerationResult | null
9 | onOpenVideo: () => void
10 | onShowInFinder: () => void
11 | }
12 |
13 | export const GenerationStatus = ({
14 | generationStatus,
15 | generationResult,
16 | onOpenVideo,
17 | onShowInFinder,
18 | }: GenerationStatusProps) => {
19 | useEffect(() => {
20 | if (generationResult) {
21 | confetti({
22 | particleCount: 100,
23 | spread: 70,
24 | origin: { y: 0.6 },
25 | colors: ['#ff7f50', '#ffdb4d', '#4caf50', '#2196f3'],
26 | })
27 | }
28 | }, [generationResult])
29 |
30 | if (!generationStatus) return null
31 |
32 | return (
33 |
41 | {generationStatus}
42 |
43 |
44 | {generationResult && (
45 |
53 |
59 | ▶️
60 | Open Video
61 |
62 |
63 |
69 | 📁
70 | Show in Finder
71 |
72 |
73 | )}
74 |
75 |
76 | )
77 | }
78 |
--------------------------------------------------------------------------------
/app/components/chat/HighlightedText.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { parseMentions } from '@/app/utils/search';
3 |
4 | interface HighlightedTextProps {
5 | text: string;
6 | }
7 |
8 | export const HighlightedText: React.FC<HighlightedTextProps> = ({ text }) => {
9 | const highlightedText = parseMentions(text);
10 |
11 | return (
12 |
13 | {highlightedText.map((part, index) => (
14 |
15 | {part.text}
16 |
17 | ))}
18 |
19 | );
20 | };
21 |
--------------------------------------------------------------------------------
/app/components/chat/ResultsHeader.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { SortOrder } from './SearchResults';
3 |
4 | interface ResultsHeaderProps {
5 | resultsCount: number;
6 | allScenesSelected: boolean;
7 | toggleSelectAll: () => void;
8 | sortOrder: SortOrder;
9 | toggleSortOrder: () => void;
10 | }
11 |
12 | export const ResultsHeader: React.FC<ResultsHeaderProps> = ({
13 | resultsCount,
14 | allScenesSelected,
15 | toggleSelectAll,
16 | sortOrder,
17 | toggleSortOrder,
18 | }) => {
19 | return (
20 |
43 | );
44 | };
45 |
--------------------------------------------------------------------------------
/app/components/chat/SceneCard.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Scene } from '@/lib/types/scene';
3 |
4 | interface SceneCardProps {
5 | scene: Scene;
6 | index: number;
7 | isSelected: boolean;
8 | onSceneClick: (index: number) => (e: React.MouseEvent) => void;
9 | }
10 |
11 | const getFileName = (path: string): string => {
12 | return path.split('/').pop() ?? 'Unknown';
13 | };
14 |
15 | const formatTimeRange = (start: number, end: number): string => {
16 | return `${start.toFixed(1)}s - ${end.toFixed(1)}s`;
17 | };
18 |
19 | export const SceneCard: React.FC<SceneCardProps> = ({ scene, index, isSelected, onSceneClick }) => {
20 | const sceneKey = `${scene.source}-${scene.startTime}-${index}`;
21 | const fileName = getFileName(scene.source);
22 | const timeRange = formatTimeRange(scene.startTime, scene.endTime);
23 |
24 | return (
25 |
33 | {scene.thumbnailUrl ? (
34 |

40 | ) : (
41 |
42 | No Preview
43 |
44 | )}
45 |
46 |
47 |
48 | {fileName}
49 |
50 |
{timeRange}
51 |
{scene.camera}
52 |
{scene.createdAt}
53 |
54 |
55 | {isSelected && (
56 |
67 | )}
68 |
69 | );
70 | };
71 |
--------------------------------------------------------------------------------
/app/components/chat/SearchResults.tsx:
--------------------------------------------------------------------------------
1 | import { Scene } from '@/lib/types/scene';
2 | import { motion } from 'framer-motion';
3 | import type { FC, MouseEvent } from 'react';
4 | import { ResultsHeader } from './ResultsHeader';
5 | import { SceneCard } from './SceneCard';
6 |
7 | export type SortOrder = 'asc' | 'desc';
8 |
9 | interface SearchResultsProps {
10 | sortedSearchResults: readonly Scene[];
11 | allScenesSelected: boolean;
12 | toggleSelectAll: () => void;
13 | sortOrder: SortOrder;
14 | setSortOrder: (order: SortOrder) => void;
15 | toggleSceneSelection: (index: number) => void;
16 | selectedScenes: ReadonlySet<number>;
17 | handleGenerateRoughCut: () => void | Promise<void>;
18 | loading: boolean;
19 | }
20 |
21 | export const SearchResults: FC<SearchResultsProps> = ({
22 | sortedSearchResults,
23 | allScenesSelected,
24 | toggleSelectAll,
25 | sortOrder,
26 | setSortOrder,
27 | toggleSceneSelection,
28 | selectedScenes,
29 | handleGenerateRoughCut,
30 | loading,
31 | }) => {
32 | const hasResults = sortedSearchResults.length > 0;
33 | const hasSelection = selectedScenes.size > 0;
34 | const resultsCount = sortedSearchResults.length;
35 | const selectedCount = selectedScenes.size;
36 |
37 | const toggleSortOrder = (): void => {
38 | setSortOrder(sortOrder === 'desc' ? 'asc' : 'desc');
39 | };
40 |
41 | const handleSceneClick =
42 | (index: number) =>
43 | (e: MouseEvent): void => {
44 | e.stopPropagation();
45 | toggleSceneSelection(index);
46 | };
47 |
48 | if (!hasResults) {
49 | return null;
50 | }
51 |
52 | return (
53 |
54 |
61 |
62 |
63 | {sortedSearchResults.map((scene, index) => (
64 |
71 | ))}
72 |
73 |
74 |
82 | {loading ? (
83 | <>
84 |
85 | Generating...
86 | >
87 | ) : (
88 | `Generate Rough Cut (${selectedCount} scene${selectedCount !== 1 ? 's' : ''})`
89 | )}
90 |
91 |
92 | );
93 | };
--------------------------------------------------------------------------------
/app/components/chat/SearchResultsArea.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { GenerationStatus } from './GenerationStatus';
3 | import { SearchResults, SortOrder } from './SearchResults';
4 | import { Scene } from '@/lib/types/scene';
5 | import { GenerationResult } from '@/lib/types/search';
6 |
7 | interface SearchResultsAreaProps {
8 | generationStatus: string | null;
9 | generationResult: GenerationResult | null;
10 | handleOpenVideo: () => void;
11 | handleShowInFinder: () => void;
12 | sortedSearchResults: readonly Scene[];
13 | sortOrder: SortOrder;
14 | setSortOrder: (order: SortOrder) => void;
15 | handleGenerateRoughCut: () => void | Promise<void>;
16 | toggleSelectAll: () => void;
17 | generationLoading: boolean;
18 | selectedScenes: ReadonlySet<number>;
19 | toggleSceneSelection: (index: number) => void;
20 | allScenesSelected: boolean;
21 | }
22 |
23 | export const SearchResultsArea: React.FC<SearchResultsAreaProps> = ({
24 | generationStatus,
25 | generationResult,
26 | handleOpenVideo,
27 | handleShowInFinder,
28 | sortedSearchResults,
29 | sortOrder,
30 | setSortOrder,
31 | handleGenerateRoughCut,
32 | toggleSelectAll,
33 | generationLoading,
34 | selectedScenes,
35 | toggleSceneSelection,
36 | allScenesSelected,
37 | }) => {
38 | return (
39 | <>
40 |
46 |
47 |
58 | >
59 | );
60 | };
61 |
--------------------------------------------------------------------------------
/app/components/chat/SearchSuggestions.tsx:
--------------------------------------------------------------------------------
1 | import { SearchSuggestion } from '@/lib/types/search'
2 | import type { FC, MouseEvent } from 'react'
3 |
4 | interface SearchSuggestionsProps {
5 | loadingSuggestions: boolean
6 | suggestions: readonly SearchSuggestion[]
7 | handleSuggestionClick: (text: string) => void
8 | }
9 |
10 | const SKELETON_COUNT = 5 as const
11 |
12 | export const SearchSuggestions: FC<SearchSuggestionsProps> = ({
13 | loadingSuggestions,
14 | suggestions,
15 | handleSuggestionClick,
16 | }) => {
17 | const onSuggestionClick =
18 | (text: string) =>
19 | (e: MouseEvent): void => {
20 | e.preventDefault()
21 | handleSuggestionClick(text)
22 | }
23 |
24 | return (
25 |
26 |
27 | {loadingSuggestions ? 'Analyzing your videos...' : 'Try searching for:'}
28 |
29 |
30 |
31 | {loadingSuggestions
32 | ? Array.from({ length: SKELETON_COUNT }, (_, i) => (
33 |
37 | ))
38 | : suggestions.map((suggestion) => (
39 |
51 | ))}
52 |
53 |
54 | )
55 | }
56 |
--------------------------------------------------------------------------------
/app/components/index/FeatureItem.tsx:
--------------------------------------------------------------------------------
1 | import { motion } from 'framer-motion';
2 |
3 | export const FeatureItem = ({ icon, title, description }: { icon: React.ReactNode; title: string; description: string }) => {
4 | return (
5 |
11 | {icon}
12 |
13 |
{title}
14 |
{description}
15 |
16 |
17 | )
18 | }
19 |
--------------------------------------------------------------------------------
/app/components/index/FeatureList.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { FeatureItem } from './FeatureItem';
3 | import { VideoIcon } from '@/app/icons/VideoIcon';
4 | import { IndexIcon } from '@/app/icons/IndexIcon';
5 | import { ThunderIcon } from '@/app/icons/ThunderIcon';
6 |
7 | export const FeatureList: React.FC = () => {
8 | return (
9 |
10 | }
12 | title="Auto-Detection"
13 | description="Automatically finds all video files"
14 | />
15 | }
17 | title="Smart Indexing"
18 | description="Creates searchable metadata"
19 | />
20 | }
22 | title="Create rough cuts"
23 | description="Create clips based on your prompt"
24 | />
25 |
26 | );
27 | };
28 |
--------------------------------------------------------------------------------
/app/components/index/FolderSelection.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Button } from '@/app/components/ui/Button';
3 | import { FolderIcon, FolderCheckIcon } from 'lucide-react';
4 |
5 | interface FolderSelectionProps {
6 | selectedFolder: string | null;
7 | videos: string[];
8 | handleSelectFolder: () => void;
9 | handleCancelIndexing: () => void;
10 | }
11 |
12 | export const FolderSelection: React.FC<FolderSelectionProps> = ({
13 | selectedFolder,
14 | videos,
15 | handleSelectFolder,
16 | handleCancelIndexing,
17 | }) => {
18 | return (
19 |
20 | {!selectedFolder ? (
21 |
25 | ) : (
26 |
27 |
28 |
29 |
30 | Selected folder:
31 | {selectedFolder}
32 | {videos.length} videos
33 |
34 |
35 |
38 |
39 | )}
40 |
41 | );
42 | };
43 |
--------------------------------------------------------------------------------
/app/components/index/IndexingSection.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Button } from '@/app/components/ui/Button';
3 | import { PlayIcon } from 'lucide-react';
4 | import { IndexingProgress, IndexingProgressProps } from '../IndexingProgress';
5 |
6 | interface IndexingSectionProps {
7 | isIndexing: boolean;
8 | indexingProgress: IndexingProgressProps | null;
9 | handleStartIndexing: () => void;
10 | }
11 |
12 | export const IndexingSection: React.FC<IndexingSectionProps> = ({
13 | isIndexing,
14 | indexingProgress,
15 | handleStartIndexing,
16 | }) => {
17 | return (
18 |
19 | {!isIndexing ? (
20 |
24 | ) : (
25 | indexingProgress &&
26 | )}
27 |
28 | );
29 | };
30 |
--------------------------------------------------------------------------------
/app/components/settings/SettingsInput.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Input } from '@/app/components/ui/Input';
3 | import { Label } from '@/app/components/ui/Label';
4 |
5 | interface SettingsInputProps {
6 | id: string;
7 | label: string;
8 | type?: string;
9 | value: string | number;
10 | onChange: (e: React.ChangeEvent) => void;
11 | step?: string;
12 | min?: number;
13 | max?: number;
14 | }
15 |
16 | export const SettingsInput: React.FC<SettingsInputProps> = ({ id, label, type = 'text', value, onChange, ...props }) => {
17 | return (
18 |
19 |
20 |
21 |
22 | );
23 | };
24 |
--------------------------------------------------------------------------------
/app/components/settings/SettingsSection.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { CardHeader, CardTitle, CardContent } from '@/app/components/ui/Card';
3 |
4 | interface SettingsSectionProps {
5 | title: string;
6 | children: React.ReactNode;
7 | }
8 |
9 | export const SettingsSection: React.FC<SettingsSectionProps> = ({ title, children }) => {
10 | return (
11 | <>
12 |
13 | {title}
14 |
15 |
16 | {children}
17 |
18 | >
19 | );
20 | };
21 |
--------------------------------------------------------------------------------
/app/components/settings/SettingsSwitch.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Switch } from '@/app/components/ui/Switch';
3 | import { Label } from '@/app/components/ui/Label';
4 |
5 | interface SettingsSwitchProps {
6 | id: string;
7 | label: string;
8 | checked: boolean;
9 | onCheckedChange: (checked: boolean) => void;
10 | }
11 |
12 | export const SettingsSwitch: React.FC<SettingsSwitchProps> = ({ id, label, checked, onCheckedChange }) => {
13 | return (
14 |
15 |
16 |
17 |
18 | );
19 | };
20 |
--------------------------------------------------------------------------------
/app/components/training/KnownFacesGrid.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { KnownFace } from '@/lib/types/face';
3 | import { UserPlus } from 'lucide-react';
4 | import { Badge } from '@/app/components/ui/Badge';
5 |
6 | interface KnownFacesGridProps {
7 | knownFaces: KnownFace[];
8 | }
9 |
10 | export const KnownFacesGrid: React.FC<KnownFacesGridProps> = ({ knownFaces }) => {
11 | return (
12 |
13 | {knownFaces.map((face) => (
14 |
15 | {face.images.length > 0 ? (
16 |

17 | ) : (
18 |
19 |
20 |
21 | )}
22 |
23 |
{face.name}
24 |
25 | {face.images.length} samples
26 |
27 |
28 |
29 | ))}
30 |
31 | );
32 | };
33 |
--------------------------------------------------------------------------------
/app/components/training/LabelingForm.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Button } from '@/app/components/ui/Button';
3 | import { Input } from '@/app/components/ui/Input';
4 | import { Label } from '@/app/components/ui/Label';
5 | import { Tabs, TabsList, TabsTrigger } from '@/app/components/ui/Tabs';
6 | import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from '@/app/components/ui/Select';
7 | import { Loader2, Check } from 'lucide-react';
8 | import { KnownFace } from '@/lib/types/face';
9 |
10 | interface LabelingFormProps {
11 | selectedFacesCount: number;
12 | handleSelectAll: () => void;
13 | labelMode: 'existing' | 'new';
14 | setLabelMode: (mode: 'existing' | 'new') => void;
15 | selectedKnownFace: string;
16 | setSelectedKnownFace: (name: string) => void;
17 | knownFaces: KnownFace[];
18 | newFaceName: string;
19 | setNewFaceName: (name: string) => void;
20 | handleLabelFaces: () => void;
21 | isLabeling: boolean;
22 | unknownFacesCount: number;
23 | }
24 |
25 | export const LabelingForm: React.FC<LabelingFormProps> = ({
26 | selectedFacesCount,
27 | handleSelectAll,
28 | labelMode,
29 | setLabelMode,
30 | selectedKnownFace,
31 | setSelectedKnownFace,
32 | knownFaces,
33 | newFaceName,
34 | setNewFaceName,
35 | handleLabelFaces,
36 | isLabeling,
37 | unknownFacesCount,
38 | }) => {
39 | return (
40 |
41 |
42 |
43 |
46 |
47 |
48 |
49 |
50 |
51 | setLabelMode(value as 'existing' | 'new')}
54 | className="w-full mt-2"
55 | >
56 |
57 | Use Existing Name
58 | Create New Name
59 |
60 |
61 |
62 |
63 | {labelMode === 'existing' ? (
64 |
65 |
66 |
78 |
79 | ) : (
80 |
81 |
82 | setNewFaceName(e.target.value)}
87 | className="mt-2"
88 | />
89 |
90 | )}
91 |
92 |
114 |
115 |
116 | );
117 | };
118 |
--------------------------------------------------------------------------------
/app/components/training/UnknownFacesGrid.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { UnknownFace } from '@/lib/types/face';
3 | import { Check, X } from 'lucide-react';
4 |
5 | interface UnknownFacesGridProps {
6 | unknownFaces: UnknownFace[];
7 | selectedFaces: Set<string>;
8 | handleSelectFace: (image_hash: string) => void;
9 | handleDeleteUnknownFace: (face: UnknownFace) => void;
10 | }
11 |
12 | export const UnknownFacesGrid: React.FC<UnknownFacesGridProps> = ({
13 | unknownFaces,
14 | selectedFaces,
15 | handleSelectFace,
16 | handleDeleteUnknownFace,
17 | }) => {
18 | return (
19 |
20 | {unknownFaces.map((face) => (
21 |
handleSelectFace(face.image_hash)}
29 | >
30 | {selectedFaces.has(face.image_hash) && (
31 |
32 |
33 |
34 | )}
35 |
36 |
46 |
47 |

52 |
53 |
Video: {face.video_name}
54 |
Time: {face.formatted_timestamp}
55 |
56 |
57 | ))}
58 |
59 | );
60 | };
61 |
--------------------------------------------------------------------------------
/app/components/ui/badge.tsx:
--------------------------------------------------------------------------------
1 | import * as React from 'react'
2 | import { Slot } from '@radix-ui/react-slot'
3 | import { cva, type VariantProps } from 'class-variance-authority'
4 |
5 | import { cn } from '@/lib/utils'
6 |
7 | const badgeVariants = cva(
8 | 'inline-flex items-center justify-center rounded-md border px-2 py-0.5 text-xs font-medium w-fit whitespace-nowrap shrink-0 [&>svg]:size-3 gap-1 [&>svg]:pointer-events-none focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px] aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive transition-[color,box-shadow] overflow-hidden',
9 | {
10 | variants: {
11 | variant: {
12 | default: 'border-transparent bg-primary text-primary-foreground [a&]:hover:bg-primary/90',
13 | secondary: 'border-transparent bg-secondary text-secondary-foreground [a&]:hover:bg-secondary/90',
14 | destructive:
15 | 'border-transparent bg-destructive text-white [a&]:hover:bg-destructive/90 focus-visible:ring-destructive/20 dark:focus-visible:ring-destructive/40 dark:bg-destructive/60',
16 | outline: 'text-foreground [a&]:hover:bg-accent [a&]:hover:text-accent-foreground',
17 | },
18 | },
19 | defaultVariants: {
20 | variant: 'default',
21 | },
22 | }
23 | )
24 |
25 | const Badge = ({ className, variant, asChild = false, ...props }: React.ComponentProps<'span'> & VariantProps<typeof badgeVariants> & { asChild?: boolean }) => {
26 | const Comp = asChild ? Slot : 'span'
27 |
28 | return
29 | }
30 |
31 | export { Badge, badgeVariants }
32 |
--------------------------------------------------------------------------------
/app/components/ui/button.tsx:
--------------------------------------------------------------------------------
1 | import * as React from 'react'
2 | import { Slot } from '@radix-ui/react-slot'
3 | import { cva, type VariantProps } from 'class-variance-authority'
4 |
5 | import { cn } from '@/lib/utils'
6 |
7 | const buttonVariants = cva(
8 | "inline-flex items-center justify-center gap-2 whitespace-nowrap rounded-md text-sm font-medium transition-all disabled:pointer-events-none disabled:opacity-50 [&_svg]:pointer-events-none [&_svg:not([class*='size-'])]:size-4 shrink-0 [&_svg]:shrink-0 outline-none focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px] aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive",
9 | {
10 | variants: {
11 | variant: {
12 | default: 'bg-primary text-primary-foreground shadow-xs hover:bg-primary/90',
13 | destructive:
14 | 'bg-destructive text-white shadow-xs hover:bg-destructive/90 focus-visible:ring-destructive/20 dark:focus-visible:ring-destructive/40 dark:bg-destructive/60',
15 | outline:
16 | 'border bg-background shadow-xs hover:bg-accent hover:text-accent-foreground dark:bg-input/30 dark:border-input dark:hover:bg-input/50',
17 | secondary: 'bg-secondary text-secondary-foreground shadow-xs hover:bg-secondary/80',
18 | ghost: 'hover:bg-accent hover:text-accent-foreground dark:hover:bg-accent/50',
19 | link: 'text-primary underline-offset-4 hover:underline',
20 | },
21 | size: {
22 | default: 'h-9 px-4 py-2 has-[>svg]:px-3',
23 | sm: 'h-8 rounded-md gap-1.5 px-3 has-[>svg]:px-2.5',
24 | lg: 'h-10 rounded-md px-6 has-[>svg]:px-4',
25 | icon: 'size-9',
26 | },
27 | },
28 | defaultVariants: {
29 | variant: 'default',
30 | size: 'default',
31 | },
32 | }
33 | )
34 |
35 | const Button = ({ className, variant, size, asChild = false, ...props }: React.ComponentProps<'button'> & VariantProps<typeof buttonVariants> & { asChild?: boolean }) => {
36 | const Comp = asChild ? Slot : 'button'
37 |
38 | return
39 | }
40 |
41 | export { Button, buttonVariants }
42 |
--------------------------------------------------------------------------------
/app/components/ui/card.tsx:
--------------------------------------------------------------------------------
1 |
2 | import * as React from "react"
3 |
4 | import { cn } from "@/lib/utils"
5 |
6 |
7 | const Card = React.forwardRef>(({ className, ...props }, ref) => (
8 |
13 | ))
14 | Card.displayName = "Card"
15 |
16 | const CardHeader = React.forwardRef>(({ className, ...props }, ref) => (
17 |
22 | ))
23 | CardHeader.displayName = "CardHeader"
24 |
25 | const CardTitle = React.forwardRef>(({ className, ...props }, ref) => (
26 |
31 | ))
32 | CardTitle.displayName = "CardTitle"
33 |
34 | const CardDescription = React.forwardRef>(({ className, ...props }, ref) => (
35 |
40 | ))
41 | CardDescription.displayName = "CardDescription"
42 |
43 | const CardContent = React.forwardRef>(({ className, ...props }, ref) => (
44 |
45 | ))
46 | CardContent.displayName = "CardContent"
47 |
48 | const CardFooter = React.forwardRef>(({ className, ...props }, ref) => (
49 |
54 | ))
55 | CardFooter.displayName = "CardFooter"
56 |
57 | export { Card, CardHeader, CardFooter, CardTitle, CardDescription, CardContent }
58 |
--------------------------------------------------------------------------------
/app/components/ui/input.tsx:
--------------------------------------------------------------------------------
1 |
2 | import * as React from "react"
3 |
4 | import { cn } from "@/lib/utils"
5 |
6 | export type InputProps = React.InputHTMLAttributes<HTMLInputElement>;
7 |
8 | const Input = React.forwardRef<HTMLInputElement, InputProps>(
9 | ({ className, type, ...props }, ref) => {
10 | return (
11 |
20 | )
21 | }
22 | )
23 | Input.displayName = "Input"
24 |
25 | export { Input }
26 |
--------------------------------------------------------------------------------
/app/components/ui/label.tsx:
--------------------------------------------------------------------------------
1 |
2 | import * as React from "react"
3 | import * as LabelPrimitive from "@radix-ui/react-label"
4 |
5 | import { cn } from "@/lib/utils"
6 |
7 | const Label = React.forwardRef<
8 |   React.ElementRef<typeof LabelPrimitive.Root>,
9 |   React.ComponentPropsWithoutRef<typeof LabelPrimitive.Root>
10 | >(({ className, ...props }, ref) => (
11 |
19 | ))
20 | Label.displayName = LabelPrimitive.Root.displayName
21 |
22 | export { Label }
23 |
--------------------------------------------------------------------------------
/app/components/ui/progress.tsx:
--------------------------------------------------------------------------------
1 |
2 | import * as React from "react"
3 | import * as ProgressPrimitive from "@radix-ui/react-progress"
4 |
5 | import { cn } from "@/lib/utils"
6 |
7 | const Progress = React.forwardRef<
8 |   React.ElementRef<typeof ProgressPrimitive.Root>,
9 |   React.ComponentPropsWithoutRef<typeof ProgressPrimitive.Root>
10 | >(({ className, value, ...props }, ref) => (
11 |
19 |
23 |
24 | ))
25 | Progress.displayName = ProgressPrimitive.Root.displayName
26 |
27 | export { Progress }
28 |
--------------------------------------------------------------------------------
/app/components/ui/switch.tsx:
--------------------------------------------------------------------------------
1 | import * as React from 'react'
2 | import * as SwitchPrimitive from '@radix-ui/react-switch'
3 |
4 | import { cn } from '@/lib/utils'
5 |
6 | const Switch = ({ className, ...props }: React.ComponentProps<typeof SwitchPrimitive.Root>) => {
7 | return (
8 |
16 |
22 |
23 | )
24 | }
25 |
26 | export { Switch }
27 |
--------------------------------------------------------------------------------
/app/components/ui/tabs.tsx:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import * as TabsPrimitive from "@radix-ui/react-tabs"
3 |
4 | import { cn } from "@/lib/utils"
5 |
6 | const Tabs = TabsPrimitive.Root
7 |
8 | const TabsList = React.forwardRef,
9 | React.ComponentPropsWithoutRef
10 | >(({ className, ...props }, ref) => (
11 |
19 | ))
20 | TabsList.displayName = TabsPrimitive.List.displayName
21 |
22 | const TabsTrigger = React.forwardRef<
23 | React.ElementRef,
24 | React.ComponentPropsWithoutRef
25 | >(({ className, ...props }, ref) => (
26 |
34 | ))
35 | TabsTrigger.displayName = TabsPrimitive.Trigger.displayName
36 |
37 | const TabsContent = React.forwardRef<
38 | React.ElementRef,
39 | React.ComponentPropsWithoutRef
40 | >(({ className, ...props }, ref) => (
41 |
49 | ))
50 | TabsContent.displayName = TabsPrimitive.Content.displayName
51 |
52 | export { Tabs, TabsList, TabsTrigger, TabsContent }
--------------------------------------------------------------------------------
/app/components/videos/FilterBar.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Search, Grid3x3, List, SlidersHorizontal, X } from 'lucide-react';
3 | import { Button } from '@/app/components/ui/Button';
4 | import { Input } from '@/app/components/ui/Input';
5 |
6 | type ViewMode = 'grid' | 'list';
7 |
8 | interface FilterBarProps {
9 | searchQuery: string;
10 | setSearchQuery: (query: string) => void;
11 | viewMode: ViewMode;
12 | setViewMode: (mode: ViewMode) => void;
13 | showFilters: boolean;
14 | setShowFilters: (show: boolean) => void;
15 | activeFiltersCount: number;
16 | }
17 |
18 | export const FilterBar: React.FC<FilterBarProps> = ({
19 | searchQuery,
20 | setSearchQuery,
21 | viewMode,
22 | setViewMode,
23 | showFilters,
24 | setShowFilters,
25 | activeFiltersCount,
26 | }) => {
27 | return (
28 |
29 |
30 |
Video Library
31 |
32 |
33 |
34 |
35 |
36 | setSearchQuery(e.target.value)}
41 | className="search-input"
42 | />
43 | {searchQuery && (
44 |
47 | )}
48 |
49 |
50 |
51 |
58 |
65 |
66 |
67 |
72 |
73 |
74 | );
75 | };
76 |
--------------------------------------------------------------------------------
/app/components/videos/FilterGroup.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { ChevronDown, ChevronRight } from 'lucide-react';
3 | import { Button } from '@/app/components/ui/Button';
4 | import { Input } from '@/app/components/ui/Input';
5 | import { FilterOptions } from './FilterOptions';
6 |
7 | interface FilterGroupProps {
8 | category: string;
9 | values: string[];
10 | selectedValues: string[];
11 | isExpanded: boolean;
12 | searchTerm: string;
13 | onToggle: () => void;
14 | onSearchChange: (value: string) => void;
15 | onFilterChange: (value: string) => void;
16 | onClear: () => void;
17 | getCategoryLabel: (category: string) => string;
18 | }
19 |
20 | export const FilterGroup: React.FC<FilterGroupProps> = ({
21 | category,
22 | values,
23 | selectedValues,
24 | isExpanded,
25 | searchTerm,
26 | onToggle,
27 | onSearchChange,
28 | onFilterChange,
29 | onClear,
30 | getCategoryLabel,
31 | }) => {
32 | const selectedCount = selectedValues.length;
33 |
34 | return (
35 |
36 |
56 |
57 | {isExpanded && (
58 |
59 | {values.length > 5 && (
60 | onSearchChange(e.target.value)}
65 | className="filter-search"
66 | />
67 | )}
68 |
73 |
74 | )}
75 |
76 | );
77 | };
78 |
--------------------------------------------------------------------------------
/app/components/videos/FilterOptions.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | interface FilterOptionsProps {
4 | values: string[];
5 | selectedValues: string[];
6 | onFilterChange: (value: string) => void;
7 | }
8 |
9 | export const FilterOptions: React.FC<FilterOptionsProps> = ({ values, selectedValues, onFilterChange }) => {
10 | if (values.length === 0) {
11 | return No matches found
;
12 | }
13 |
14 | return (
15 |
16 | {values.map((value) => {
17 | const isSelected = selectedValues.includes(value);
18 | return (
19 |
27 | );
28 | })}
29 |
30 | );
31 | };
32 |
--------------------------------------------------------------------------------
/app/components/videos/FilterSidebar.tsx:
--------------------------------------------------------------------------------
1 | import '@/app/styles/FilterSidebar.css'
2 | import React from 'react';
3 | import { X } from 'lucide-react';
4 | import { Button } from '@/app/components/ui/Button';
5 | import { useFilterSidebar } from '@/app/hooks/useFilterSidebar';
6 | import { FilterGroup } from './FilterGroup';
7 |
8 | interface FilterSidebarProps {
9 | filters: Record;
10 | selectedFilters: Record;
11 | onFilterChange: (filters: Record) => void;
12 | onClose: () => void;
13 | }
14 |
15 | const getCategoryLabel = (category: string) => {
16 | const labels: Record = {
17 | cameras: 'Cameras',
18 | colors: 'Colors',
19 | locations: 'Locations',
20 | faces: 'People',
21 | objects: 'Objects',
22 | shotTypes: 'Shot Types',
23 | };
24 | return labels[category] || category;
25 | };
26 |
27 | export const FilterSidebar: React.FC<FilterSidebarProps> = ({
28 | filters,
29 | selectedFilters,
30 | onFilterChange,
31 | onClose,
32 | }) => {
33 | const {
34 | expandedCategories,
35 | searchTerms,
36 | toggleCategory,
37 | handleSearchTermChange,
38 | getFilteredValues,
39 | } = useFilterSidebar(filters);
40 |
41 | const handleFilterChange = (category: string, value: string) => {
42 | const newFilters = { ...selectedFilters };
43 | if (!newFilters[category]) {
44 | newFilters[category] = [];
45 | }
46 |
47 | if (newFilters[category].includes(value)) {
48 | newFilters[category] = newFilters[category].filter((item) => item !== value);
49 | } else {
50 | newFilters[category].push(value);
51 | }
52 |
53 | onFilterChange(newFilters);
54 | };
55 |
56 | const clearCategory = (category: string) => {
57 | const newFilters = { ...selectedFilters };
58 | newFilters[category] = [];
59 | onFilterChange(newFilters);
60 | };
61 |
62 | return (
63 |
64 |
65 |
Filters
66 |
67 |
68 |
69 |
70 |
71 |
72 | {Object.entries(filters).map(([category, values]) => (
73 | toggleCategory(category)}
81 | onSearchChange={(value) => handleSearchTermChange(category, value)}
82 | onFilterChange={(value) => handleFilterChange(category, value)}
83 | onClear={() => clearCategory(category)}
84 | getCategoryLabel={getCategoryLabel}
85 | />
86 | ))}
87 |
88 |
89 | );
90 | };
91 |
--------------------------------------------------------------------------------
/app/components/videos/VideoCard.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Video } from '@/lib/types/video';
3 | import { useVideoCard } from '../../hooks/useVideoCard';
4 | import { VideoThumbnail } from './VideoThumbnail';
5 | import { VideoMetadata } from './VideoMetadata';
6 | import { VideoTags } from './VideoTags';
7 |
8 | interface VideoCardProps {
9 | video: Video;
10 | viewMode: 'grid' | 'list';
11 | }
12 |
13 | export const VideoCard: React.FC<VideoCardProps> = ({ video, viewMode }) => {
14 | const { duration, sceneCount, uniqueFaces, uniqueLocations, dominantColor, videoTitle } = useVideoCard(video);
15 |
16 | return (
17 |
18 |
19 |
20 |
21 |
22 | {videoTitle}
23 |
24 |
25 |
33 |
34 | {viewMode === 'list' && }
35 |
36 |
37 | );
38 | };
39 |
--------------------------------------------------------------------------------
/app/components/videos/VideoGrid.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Video } from '@/lib/types/video';
3 | import { VideoCard } from './VideoCard';
4 |
5 | interface VideoGridProps {
6 | videos: Video[];
7 | }
8 |
9 | export const VideoGrid: React.FC<VideoGridProps> = ({ videos }) => {
10 | return (
11 |
12 | {videos.map((video) => (
13 |
14 | ))}
15 |
16 | );
17 | };
18 |
--------------------------------------------------------------------------------
/app/components/videos/VideoList.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Video } from '@/lib/types/video';
3 | import { VideoCard } from './VideoCard';
4 |
5 | interface VideoListProps {
6 | videos: Video[];
7 | }
8 |
9 | export const VideoList: React.FC<VideoListProps> = ({ videos }) => {
10 | return (
11 |
12 | {videos.map((video) => (
13 |
14 | ))}
15 |
16 | );
17 | };
18 |
--------------------------------------------------------------------------------
/app/components/videos/VideoMetadata.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Camera, User, MapPin, Palette } from 'lucide-react';
3 |
4 | interface VideoMetadataProps {
5 | camera?: string;
6 | sceneCount: number;
7 | uniqueFaces: string[];
8 | uniqueLocations: string[];
9 | dominantColor: { name: string; hex: string } | null;
10 | viewMode: 'grid' | 'list';
11 | }
12 |
13 | export const VideoMetadata: React.FC<VideoMetadataProps> = ({
14 | camera,
15 | sceneCount,
16 | uniqueFaces,
17 | uniqueLocations,
18 | dominantColor,
19 | viewMode,
20 | }) => {
21 | return (
22 |
23 | {camera && (
24 |
25 |
26 | {camera}
27 |
28 | )}
29 |
30 | {sceneCount > 0 && (
31 |
32 | Scenes:
33 | {sceneCount}
34 |
35 | )}
36 |
37 | {uniqueFaces.length > 0 && (
38 |
39 |
40 |
41 | {uniqueFaces.length} {uniqueFaces.length === 1 ? 'person' : 'people'}
42 |
43 |
44 | )}
45 |
46 | {uniqueLocations.length > 0 && (
47 |
48 |
49 |
50 | {uniqueLocations.length === 1 ? uniqueLocations[0] : `${uniqueLocations.length} locations`}
51 |
52 |
53 | )}
54 |
55 | {dominantColor && viewMode === 'list' && (
56 |
57 |
58 |
59 |
60 | {dominantColor.name}
61 |
62 |
63 | )}
64 |
65 | );
66 | };
67 |
--------------------------------------------------------------------------------
/app/components/videos/VideoTags.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { User, MapPin } from 'lucide-react';
3 |
4 | interface VideoTagsProps {
5 | uniqueFaces: string[];
6 | uniqueLocations: string[];
7 | }
8 |
9 | export const VideoTags: React.FC<VideoTagsProps> = ({ uniqueFaces, uniqueLocations }) => {
10 | return (
11 |
12 | {uniqueFaces.length > 0 && (
13 |
14 | {uniqueFaces.slice(0, 5).map((face) => (
15 |
16 |
17 | {face}
18 |
19 | ))}
20 | {uniqueFaces.length > 5 && +{uniqueFaces.length - 5}}
21 |
22 | )}
23 |
24 | {uniqueLocations.length > 0 && (
25 |
26 | {uniqueLocations.slice(0, 3).map((location) => (
27 |
28 |
29 | {location}
30 |
31 | ))}
32 | {uniqueLocations.length > 3 && +{uniqueLocations.length - 3}}
33 |
34 | )}
35 |
36 | );
37 | };
38 |
--------------------------------------------------------------------------------
/app/components/videos/VideoThumbnail.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Clock } from 'lucide-react';
3 | import { Video } from '@/lib/types/video';
4 |
5 | interface VideoThumbnailProps {
6 | video: Video;
7 | dominantColor: { name: string; hex: string } | null;
8 | duration: string;
9 | }
10 |
11 | export const VideoThumbnail: React.FC<VideoThumbnailProps> = ({ video, dominantColor, duration }) => {
12 | return (
13 |
14 |

19 | {video.duration && (
20 |
21 |
22 | {duration}
23 |
24 | )}
25 | {dominantColor && (
26 |
31 | )}
32 |
33 | );
34 | };
35 |
--------------------------------------------------------------------------------
/app/hooks/use-conveyor.ts:
--------------------------------------------------------------------------------
1 | type ConveyorKey = keyof Window['conveyor']
2 |
3 | /**
4 | * Use the conveyor for inter-process communication
5 | *
6 | * @param key - The key of the conveyor object to use
7 | * @returns The conveyor object or the keyed object
8 | */
9 | export const useConveyor = <T extends ConveyorKey | undefined = undefined>(
10 | key?: T
11 | ): T extends ConveyorKey ? Window['conveyor'][T] : Window['conveyor'] => {
12 | const conveyor = window.conveyor
13 |
14 | if (key) {
15 | return conveyor[key] as any
16 | }
17 |
18 | return conveyor as any
19 | }
20 |
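A minimal usage sketch (illustrative only, not part of the repository): the key narrows the returned object to one API namespace, so `useConveyor('app')` is typed as `Window['conveyor']['app']`. It assumes the `version` channel resolves to a string.

    import { useEffect, useState } from 'react'
    import { useConveyor } from '@/app/hooks/use-conveyor'

    // Hypothetical component, not in the repo.
    const VersionBadge = () => {
      const appApi = useConveyor('app') // narrowed to Window['conveyor']['app']
      const [version, setVersion] = useState('')
      useEffect(() => {
        appApi.version().then(setVersion) // IPC round trip through the preload bridge
      }, [appApi])
      return <span>{version}</span>
    }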
--------------------------------------------------------------------------------
/app/hooks/useClickOutside.tsx:
--------------------------------------------------------------------------------
1 | import { RefObject, useEffect } from 'react'
2 |
3 | export const useClickOutside = (refs: RefObject<HTMLElement>[], callback: () => void): void => {
4 | useEffect(() => {
5 | const handleClickOutside = (event: globalThis.MouseEvent): void => {
6 | const isClickOutside = refs.every((ref) => ref.current && !ref.current.contains(event.target as Node))
7 |
8 | if (isClickOutside) {
9 | callback()
10 | }
11 | }
12 |
13 | document.addEventListener('mousedown', handleClickOutside)
14 | return () => document.removeEventListener('mousedown', handleClickOutside)
15 | }, [refs, callback])
16 | }
17 |
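A sketch of how a caller might wire this up (illustrative only; the component, `setOpen`, and the refs are hypothetical). Because `refs` and `callback` are effect dependencies, they are kept stable so the mousedown listener is registered once rather than on every render.

    import { useCallback, useMemo, useRef, useState } from 'react'
    import { useClickOutside } from '@/app/hooks/useClickOutside'

    const DropdownExample = () => {
      const [open, setOpen] = useState(false)
      const triggerRef = useRef<HTMLButtonElement>(null)
      const menuRef = useRef<HTMLDivElement>(null)

      // Stable identities so the hook's effect does not re-run each render.
      const refs = useMemo(() => [triggerRef, menuRef], [])
      const close = useCallback(() => setOpen(false), [])
      useClickOutside(refs, close)

      return (
        <div>
          <button ref={triggerRef} onClick={() => setOpen((o) => !o)}>Toggle</button>
          {open && <div ref={menuRef}>Menu content</div>}
        </div>
      )
    }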
--------------------------------------------------------------------------------
/app/hooks/useDarkMode.ts:
--------------------------------------------------------------------------------
1 | import { useState, useEffect, useCallback } from 'react';
2 |
3 | type Theme = 'dark' | 'light';
4 |
5 | export const useDarkMode = (): [Theme, () => void] => {
6 | const [theme, setTheme] = useState<Theme>('light');
7 |
8 | useEffect(() => {
9 | const isDarkMode = document.documentElement.classList.contains('dark');
10 | setTheme(isDarkMode ? 'dark' : 'light');
11 | }, []);
12 |
13 | const toggleTheme = useCallback(() => {
14 | setTheme((prevTheme) => {
15 | const newTheme = prevTheme === 'light' ? 'dark' : 'light';
16 | document.documentElement.classList.toggle('dark', newTheme === 'dark');
17 | return newTheme;
18 | });
19 | }, []);
20 |
21 | return [theme, toggleTheme];
22 | };
23 |
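A minimal consumer sketch (illustrative only, not part of the repository): the hook reads the initial theme from the `dark` class on the root element and flips it on toggle.

    import { useDarkMode } from '@/app/hooks/useDarkMode'

    const ThemeButton = () => {
      const [theme, toggleTheme] = useDarkMode() // 'light' | 'dark'; toggling flips the .dark class on <html>
      return <button onClick={toggleTheme}>{theme === 'dark' ? 'Light mode' : 'Dark mode'}</button>
    }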
--------------------------------------------------------------------------------
/app/hooks/useFaceExtraction.tsx:
--------------------------------------------------------------------------------
1 | import type { FaceData, LoadedFaces } from '@/lib/types/search';
2 | import { Video } from '@/lib/types/video';
3 | import { useMemo } from 'react';
4 |
5 | const isUnknownFace = (faceName: string): boolean => {
6 | return faceName.toLowerCase().includes('unknown');
7 | };
8 |
9 | const FACE_PROTOCOL = 'face://' as const;
10 |
11 | const getFirstFaceImage = (name: string, loadedFaces: LoadedFaces): string | null => {
12 | const lowercasedName = name.toLowerCase();
13 | const foundKey = Object.keys(loadedFaces).find((key) => key.toLowerCase() === lowercasedName);
14 |
15 | if (foundKey && loadedFaces[foundKey]?.length > 0) {
16 | return loadedFaces[foundKey][0];
17 | }
18 |
19 | return null;
20 | };
21 |
22 | const processScenes = (videos: Video[], loadedFaces: LoadedFaces): Map<string, FaceData> => {
23 | const faceMap = new Map<string, FaceData>();
24 |
25 | for (const video of videos) {
26 | if (!video.scenes?.length) continue;
27 |
28 | for (const scene of video.scenes) {
29 | if (!scene.faces?.length) continue;
30 |
31 | for (const faceName of scene.faces) {
32 | if (isUnknownFace(faceName)) continue;
33 |
34 | const existing = faceMap.get(faceName);
35 |
36 | if (existing) {
37 | existing.count++;
38 | } else {
39 | const thumbnail = getFirstFaceImage(faceName.toLowerCase(), loadedFaces);
40 | faceMap.set(faceName, {
41 | name: faceName,
42 | count: 1,
43 | thumbnail: thumbnail ? `${FACE_PROTOCOL}${thumbnail}` : undefined,
44 | });
45 | }
46 | }
47 | }
48 | }
49 |
50 | return faceMap;
51 | };
52 |
53 | export const useFaceExtraction = (videos: Video[], loadedFaces: LoadedFaces): FaceData[] => {
54 | return useMemo(() => {
55 | const faceMap = processScenes(videos, loadedFaces);
56 | return Array.from(faceMap.values()).sort((a, b) => b.count - a.count);
57 | }, [videos, loadedFaces]);
58 | };
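A hedged sketch of the intended call pattern (the `FacesSummary` component is hypothetical): the hook flattens every scene's `faces` into one list, skips names containing "unknown", and sorts by appearance count.

    import { useFaceExtraction } from '@/app/hooks/useFaceExtraction'
    import type { LoadedFaces } from '@/lib/types/search'
    import { Video } from '@/lib/types/video'

    const FacesSummary = ({ videos, loadedFaces }: { videos: Video[]; loadedFaces: LoadedFaces }) => {
      const faces = useFaceExtraction(videos, loadedFaces) // FaceData[], most frequent first
      return (
        <ul>
          {faces.map((f) => (
            <li key={f.name}>
              {f.name} ({f.count})
            </li>
          ))}
        </ul>
      )
    }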
--------------------------------------------------------------------------------
/app/hooks/useFilterSidebar.ts:
--------------------------------------------------------------------------------
1 | import { useState, useCallback } from 'react';
2 |
3 | export const useFilterSidebar = (filters: Record<string, string[]>) => {
4 | const [expandedCategories, setExpandedCategories] = useState<Set<string>>(
5 | new Set(Object.keys(filters))
6 | );
7 | const [searchTerms, setSearchTerms] = useState<Record<string, string>>({});
8 |
9 | const toggleCategory = useCallback((category: string) => {
10 | setExpandedCategories((prev) => {
11 | const newExpanded = new Set(prev);
12 | if (newExpanded.has(category)) {
13 | newExpanded.delete(category);
14 | } else {
15 | newExpanded.add(category);
16 | }
17 | return newExpanded;
18 | });
19 | }, []);
20 |
21 | const handleSearchTermChange = useCallback((category: string, value: string) => {
22 | setSearchTerms((prev) => ({ ...prev, [category]: value }));
23 | }, []);
24 |
25 | const getFilteredValues = useCallback(
26 | (category: string, values: string[]) => {
27 | const searchTerm = searchTerms[category]?.toLowerCase() || '';
28 | return values.filter((value) => value.toLowerCase().includes(searchTerm));
29 | },
30 | [searchTerms]
31 | );
32 |
33 | return {
34 | expandedCategories,
35 | searchTerms,
36 | toggleCategory,
37 | handleSearchTermChange,
38 | getFilteredValues,
39 | };
40 | };
41 |
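A small sketch of the intended call pattern (illustrative only; the component and category data are hypothetical). `getFilteredValues` narrows a category's options by that category's own search term, so each group keeps an independent filter box.

    import { useFilterSidebar } from '@/app/hooks/useFilterSidebar'

    const filterableData = { faces: ['Alice', 'Bob'], locations: ['Lisbon', 'Porto'] } // hypothetical
    const SidebarExample = () => {
      const { expandedCategories, toggleCategory, handleSearchTermChange, getFilteredValues } =
        useFilterSidebar(filterableData)
      const visibleFaces = getFilteredValues('faces', filterableData.faces)
      return (
        <div>
          <button onClick={() => toggleCategory('faces')}>Faces</button>
          <input onChange={(e) => handleSearchTermChange('faces', e.target.value)} />
          {expandedCategories.has('faces') && visibleFaces.map((name) => <div key={name}>{name}</div>)}
        </div>
      )
    }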
--------------------------------------------------------------------------------
/app/hooks/useFilteredFaces.tsx:
--------------------------------------------------------------------------------
1 | import { FaceData } from "@/lib/types/search"
2 | import { useMemo } from "react"
3 |
4 | export const useFilteredFaces = (faces: FaceData[], query: string): FaceData[] => {
5 | return useMemo(() => {
6 | if (!query) return faces
7 |
8 | const lowercaseQuery = query.toLowerCase()
9 | return faces.filter((face) => face.name.toLowerCase().includes(lowercaseQuery))
10 | }, [faces, query])
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/app/hooks/useGeneration.ts:
--------------------------------------------------------------------------------
1 | import { useState, useCallback } from 'react';
2 | import { GenerationResult, VideoConfig } from '@/lib/types/search';
3 | import { ExportedScene, Scene } from '@/lib/types/scene';
4 |
5 | const stitchVideos = async (scenes: ExportedScene[], videoConfig: VideoConfig): Promise<GenerationResult> => {
6 | const outputFilename = `rough_cut_${Date.now()}`;
7 | const videoFilename = `${outputFilename}.mp4`;
8 | const fcpxmlFilename = `${outputFilename}.fcpxml`;
9 |
10 | await window.conveyor.app.stitchVideos(
11 | scenes,
12 | videoFilename,
13 | videoConfig.aspectRatio,
14 | videoConfig.fps
15 | );
16 |
17 | return {
18 | message: `Rough cut "${videoFilename}"`,
19 | videoPath: videoFilename,
20 | fcpxmlPath: fcpxmlFilename,
21 | };
22 | };
23 |
24 | export const useGeneration = (selectedScenes: Set<number>, searchResults: Scene[], videoConfig: VideoConfig) => {
25 | const [generationStatus, setGenerationStatus] = useState<string | null>(null);
26 | const [generationResult, setGenerationResult] = useState<GenerationResult | null>(null);
27 | const [loading, setLoading] = useState(false);
28 |
29 | const handleGenerateRoughCut = useCallback(async () => {
30 | if (selectedScenes.size === 0) {
31 | setGenerationStatus('Please select at least one scene to generate a rough cut.');
32 | return;
33 | }
34 |
35 | setLoading(true);
36 | setGenerationStatus('Generating rough cut...');
37 | setGenerationResult(null);
38 |
39 | try {
40 | const scenesToStitch: ExportedScene[] = Array.from(selectedScenes)
41 | .map((index) => {
42 | const scene = searchResults[index];
43 | return {
44 | source: scene.source,
45 | startTime: scene.startTime,
46 | endTime: scene.endTime,
47 | };
48 | })
49 | .sort((a, b) => {
50 | if (a.source < b.source) return -1;
51 | if (a.source > b.source) return 1;
52 | return a.startTime - b.startTime;
53 | });
54 |
55 | const result = await stitchVideos(scenesToStitch, videoConfig);
56 |
57 | setGenerationStatus(result.message);
58 | setGenerationResult(result);
59 | } catch (error) {
60 | console.error('Error generating rough cut:', error);
61 | const errorMessage = error instanceof Error ? error.message : 'Unknown error';
62 | setGenerationStatus(`Error generating rough cut: ${errorMessage}`);
63 | } finally {
64 | setLoading(false);
65 | }
66 | }, [selectedScenes, searchResults, videoConfig]);
67 |
68 | const handleOpenVideo = useCallback(async () => {
69 | if (!generationResult?.videoPath) return;
70 |
71 | try {
72 | await window.conveyor.app.openFile(generationResult.videoPath);
73 | } catch (error) {
74 | console.error('Error opening video:', error);
75 | const errorMessage = error instanceof Error ? error.message : 'Unknown error';
76 | setGenerationStatus(`Error opening video file: ${errorMessage}`);
77 | }
78 | }, [generationResult]);
79 |
80 | const handleShowInFinder = useCallback(async () => {
81 | if (!generationResult?.videoPath) return;
82 |
83 | try {
84 | await window.conveyor.app.showInFolder(generationResult.videoPath);
85 | } catch (error) {
86 | console.error('Error showing in finder:', error);
87 | const errorMessage = error instanceof Error ? error.message : 'Unknown error';
88 | setGenerationStatus(`Error showing file in folder: ${errorMessage}`);
89 | }
90 | }, [generationResult]);
91 |
92 | return {
93 | generationStatus,
94 | generationResult,
95 | loading,
96 | handleGenerateRoughCut,
97 | handleOpenVideo,
98 | handleShowInFinder,
99 | setGenerationStatus,
100 | setGenerationResult,
101 | };
102 | };
--------------------------------------------------------------------------------
/app/hooks/useSearch.ts:
--------------------------------------------------------------------------------
1 |
2 | import { useState } from 'react';
3 | import { Scene } from '@/lib/types/scene';
4 | import { SearchMetadata } from '@/lib/types/search';
5 |
6 | export const useSearch = (prompt: string, setVideoConfig) => {
7 | const [loading, setLoading] = useState(false);
8 | const [searchResults, setSearchResults] = useState<Scene[]>([]);
9 | const [searchMetadata, setSearchMetadata] = useState<SearchMetadata>({});
10 |
11 | const handleSearch = async () => {
12 | if (!prompt.trim()) return;
13 |
14 | setLoading(true);
15 | setSearchResults([]);
16 |
17 | try {
18 | const { results, aspect_ratio, faces } = await window.conveyor.app.searchDocuments(prompt);
19 | setSearchResults(results);
20 |
21 | if (aspect_ratio && faces) {
22 | setSearchMetadata({
23 | aspectRatio: aspect_ratio,
24 | faces,
25 | });
26 | }
27 |
28 | if (aspect_ratio) {
29 | setVideoConfig((prev) => ({ ...prev, aspectRatio: aspect_ratio }));
30 | }
31 | } catch (error) {
32 | console.error('Error searching scenes:', error);
33 | } finally {
34 | setLoading(false);
35 | }
36 | };
37 |
38 | return { loading, searchResults, searchMetadata, handleSearch, setSearchResults };
39 | };
40 |
--------------------------------------------------------------------------------
/app/hooks/useSearchSuggestions.ts:
--------------------------------------------------------------------------------
1 | import { useState, useEffect } from 'react';
2 | import { SearchSuggestion, VideoMetadataSummary } from '@/lib/types/search';
3 | import { Video, VideoMetadataMap } from '@/lib/types/video';
4 |
5 | const summarizeMetadata = (videoMetadata: VideoMetadataMap): VideoMetadataSummary => {
6 | const topFaces = Array.from(videoMetadata.faces.entries())
7 | .sort((a, b) => b[1] - a[1])
8 | .slice(0, 5)
9 | .map(([name, count]) => ({ name, count }));
10 |
11 | const topObjects = Array.from(videoMetadata.objects.entries())
12 | .sort((a, b) => b[1] - a[1])
13 | .slice(0, 10)
14 | .map(([name, count]) => ({ name, count }));
15 |
16 | const topEmotions = Array.from(videoMetadata.emotions.entries())
17 | .sort((a, b) => b[1] - a[1])
18 | .slice(0, 5)
19 | .map(([name, count]) => ({ name, count }));
20 |
21 | const shotTypes = Array.from(videoMetadata.shotTypes.entries())
22 | .sort((a, b) => b[1] - a[1])
23 | .map(([name, count]) => ({ name, count }));
24 |
25 | const aspectRatios = Array.from(videoMetadata.aspectRatios.entries())
26 | .sort((a, b) => b[1] - a[1])
27 | .map(([name, count]) => ({ name, count }));
28 |
29 | const cameras = Array.from(videoMetadata.cameras.entries())
30 | .sort((a, b) => b[1] - a[1])
31 | .map(([name, count]) => ({ name, count }));
32 |
33 | const topColors = Array.from(videoMetadata.colors.entries())
34 | .sort((a, b) => b[1] - a[1])
35 | .slice(0, 5)
36 | .map(([name, count]) => ({ name, count }));
37 |
38 | return {
39 | totalScenes: videoMetadata.totalScenes,
40 | topFaces,
41 | topObjects,
42 | topEmotions,
43 | shotTypes,
44 | aspectRatios,
45 | cameras,
46 | sampleDescriptions: videoMetadata.descriptions.slice(0, 10),
47 | topColors,
48 | };
49 | };
50 |
51 | export const useSearchSuggestions = (videos: Video[], videoMetadata: VideoMetadataMap) => {
52 | const [suggestions, setSuggestions] = useState<SearchSuggestion[]>([]);
53 | const [loadingSuggestions, setLoadingSuggestions] = useState(false);
54 |
55 | useEffect(() => {
56 | const generateSuggestions = async () => {
57 | if (videos.length === 0) return;
58 |
59 | setLoadingSuggestions(true);
60 |
61 | try {
62 | const metadataSummary = summarizeMetadata(videoMetadata);
63 | const response = await window.conveyor.app.generateSearchSuggestions(metadataSummary);
64 | setSuggestions(response);
65 | } catch (error) {
66 | console.error('Error generating suggestions:', error);
67 | } finally {
68 | setLoadingSuggestions(false);
69 | }
70 | };
71 |
72 | generateSuggestions();
73 | }, [videos, videoMetadata]);
74 |
75 | return { suggestions, loadingSuggestions };
76 | };
--------------------------------------------------------------------------------
/app/hooks/useSettings.ts:
--------------------------------------------------------------------------------
1 | import { useState, useEffect, useCallback } from 'react';
2 | import { SettingsConfig } from '@/lib/types/settings';
3 |
4 | export const useSettings = () => {
5 | const [settings, setSettings] = useState<Partial<SettingsConfig>>({});
6 |
7 | useEffect(() => {
8 | const fetchSettings = async () => {
9 | try {
10 | const fetchedSettings = await window.conveyor.app.getSettings();
11 | setSettings(fetchedSettings);
12 | } catch (error) {
13 | console.error('Error fetching settings:', error);
14 | }
15 | };
16 | fetchSettings();
17 | }, []);
18 |
19 | const handleInputChange = useCallback((e: React.ChangeEvent<HTMLInputElement>) => {
20 | const { id, value, type, checked } = e.target;
21 | setSettings((prev) => ({
22 | ...prev,
23 | [id]: type === 'checkbox' ? checked : type === 'number' ? parseFloat(value) : value,
24 | }));
25 | }, []);
26 |
27 | const handleSwitchChange = useCallback((id: string, checked: boolean) => {
28 | setSettings((prev) => ({ ...prev, [id]: checked }));
29 | }, []);
30 |
31 | const handleSaveSettings = useCallback(async () => {
32 | try {
33 | await window.conveyor.app.saveSettings(settings);
34 | alert('Settings saved successfully!');
35 | } catch (error) {
36 | console.error('Error saving settings:', error);
37 | alert('Error saving settings. Please check the console for details.');
38 | }
39 | }, [settings]);
40 |
41 | return {
42 | settings,
43 | handleInputChange,
44 | handleSwitchChange,
45 | handleSaveSettings,
46 | };
47 | };
48 |
--------------------------------------------------------------------------------
/app/hooks/useVideoCard.ts:
--------------------------------------------------------------------------------
1 | import { useMemo } from 'react';
2 | import { Video } from '@/lib/types/video';
3 |
4 | const formatDuration = (seconds: number) => {
5 | const mins = Math.floor(seconds / 60);
6 | const secs = Math.floor(seconds % 60);
7 | return `${mins}:${secs.toString().padStart(2, '0')}`;
8 | };
9 |
10 | const getSceneCount = (video: Video) => video.scenes?.length || 0;
11 |
12 | const getUniqueFaces = (video: Video) => {
13 | const faces = new Set<string>();
14 | video.scenes?.forEach((scene) => {
15 | scene.faces?.forEach((face) => faces.add(face));
16 | });
17 | return Array.from(faces);
18 | };
19 |
20 | const getUniqueLocations = (video: Video) => {
21 | const locations = new Set<string>();
22 | video.scenes?.forEach((scene) => {
23 | if (scene.location) locations.add(scene.location);
24 | });
25 | return Array.from(locations);
26 | };
27 |
28 | const getDominantColor = (video: Video) => {
29 | const colorMap = new Map<string, { name: string; hex: string; count: number }>();
30 |
31 | video.scenes?.forEach((scene) => {
32 | if (scene.dominantColorName && scene.dominantColorHex) {
33 | const key = scene.dominantColorHex;
34 | if (colorMap.has(key)) {
35 | colorMap.get(key)!.count++;
36 | } else {
37 | colorMap.set(key, {
38 | name: scene.dominantColorName,
39 | hex: scene.dominantColorHex,
40 | count: 1,
41 | });
42 | }
43 | }
44 | });
45 |
46 | if (colorMap.size === 0) return null;
47 |
48 | return Array.from(colorMap.values()).sort((a, b) => b.count - a.count)[0];
49 | };
50 |
51 | export const useVideoCard = (video: Video) => {
52 | const duration = useMemo(() => formatDuration(parseInt(video.duration?.toString() || '0')), [video.duration]);
53 | const sceneCount = useMemo(() => getSceneCount(video), [video.scenes]);
54 | const uniqueFaces = useMemo(() => getUniqueFaces(video), [video.scenes]);
55 | const uniqueLocations = useMemo(() => getUniqueLocations(video), [video.scenes]);
56 | const dominantColor = useMemo(() => getDominantColor(video), [video.scenes]);
57 |
58 | const videoTitle = useMemo(() => {
59 | return (
60 | video.source
61 | .split('/')
62 | .pop()
63 | ?.replace(/\.[^/.]+$/, '') || video.source
64 | );
65 | }, [video.source]);
66 |
67 | return {
68 | duration,
69 | sceneCount,
70 | uniqueFaces,
71 | uniqueLocations,
72 | dominantColor,
73 | videoTitle,
74 | };
75 | };
76 |
--------------------------------------------------------------------------------
/app/hooks/useVideoMetadata.ts:
--------------------------------------------------------------------------------
1 | import { useMemo } from 'react';
2 | import { Video, VideoMetadataMap } from '@/lib/types/video';
3 |
4 | const processScene = (scene, metadata) => {
5 | if (scene.faces) {
6 | scene.faces.forEach((face) => {
7 | if (!face.toLocaleLowerCase().includes('unknown')) {
8 | metadata.faces.set(face, (metadata.faces.get(face) || 0) + 1);
9 | }
10 | });
11 | }
12 |
13 | if (scene.objects) {
14 | scene.objects.forEach((obj) => {
15 | if (obj) {
16 | metadata.objects.set(obj, (metadata.objects.get(obj) || 0) + 1);
17 | }
18 | });
19 | }
20 |
21 | if (scene.emotions) {
22 | scene.emotions.forEach((emotion) => {
23 | if (emotion) {
24 | metadata.emotions.set(emotion.emotion, (metadata.emotions.get(emotion.emotion) || 0) + 1);
25 | }
26 | });
27 | }
28 |
29 | if (scene.shot_type && scene.shot_type !== 'N/A') {
30 | metadata.shotTypes.set(scene.shot_type, (metadata.shotTypes.get(scene.shot_type) || 0) + 1);
31 | }
32 | if (scene.dominantColorName && scene.dominantColorName !== 'N/A') {
33 | metadata.colors.set(scene.dominantColorName, (metadata.colors.get(scene.dominantColorName) || 0) + 1);
34 | }
35 |
36 | if (scene.description) {
37 | metadata.descriptions.push(scene.description);
38 | }
39 | };
40 |
41 | const processVideo = (video, metadata) => {
42 | if (video.scenes && video.scenes.length > 0) {
43 | metadata.totalScenes += video.scenes.length;
44 | video.scenes.forEach((scene) => processScene(scene, metadata));
45 | }
46 |
47 | if (video.aspect_ratio) {
48 | metadata.aspectRatios.set(video.aspect_ratio, (metadata.aspectRatios.get(video.aspect_ratio) || 0) + 1);
49 | }
50 |
51 | if (video.camera) {
52 | metadata.cameras.set(video.camera, (metadata.cameras.get(video.camera) || 0) + 1);
53 | }
54 | };
55 |
56 | export const useVideoMetadata = (videos: Video[]): VideoMetadataMap => {
57 | return useMemo(() => {
58 | const metadata: VideoMetadataMap = {
59 | faces: new Map(),
60 | objects: new Map(),
61 | emotions: new Map(),
62 | shotTypes: new Map(),
63 | aspectRatios: new Map(),
64 | cameras: new Map(),
65 | descriptions: [] as string[],
66 | totalScenes: 0,
67 | colors: new Map(),
68 | };
69 |
70 | videos.forEach((video) => processVideo(video, metadata));
71 |
72 | return metadata;
73 | }, [videos]);
74 | };
--------------------------------------------------------------------------------
/app/hooks/useWelcome.ts:
--------------------------------------------------------------------------------
1 | import { useState, useEffect, useCallback } from 'react';
2 | import { IndexingProgressProps } from '../components/IndexingProgress';
3 | import { useConveyor } from './use-conveyor';
4 |
5 | export const useWelcome = () => {
6 | const [selectedFolder, setSelectedFolder] = useState<string | null>(null);
7 | const [videos, setVideos] = useState<string[]>([]);
8 | const [isIndexing, setIsIndexing] = useState(false);
9 | const [indexingProgress, setIndexingProgress] = useState<IndexingProgressProps | null>(null);
10 | const appApi = useConveyor('app');
11 |
12 | useEffect(() => {
13 | if (!appApi) return;
14 |
15 | const unsubscribe = appApi.on('indexing-progress', (progress) => {
16 | setIndexingProgress(progress);
17 | });
18 |
19 | return () => {
20 | unsubscribe();
21 | };
22 | }, [appApi]);
23 |
24 | const handleSelectFolder = useCallback(async () => {
25 | if (!appApi) return;
26 | const result = await appApi.selectFolder();
27 | if (result) {
28 | setSelectedFolder(result.folderPath);
29 | setVideos(result.videos);
30 | }
31 | }, [appApi]);
32 |
33 | const handleStartIndexing = useCallback(async () => {
34 | if (!appApi) return;
35 | setIsIndexing(true);
36 | await appApi.startIndexing(videos);
37 | setIsIndexing(false);
38 | }, [appApi, videos]);
39 |
40 | const handleCancelIndexing = useCallback(() => {
41 | setSelectedFolder(null);
42 | setVideos([]);
43 | setIsIndexing(false);
44 | setIndexingProgress(null);
45 | }, []);
46 |
47 | return {
48 | selectedFolder,
49 | videos,
50 | isIndexing,
51 | indexingProgress,
52 | handleSelectFolder,
53 | handleStartIndexing,
54 | handleCancelIndexing,
55 | };
56 | };
57 |
--------------------------------------------------------------------------------
/app/icons/AsteriskIcon.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | export const AsteriskIcon = (props: React.SVGProps<SVGSVGElement>) => (
4 |
7 | );
8 |
--------------------------------------------------------------------------------
/app/icons/CodeWindowIcon.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | export const CodeWindowIcon = (props: React.SVGProps<SVGSVGElement>) => (
4 |
7 | );
8 |
--------------------------------------------------------------------------------
/app/icons/ColorSchemeIcon.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | export const ColorSchemeIcon = (props: React.SVGProps<SVGSVGElement>) => (
4 |
7 | );
8 |
--------------------------------------------------------------------------------
/app/icons/ErrorIcon.tsx:
--------------------------------------------------------------------------------
1 | export const ErrorIcon: React.FC<React.SVGProps<SVGSVGElement>> = (props) => (
2 |
10 | )
11 |
--------------------------------------------------------------------------------
/app/icons/FanIcon.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | export const FanIcon: React.FC<React.SVGProps<SVGSVGElement>> = (props) => (
4 |
7 | );
8 |
--------------------------------------------------------------------------------
/app/icons/FolderCheckIcon.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | export const FolderCheckIcon: React.FC<React.SVGProps<SVGSVGElement>> = (props) => (
4 |
8 | );
--------------------------------------------------------------------------------
/app/icons/FolderIcon.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | export const FolderIcon: React.FC<React.SVGProps<SVGSVGElement>> = (props) => (
4 |
7 | );
8 |
--------------------------------------------------------------------------------
/app/icons/GearIcon.tsx:
--------------------------------------------------------------------------------
1 |
2 | import React from 'react';
3 |
4 | export const GearIcon: React.FC<React.SVGProps<SVGSVGElement>> = (props) => (
5 |
28 | );
29 |
--------------------------------------------------------------------------------
/app/icons/IndexIcon.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | export const IndexIcon: React.FC<React.SVGProps<SVGSVGElement>> = (props) => (
4 |
8 | );
9 |
10 |
--------------------------------------------------------------------------------
/app/icons/PlayIcon.tsx:
--------------------------------------------------------------------------------
1 | export const PlayIcon = () => (
2 |
5 | )
6 |
--------------------------------------------------------------------------------
/app/icons/ThunderIcon.tsx:
--------------------------------------------------------------------------------
1 | export const ThunderIcon = () => (
2 |
5 | )
6 |
--------------------------------------------------------------------------------
/app/icons/VideoIcon.tsx:
--------------------------------------------------------------------------------
1 | export const VideoIcon = () => (
2 |
6 | )
7 |
--------------------------------------------------------------------------------
/app/index.d.ts:
--------------------------------------------------------------------------------
1 | ///
2 |
3 | import { ConveyorApi } from '@/lib/conveyor/api'
4 |
5 | declare global {
6 | interface Window {
7 | conveyor: ConveyorApi
8 | }
9 | }
10 |
11 | declare module '*.css' {
12 | const content: string
13 | export default content
14 | }
15 |
16 | declare module '*.png' {
17 | const content: string
18 | export default content
19 | }
20 |
21 | declare module '*.jpg' {
22 | const content: string
23 | export default content
24 | }
25 |
26 | declare module '*.jpeg' {
27 | const content: string
28 | export default content
29 | }
30 |
31 | declare module '*.svg' {
32 | const content: string
33 | export default content
34 | }
35 |
36 | declare module '*.web' {
37 | const content: string
38 | export default content
39 | }
40 |
--------------------------------------------------------------------------------
/app/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Electron
6 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/app/pages/Settings.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Button } from '@/app/components/ui/button';
3 | import { useSettings } from '@/app/hooks/useSettings';
4 | import { SettingsSection } from '@/app/components/settings/SettingsSection';
5 | import { SettingsInput } from '@/app/components/settings/SettingsInput';
6 | import { SettingsSwitch } from '@/app/components/settings/SettingsSwitch';
7 |
8 | export const Settings: React.FC = () => {
9 | const { settings, handleInputChange, handleSwitchChange, handleSaveSettings } = useSettings();
10 |
11 | return (
12 |
13 |
14 |
21 |
28 |
35 |
45 |
55 |
62 |
69 | handleSwitchChange('resize_to_720p', checked)}
74 | />
75 |
76 | Save Settings
77 |
78 | );
79 | };
--------------------------------------------------------------------------------
/app/pages/Videos.tsx:
--------------------------------------------------------------------------------
1 | import '@/app/styles/Videos.css';
2 | import React, { useState } from 'react';
3 | import { FilterSidebar } from '@/app/components/videos/FilterSidebar';
4 | import { useVideos } from '@/app/hooks/useVideos';
5 | import { VideoGrid } from '@/app/components/videos/VideoGrid';
6 | import { VideoList } from '@/app/components/videos/VideoList';
7 | import { FilterBar } from '@/app/components/videos/FilterBar';
8 | import { Button } from '@/app/components/ui/button';
9 | import { X } from 'lucide-react';
10 |
11 | type ViewMode = 'grid' | 'list';
12 |
13 | export const Videos: React.FC = () => {
14 | const {
15 | filteredVideos,
16 | filters,
17 | setFilters,
18 | searchQuery,
19 | setSearchQuery,
20 | isLoading,
21 | filterableData,
22 | activeFiltersCount,
23 | clearAllFilters,
24 | } = useVideos();
25 |
26 | const [viewMode, setViewMode] = useState<ViewMode>('grid');
27 | const [showFilters, setShowFilters] = useState(true);
28 |
29 | const removeFilter = (category: string, value: string) => {
30 | const newFilters = { ...filters };
31 | newFilters[category] = newFilters[category].filter((v) => v !== value);
32 | setFilters(newFilters);
33 | };
34 |
35 | return (
36 |
37 | {showFilters && (
38 |
setShowFilters(false)}
43 | />
44 | )}
45 |
46 |
47 |
56 |
57 | {activeFiltersCount > 0 && (
58 |
59 |
Active filters:
60 | {Object.entries(filters).map(([category, values]) =>
61 | values.map((value) => (
62 |
63 | {category.slice(0, -1)}:
64 | {value}
65 | removeFilter(category, value)} className="filter-chip-remove">
66 |
67 |
68 |
69 | ))
70 | )}
71 |
72 | Clear all
73 |
74 |
75 | )}
76 |
77 | {isLoading ? (
78 |
79 |
80 |
Loading videos...
81 |
82 | ) : filteredVideos.length === 0 ? (
83 |
84 |
🎬
85 |
No videos found
86 |
87 | {searchQuery || activeFiltersCount > 0
88 | ? 'Try adjusting your search or filters'
89 | : 'Start by adding videos to your library'}
90 |
91 | {(searchQuery || activeFiltersCount > 0) &&
Clear filters}
92 |
93 | ) : viewMode === 'grid' ? (
94 |
95 | ) : (
96 |
97 | )}
98 |
99 |
100 | );
101 | };
--------------------------------------------------------------------------------
/app/renderer.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react'
2 | import ReactDOM from 'react-dom/client'
3 | import { ErrorBoundary } from './components/ErrorBoundary'
4 | import App from './app'
5 |
6 | ReactDOM.createRoot(document.getElementById('app') as HTMLElement).render(
7 | <React.StrictMode>
8 | <ErrorBoundary>
9 | <App />
10 | </ErrorBoundary>
11 | </React.StrictMode>
12 | )
13 |
--------------------------------------------------------------------------------
/app/styles/FilterSidebar.css:
--------------------------------------------------------------------------------
1 |
2 | .filter-sidebar {
3 | display: flex;
4 | flex-direction: column;
5 | height: 100%;
6 | background: var(--background);
7 | border-left: 1px solid var(--border);
8 | width: 20%;
9 | }
10 |
11 | .filter-sidebar-header {
12 | display: flex;
13 | align-items: center;
14 | justify-content: space-between;
15 | padding: 1rem;
16 | border-bottom: 1px solid var(--border);
17 | flex-shrink: 0;
18 | }
19 |
20 | .filter-sidebar-header h3 {
21 | margin: 0;
22 | font-size: 1.125rem;
23 | font-weight: 600;
24 | }
25 |
26 | .filter-sidebar-content {
27 | overflow-y: auto;
28 | }
29 |
30 | .filter-sidebar-content::-webkit-scrollbar {
31 | width: 8px;
32 | }
33 |
34 | .filter-sidebar-content::-webkit-scrollbar-track {
35 | background: transparent;
36 | }
37 |
38 | .filter-sidebar-content::-webkit-scrollbar-thumb {
39 | background: var(--border);
40 | border-radius: 4px;
41 | }
42 |
43 | .filter-sidebar-content::-webkit-scrollbar-thumb:hover {
44 | background: var(--muted-foreground);
45 | }
46 |
47 | .filter-group {
48 | margin-bottom: 0.5rem;
49 | }
50 |
51 | .filter-group-header {
52 | width: 100%;
53 | display: flex;
54 | align-items: center;
55 | justify-content: space-between;
56 | padding: 0.75rem;
57 | background: transparent;
58 | border: none;
59 | border-radius: 6px;
60 | cursor: pointer;
61 | transition: background-color 0.2s;
62 | }
63 |
64 | .filter-group-header:hover {
65 | background: var(--muted);
66 | }
67 |
68 | .filter-group-title {
69 | display: flex;
70 | align-items: center;
71 | gap: 0.5rem;
72 | font-weight: 500;
73 | }
74 |
75 | .filter-count-badge {
76 | display: inline-flex;
77 | align-items: center;
78 | justify-content: center;
79 | min-width: 1.5rem;
80 | height: 1.5rem;
81 | padding: 0 0.5rem;
82 | background: var(--primary);
83 | color: var(--primary-foreground);
84 | border-radius: 999px;
85 | font-size: 0.75rem;
86 | font-weight: 600;
87 | }
88 |
89 | .clear-category-btn {
90 | font-size: 0.875rem;
91 | }
92 |
93 | .filter-group-content {
94 | padding: 0.5rem 0.75rem 0.75rem 0.75rem;
95 | }
96 |
97 | .filter-search {
98 | margin-bottom: 0.75rem;
99 | }
100 |
101 | .filter-options {
102 | display: flex;
103 | flex-direction: column;
104 | gap: 0.5rem;
105 | max-height: 300px;
106 | overflow-y: auto;
107 | }
108 |
109 | .filter-options::-webkit-scrollbar {
110 | width: 6px;
111 | }
112 |
113 | .filter-options::-webkit-scrollbar-track {
114 | background: transparent;
115 | }
116 |
117 | .filter-options::-webkit-scrollbar-thumb {
118 | background: var(--border);
119 | border-radius: 3px;
120 | }
121 |
122 | .filter-option {
123 | display: flex;
124 | align-items: center;
125 | gap: 0.5rem;
126 | padding: 0.5rem;
127 | border-radius: 4px;
128 | cursor: pointer;
129 | transition: background-color 0.2s;
130 | }
131 |
132 | .filter-option:hover {
133 | background: var(--muted);
134 | }
135 |
136 | .filter-option.selected {
137 | background: var(--accent);
138 | }
139 |
140 | .filter-option input[type='checkbox'] {
141 | cursor: pointer;
142 | }
143 |
144 | .filter-option-label {
145 | flex: 1;
146 | font-size: 0.875rem;
147 | }
148 |
149 | .no-results {
150 | color: var(--muted-foreground);
151 | font-size: 0.875rem;
152 | text-align: center;
153 | padding: 1rem;
154 | }
155 |
--------------------------------------------------------------------------------
/app/styles/app.css:
--------------------------------------------------------------------------------
1 | @import './globals.css';
2 | @import './window.css';
3 |
4 | body {
5 | font-family:
6 | system-ui,
7 | -apple-system,
8 | Arial,
9 | Helvetica,
10 | sans-serif;
11 | font-size: 14px;
12 | margin: 0;
13 | overflow: hidden;
14 | }
15 |
16 | html,
17 | body,
18 | #app {
19 | height: 100%;
20 | margin: 0;
21 | line-height: 1.4;
22 | }
23 |
24 | .custom-scrollbar::-webkit-scrollbar {
25 | width: 8px;
26 | height: 8px;
27 | }
28 |
29 | .custom-scrollbar::-webkit-scrollbar-track {
30 | background: #f1f1f1;
31 | border-radius: 10px;
32 | }
33 |
34 | .custom-scrollbar::-webkit-scrollbar-thumb {
35 | background: #888;
36 | border-radius: 10px;
37 | }
38 |
39 | .custom-scrollbar::-webkit-scrollbar-thumb:hover {
40 | background: #555;
41 | }
--------------------------------------------------------------------------------
/app/styles/globals.css:
--------------------------------------------------------------------------------
1 | @import 'tailwindcss';
2 | @source '@/app';
3 | @source '@/lib';
4 |
5 | @theme {
6 | --color-background: var(--background);
7 | --color-foreground: var(--foreground);
8 | --color-card: var(--card);
9 | --color-card-foreground: var(--card-foreground);
10 | --color-popover: var(--popover);
11 | --color-popover-foreground: var(--popover-foreground);
12 | --color-primary: var(--primary);
13 | --color-primary-foreground: var(--primary-foreground);
14 | --color-secondary: var(--secondary);
15 | --color-secondary-foreground: var(--secondary-foreground);
16 | --color-muted: var(--muted);
17 | --color-muted-foreground: var(--muted-foreground);
18 | --color-accent: var(--accent);
19 | --color-accent-foreground: var(--accent-foreground);
20 | --color-destructive: var(--destructive);
21 | --color-destructive-foreground: var(--destructive-foreground);
22 | --color-border: var(--border);
23 | --color-input: var(--input);
24 | --color-ring: var(--ring);
25 | --color-chart-1: var(--chart-1);
26 | --color-chart-2: var(--chart-2);
27 | --color-chart-3: var(--chart-3);
28 | --color-chart-4: var(--chart-4);
29 | --color-chart-5: var(--chart-5);
30 | --radius-lg: var(--radius);
31 | --radius-md: calc(var(--radius) - 2px);
32 | --radius-sm: calc(var(--radius) - 4px);
33 | }
34 |
35 | @layer base {
36 | :root {
37 | --background: var(--color-neutral-50);
38 | --foreground: var(--color-neutral-900);
39 | --card: hsl(0 0% 100%);
40 | --card-foreground: hsl(0 0% 3.9%);
41 | --popover: hsl(0 0% 100%);
42 | --popover-foreground: hsl(0 0% 3.9%);
43 | --primary: hsl(0 0% 9%);
44 | --primary-foreground: hsl(0 0% 98%);
45 | --secondary: hsl(0 0% 96.1%);
46 | --secondary-foreground: hsl(0 0% 9%);
47 | --muted: hsl(0 0% 96.1%);
48 | --muted-foreground: hsl(0 0% 45.1%);
49 | --accent: hsl(0 0% 96.1%);
50 | --accent-foreground: hsl(0 0% 9%);
51 | --destructive: hsl(0 84.2% 60.2%);
52 | --destructive-foreground: hsl(0 0% 98%);
53 | --border: hsl(0 0% 89.8%);
54 | --input: hsl(0 0% 89.8%);
55 | --ring: hsl(0 0% 3.9%);
56 | --chart-1: hsl(12 76% 61%);
57 | --chart-2: hsl(173 58% 39%);
58 | --chart-3: hsl(197 37% 24%);
59 | --chart-4: hsl(43 74% 66%);
60 | --chart-5: hsl(27 87% 67%);
61 | --radius: 0.5rem;
62 | }
63 |
64 | .dark {
65 | --background: var(--color-neutral-950);
66 | --foreground: var(--color-neutral-100);
67 | --card: hsl(0, 0%, 3.9%);
68 | --card-foreground: hsl(0 0% 98%);
69 | --popover: hsl(0 0% 3.9%);
70 | --popover-foreground: hsl(0 0% 98%);
71 | --primary: hsl(0 0% 98%);
72 | --primary-foreground: hsl(0 0% 9%);
73 | --secondary: hsl(0 0% 14.9%);
74 | --secondary-foreground: hsl(0 0% 98%);
75 | --muted: hsl(0 0% 14.9%);
76 | --muted-foreground: hsl(0 0% 63.9%);
77 | --accent: hsl(0 0% 14.9%);
78 | --accent-foreground: hsl(0 0% 98%);
79 | --destructive: hsl(0 62.8% 30.6%);
80 | --destructive-foreground: hsl(0 0% 98%);
81 | --border: hsl(0 0% 14.9%);
82 | --input: hsl(0 0% 14.9%);
83 | --ring: hsl(0 0% 83.1%);
84 | --chart-1: hsl(220 70% 50%);
85 | --chart-2: hsl(160 60% 45%);
86 | --chart-3: hsl(30 80% 55%);
87 | --chart-4: hsl(280 65% 60%);
88 | --chart-5: hsl(340 75% 55%);
89 | }
90 | }
91 |
--------------------------------------------------------------------------------
/app/utils/search.ts:
--------------------------------------------------------------------------------
1 | export const MENTION_TRIGGER = '@' as const
2 | export const AUTOCOMPLETE_KEYS = ['ArrowDown', 'ArrowUp', 'Enter', 'Escape', 'Tab'] as const
3 | const MENTION_REGEX = /@(\w+)/g
4 |
5 | interface TextPart {
6 | text: string
7 | isMention: boolean
8 | }
9 |
10 | export const parseMentions = (text: string): TextPart[] => {
11 | const parts: TextPart[] = []
12 | let lastIndex = 0
13 | let match: RegExpExecArray | null
14 |
15 | const regex = new RegExp(MENTION_REGEX)
16 |
17 | while ((match = regex.exec(text)) !== null) {
18 | // Add text before mention
19 | if (match.index > lastIndex) {
20 | parts.push({
21 | text: text.slice(lastIndex, match.index),
22 | isMention: false,
23 | })
24 | }
25 |
26 | // Add mention
27 | parts.push({
28 | text: match[0],
29 | isMention: true,
30 | })
31 |
32 | lastIndex = match.index + match[0].length
33 | }
34 |
35 | // Add remaining text
36 | if (lastIndex < text.length) {
37 | parts.push({
38 | text: text.slice(lastIndex),
39 | isMention: false,
40 | })
41 | }
42 |
43 | return parts
44 | }
45 |
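Worked example (illustrative input): mentions come back as separate parts so a consuming component can style them independently of the surrounding text.

    import { parseMentions } from '@/app/utils/search'

    parseMentions('clips with @Alice at the beach')
    // → [
    //   { text: 'clips with ', isMention: false },
    //   { text: '@Alice', isMention: true },
    //   { text: ' at the beach', isMention: false },
    // ]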
--------------------------------------------------------------------------------
/components.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://ui.shadcn.com/schema.json",
3 | "style": "new-york",
4 | "rsc": false,
5 | "tsx": true,
6 | "tailwind": {
7 | "config": "",
8 | "css": "app/styles/globals.css",
9 | "baseColor": "neutral",
10 | "cssVariables": true
11 | },
12 | "aliases": {
13 | "components": "@/app/components",
14 | "utils": "@/lib/utils",
15 | "ui": "@/app/components/ui",
16 | "lib": "@/lib",
17 | "hooks": "@/lib/hooks"
18 | },
19 | "iconLibrary": "lucide"
20 | }
21 |
--------------------------------------------------------------------------------
/electron-builder.yml:
--------------------------------------------------------------------------------
1 | appId: com.guasam.era
2 | productName: ElectronReactApp
3 | directories:
4 | buildResources: resources/build
5 | files:
6 | - '!**/.vscode/*'
7 | - '!src/*'
8 | - '!electron.vite.config.{js,ts,mjs,cjs}'
9 | - '!{.eslintignore,.eslintrc.cjs,.prettierignore,.prettierrc.yaml,dev-app-update.yml,CHANGELOG.md,README.md}'
10 | - '!{.env,.env.*,.npmrc,pnpm-lock.yaml}'
11 | - '!{tsconfig.json,tsconfig.node.json,tsconfig.web.json}'
12 | - "!python/.venv/**"
13 |
14 | asarUnpack:
15 | - resources/**
16 | - node_modules/ffmpeg-static/**/*
17 | - node_modules/ffprobe-static/**/*
18 | - node_modules/exiftool-vendored/**/*
19 | win:
20 | executableName: ElectronReactApp
21 | nsis:
22 | artifactName: ${name}-${version}-setup.${ext}
23 | shortcutName: ${productName}
24 | uninstallDisplayName: ${productName}
25 | createDesktopShortcut: always
26 | mac:
27 | entitlementsInherit: resources/build/entitlements.mac.plist
28 | extendInfo:
29 | - NSCameraUsageDescription: Application requests access to the device's camera.
30 | - NSMicrophoneUsageDescription: Application requests access to the device's microphone.
31 | - NSDocumentsFolderUsageDescription: Application requests access to the user's Documents folder.
32 | - NSDownloadsFolderUsageDescription: Application requests access to the user's Downloads folder.
33 | notarize: false
34 | dmg:
35 | artifactName: ${name}-${version}.${ext}
36 | linux:
37 | target:
38 | - AppImage
39 | - snap
40 | - deb
41 | maintainer: electronjs.org
42 | category: Utility
43 | appImage:
44 | artifactName: ${name}-${version}.${ext}
45 | npmRebuild: false
46 | publish:
47 | provider: generic
48 | url: https://example.com/auto-updates
49 |
--------------------------------------------------------------------------------
/electron.vite.config.ts:
--------------------------------------------------------------------------------
1 | import { resolve } from 'path'
2 | import react from '@vitejs/plugin-react'
3 | import tailwindcss from '@tailwindcss/vite'
4 | import { defineConfig, externalizeDepsPlugin } from 'electron-vite'
5 |
6 | // Shared alias configuration
7 | const aliases = {
8 | '@/app': resolve(__dirname, 'app'),
9 | '@/lib': resolve(__dirname, 'lib'),
10 | '@/resources': resolve(__dirname, 'resources'),
11 | }
12 |
13 | export default defineConfig({
14 | main: {
15 | build: {
16 | rollupOptions: {
17 | input: {
18 | main: resolve(__dirname, 'lib/main/main.ts'),
19 | },
20 | external: ['node-whisper'],
21 | },
22 | },
23 | resolve: {
24 | alias: aliases,
25 | },
26 | plugins: [externalizeDepsPlugin()],
27 | },
28 | preload: {
29 | build: {
30 | rollupOptions: {
31 | input: {
32 | preload: resolve(__dirname, 'lib/preload/preload.ts'),
33 | },
34 | external: ['node-whisper'],
35 | },
36 | },
37 | resolve: {
38 | alias: aliases,
39 | },
40 | plugins: [externalizeDepsPlugin()],
41 | },
42 | renderer: {
43 | root: './app',
44 | build: {
45 | rollupOptions: {
46 | input: {
47 | index: resolve(__dirname, 'app/index.html'),
48 | },
49 | },
50 | },
51 | resolve: {
52 | alias: aliases,
53 | },
54 | plugins: [tailwindcss(), react()],
55 | },
56 | })
57 |
--------------------------------------------------------------------------------
/eslint.config.mjs:
--------------------------------------------------------------------------------
1 | import eslint from '@eslint/js'
2 | import tseslint from 'typescript-eslint'
3 | import reactPlugin from 'eslint-plugin-react'
4 | import reactHooksPlugin from 'eslint-plugin-react-hooks'
5 |
6 | export default [
7 | {
8 | ignores: [
9 | 'node_modules/**',
10 | 'dist/**',
11 | 'build/**',
12 | 'out/**',
13 | '.vscode/**',
14 | '.git/**',
15 | '.gitignore',
16 | '.eslintignore',
17 | '.eslintrc',
18 | '.prettierrc',
19 | ],
20 | },
21 | eslint.configs.recommended,
22 | ...tseslint.configs.recommended,
23 | {
24 | files: ['**/*.{js,jsx,ts,tsx}'],
25 | plugins: {
26 | react: reactPlugin,
27 | 'react-hooks': reactHooksPlugin,
28 | },
29 | languageOptions: {
30 | ecmaVersion: 'latest',
31 | sourceType: 'module',
32 | parser: tseslint.parser,
33 | parserOptions: {
34 | ecmaFeatures: { jsx: true },
35 | projectService: true,
36 | },
37 | globals: {
38 | // Browser globals that should be readonly
39 | window: 'readonly',
40 | document: 'readonly',
41 | location: 'readonly',
42 | history: 'readonly',
43 | navigator: 'readonly',
44 |
45 | // Browser globals that can be modified
46 | console: 'writable',
47 | localStorage: 'writable',
48 | sessionStorage: 'writable',
49 |
50 | // Timer functions that can be modified
51 | setTimeout: 'writable',
52 | clearTimeout: 'writable',
53 | setInterval: 'writable',
54 | clearInterval: 'writable',
55 |
56 | // Node.js globals
57 | process: 'readonly',
58 | __dirname: 'readonly',
59 | __filename: 'readonly',
60 |
61 | // React globals
62 | React: 'readonly',
63 | },
64 | },
65 | settings: {
66 | react: {
67 | version: 'detect',
68 | },
69 | },
70 | rules: {
71 | // React specific rules
72 | 'react/react-in-jsx-scope': 'off',
73 | 'react-hooks/rules-of-hooks': 'error',
74 | 'react-hooks/exhaustive-deps': 'warn',
75 |
76 | // TypeScript specific rules
77 | '@typescript-eslint/no-unused-vars': [
78 | 'warn',
79 | {
80 | argsIgnorePattern: '^_',
81 | varsIgnorePattern: '^_',
82 | },
83 | ],
84 |
85 | // General rules
86 | 'no-console': ['warn', { allow: ['warn', 'error'] }],
87 | '@typescript-eslint/no-explicit-any': 'off',
88 |
89 | // Global modification rules
90 | 'no-global-assign': [
91 | 'error',
92 | {
93 | exceptions: ['console', 'localStorage', 'sessionStorage'],
94 | },
95 | ],
96 | },
97 | },
98 | // Add specific configuration for preload files
99 | {
100 | files: ['app/**/*.ts', 'lib/**/*.ts', 'app/**/*.tsx', 'lib/**/*.tsx'],
101 | languageOptions: {
102 | globals: {
103 | process: 'readonly',
104 | console: 'readonly',
105 | window: 'readonly',
106 | },
107 | },
108 | },
109 | ]
110 |
--------------------------------------------------------------------------------
/faces.json:
--------------------------------------------------------------------------------
1 | {}
--------------------------------------------------------------------------------
/known_faces.json:
--------------------------------------------------------------------------------
1 | []
--------------------------------------------------------------------------------
/lib/constants/index.ts:
--------------------------------------------------------------------------------
1 |
2 | import path from 'path';
3 | import { resolve } from 'path';
4 | import 'dotenv/config'
5 |
6 | // General
7 | export const IS_WIN = process.platform === 'win32';
8 |
9 | // Directories
10 | export const THUMBNAILS_DIR = path.resolve('.thumbnails');
11 | export const FACES_DIR = path.resolve('.faces');
12 | export const PROCESSED_VIDEOS_DIR = path.resolve('.results');
13 | export const UNKNOWN_FACES_DIR = resolve('analysis_results/unknown_faces');
14 | export const CACHE_FILE = '.locations.json';
15 |
16 | // Timeouts and Intervals
17 | export const CACHE_TTL = 30 * 60 * 1000; // 30 minutes
18 | export const CACHE_DURATION = 30 * 24 * 60 * 60 * 1000; // 30 days
19 | export const SERVICE_STARTUP_TIMEOUT = 60000; // 60 seconds
20 | export const HEALTH_CHECK_INTERVAL = 1000; // 1 second
21 |
22 | // Service settings
23 | export const MAX_RESTARTS = 10;
24 | export const RESTART_BACKOFF_MS = 1000;
25 | export const EMBEDDING_BATCH_SIZE = 200;
26 | export const MAX_DEPTH = 5;
27 |
28 | // ChromaDB
29 | export const CHROMA_HOST = process.env.CHROMA_HOST || 'localhost';
30 | export const CHROMA_PORT = process.env.CHROMA_PORT || '8000';
31 | export const COLLECTION_NAME = 'video_content';
32 |
33 | // AI Models
34 | export const EMBEDDING_MODEL = 'text-embedding-004';
35 | export const GEMINI_API_KEY = process.env.GEMINI_API_KEY;
36 |
37 |
38 | // Files
39 | export const SUPPORTED_VIDEO_EXTENSIONS = /\.(mp4|mov|avi|mkv)$/i
40 | export const DEFAULT_FPS = 30
41 | export const THUMBNAIL_SCALE = '320:-1'
42 | export const THUMBNAIL_QUALITY = '4'
43 | export const BATCH_THUMBNAIL_QUALITY = '3'
44 |
--------------------------------------------------------------------------------
/lib/conveyor/api/app-api.ts:
--------------------------------------------------------------------------------
1 | import { ConveyorApi } from '@/lib/preload/shared'
2 | import { ExportedScene } from '@/lib/types/scene'
3 | import { VideoMetadataSummary } from '@/lib/types/search'
4 | import { SettingsConfig } from '@/lib/types/settings'
5 |
6 | export class AppApi extends ConveyorApi {
7 | version = () => this.invoke('version')
8 | selectFolder = () => this.invoke('selectFolder')
9 | startIndexing = (videos: string[]) => this.invoke('startIndexing', videos)
10 | getAllVideos = () => this.invoke('getAllVideos')
11 | generateSearchSuggestions = (metadataSummary: VideoMetadataSummary) =>
12 | this.invoke('generateSearchSuggestions', metadataSummary)
13 | searchDocuments = (prompt: string) => this.invoke('searchDocuments', prompt)
14 | stitchVideos = (scenesToStitch: ExportedScene[], videoFilename: string, aspectRatio: string, fps: number) =>
15 | this.invoke('stitchVideos', scenesToStitch, videoFilename, aspectRatio, fps)
16 | exportToFcpXml = (scenesToStitch: ExportedScene[], prompt: string, fcpxmlFilename: string) =>
17 | this.invoke('exportToFcpXml', scenesToStitch, prompt, fcpxmlFilename)
18 | openFile = (filePath: string) => this.invoke('openFile', filePath)
19 | showInFolder = (filePath: string) => this.invoke('showInFolder', filePath)
20 | getSettings = () => this.invoke('getSettings')
21 | saveSettings = (settings: SettingsConfig) => this.invoke('saveSettings', settings)
22 | getKnownFaces = () => this.invoke('getKnownFaces')
23 | getUnknownFaces = () => this.invoke('getUnknownFaces')
24 | deleteUnknownFace = (imageFile: string, jsonFile: string) => this.invoke('deleteUnknownFace', imageFile, jsonFile)
25 | labelUnknownFace = (jsonFile: string, faceId: string, name: string) =>
26 | this.invoke('labelUnknownFace', jsonFile, faceId, name)
27 | reindexAllFaces = (jsonFile: string, faceId: string, name: string) =>
28 | this.invoke('reindexAllFaces', jsonFile, faceId, name)
29 | getAllFaces = () => this.invoke('getAllFaces')
30 | labelFace = (oldName: string, newName: string) => this.invoke('labelFace', oldName, newName)
31 | mergeFaces = (facesToMerge: string[]) => this.invoke('mergeFaces', facesToMerge)
32 | getLocationName = (location: string) => this.invoke('getLocationName', location)
33 | }
34 |
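The renderer reaches these methods through the `window.conveyor` object assembled in `lib/conveyor/api/index.ts`. A hedged sketch (the helper, prompt, and file path are hypothetical):

    const demo = async () => {
      const videos = await window.conveyor.app.getAllVideos()
      const { results } = await window.conveyor.app.searchDocuments('sunset at the beach') // hypothetical prompt
      await window.conveyor.app.showInFolder('/tmp/rough_cut_123.mp4')                     // hypothetical path
      return { videos, results }
    }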
--------------------------------------------------------------------------------
/lib/conveyor/api/index.ts:
--------------------------------------------------------------------------------
1 | import { electronAPI } from '@electron-toolkit/preload'
2 | import { AppApi } from './app-api'
3 | import { WindowApi } from './window-api'
4 |
5 | export const conveyor = {
6 | app: new AppApi(electronAPI),
7 | window: new WindowApi(electronAPI),
8 | }
9 |
10 | export type ConveyorApi = typeof conveyor
11 |
--------------------------------------------------------------------------------
/lib/conveyor/api/window-api.ts:
--------------------------------------------------------------------------------
1 | import { ConveyorApi } from '@/lib/preload/shared'
2 |
3 | export class WindowApi extends ConveyorApi {
4 | // Generate window methods
5 | windowInit = () => this.invoke('window-init')
6 | windowIsMinimizable = () => this.invoke('window-is-minimizable')
7 | windowIsMaximizable = () => this.invoke('window-is-maximizable')
8 | windowMinimize = () => this.invoke('window-minimize')
9 | windowMaximize = () => this.invoke('window-maximize')
10 | windowClose = () => this.invoke('window-close')
11 | windowMaximizeToggle = () => this.invoke('window-maximize-toggle')
12 |
13 | // Generate web methods
14 | webUndo = () => this.invoke('web-undo')
15 | webRedo = () => this.invoke('web-redo')
16 | webCut = () => this.invoke('web-cut')
17 | webCopy = () => this.invoke('web-copy')
18 | webPaste = () => this.invoke('web-paste')
19 | webDelete = () => this.invoke('web-delete')
20 | webSelectAll = () => this.invoke('web-select-all')
21 | webReload = () => this.invoke('web-reload')
22 | webForceReload = () => this.invoke('web-force-reload')
23 | webToggleDevtools = () => this.invoke('web-toggle-devtools')
24 | webActualSize = () => this.invoke('web-actual-size')
25 | webZoomIn = () => this.invoke('web-zoom-in')
26 | webZoomOut = () => this.invoke('web-zoom-out')
27 | webToggleFullscreen = () => this.invoke('web-toggle-fullscreen')
28 | webOpenUrl = (url: string) => this.invoke('web-open-url', url)
29 | }
30 |
--------------------------------------------------------------------------------
/lib/conveyor/conveyor.d.ts:
--------------------------------------------------------------------------------
1 | import type { ConveyorApi } from '@/lib/conveyor/api'
2 |
3 | declare global {
4 | interface Window {
5 | conveyor: ConveyorApi
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/lib/conveyor/handlers/window-handler.ts:
--------------------------------------------------------------------------------
1 | import type { BrowserWindow } from 'electron'
2 | import { shell } from 'electron'
3 | import { handle } from '@/lib/main/shared'
4 | import { electronAPI } from '@electron-toolkit/preload'
5 |
6 | export const registerWindowHandlers = (window: BrowserWindow) => {
7 | // Window operations
8 | handle('window-init', () => {
9 | const { width, height } = window.getBounds()
10 | const minimizable = window.isMinimizable()
11 | const maximizable = window.isMaximizable()
12 | const platform = electronAPI.process.platform
13 |
14 | return { width, height, minimizable, maximizable, platform }
15 | })
16 |
17 | handle('window-is-minimizable', () => window.isMinimizable())
18 | handle('window-is-maximizable', () => window.isMaximizable())
19 | handle('window-minimize', () => window.minimize())
20 | handle('window-maximize', () => window.maximize())
21 | handle('window-close', () => window.close())
22 | handle('window-maximize-toggle', () => (window.isMaximized() ? window.unmaximize() : window.maximize()))
23 |
24 | // Web content operations
25 | const webContents = window.webContents
26 | handle('web-undo', () => webContents.undo())
27 | handle('web-redo', () => webContents.redo())
28 | handle('web-cut', () => webContents.cut())
29 | handle('web-copy', () => webContents.copy())
30 | handle('web-paste', () => webContents.paste())
31 | handle('web-delete', () => webContents.delete())
32 | handle('web-select-all', () => webContents.selectAll())
33 | handle('web-reload', () => webContents.reload())
34 | handle('web-force-reload', () => webContents.reloadIgnoringCache())
35 | handle('web-toggle-devtools', () => webContents.toggleDevTools())
36 | handle('web-actual-size', () => webContents.setZoomLevel(0))
37 | handle('web-zoom-in', () => webContents.setZoomLevel(webContents.zoomLevel + 0.5))
38 | handle('web-zoom-out', () => webContents.setZoomLevel(webContents.zoomLevel - 0.5))
39 | handle('web-toggle-fullscreen', () => window.setFullScreen(!window.fullScreen))
40 | handle('web-open-url', (url: string) => shell.openExternal(url))
41 | }
42 |
--------------------------------------------------------------------------------
/lib/conveyor/schemas/index.ts:
--------------------------------------------------------------------------------
1 | import { z, type ZodPromise, type ZodTypeAny } from 'zod'
2 | import { windowIpcSchema } from './window-schema'
3 | import { appIpcSchema } from './app-schema'
4 | import { progressIpcSchema } from './progress-schema'
5 |
6 | export const ipcSchemas = {
7 | ...windowIpcSchema,
8 | ...appIpcSchema,
9 | ...progressIpcSchema,
10 | } as const
11 |
12 | type SchemaReturn<T extends ZodTypeAny> = T extends ZodPromise<infer U> ? U : T
13 |
14 | export type IPCChannels = {
15 | [K in keyof typeof ipcSchemas]: {
16 | args: z.infer<(typeof ipcSchemas)[K]['args']>
17 | return: z.infer<SchemaReturn<(typeof ipcSchemas)[K]['return']>>
18 | }
19 | }
20 |
21 | export type ChannelName = keyof typeof ipcSchemas
22 | export type ChannelArgs<T extends ChannelName> = IPCChannels[T]['args']
23 | export type ChannelReturn<T extends ChannelName> = IPCChannels[T]['return']
24 |
25 | // Runtime validation helpers
26 | export const validateArgs = <T extends ChannelName>(channel: T, args: unknown[]): ChannelArgs<T> => {
27 | return ipcSchemas[channel].args.parse(args) as ChannelArgs<T>
28 | }
29 |
30 | export const validateReturn = <T extends ChannelName>(channel: T, data: unknown): ChannelReturn<T> => {
31 | return ipcSchemas[channel].return.parse(data) as ChannelReturn<T>
32 | }
33 |
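For example, with the 'web-open-url' entry from window-schema.ts (args `[z.string()]`, return `z.void()`), the helpers behave roughly like this (illustrative only):

    import { validateArgs, type ChannelArgs, type ChannelReturn } from '@/lib/conveyor/schemas'

    type OpenUrlArgs = ChannelArgs<'web-open-url'>     // [string]
    type OpenUrlReturn = ChannelReturn<'web-open-url'> // void

    const args = validateArgs('web-open-url', ['https://example.com']) // passes: a one-string tuple
    // validateArgs('web-open-url', [42]) would throw a ZodError at runtime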
--------------------------------------------------------------------------------
/lib/conveyor/schemas/progress-schema.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod'
2 |
3 | export const progressIpcSchema = {
4 | 'indexing-progress': {
5 | args: z.tuple([
6 | z.object({
7 | video: z.string(),
8 | step: z.enum(['transcription', 'frame-analysis', 'embedding']),
9 | progress: z.number(),
10 | success: z.boolean(),
11 | stepIndex: z.number(),
12 | thumbnailUrl: z.string(),
13 | elapsed: z.string().optional(),
14 | memoryMB: z.number().optional(),
15 | framesProcessed: z.number().optional(),
16 | totalFrames: z.number().optional(),
17 | fps: z.number().optional(),
18 | }),
19 | ]),
20 | return: z.void(),
21 | },
22 | }
23 |
--------------------------------------------------------------------------------
/lib/conveyor/schemas/window-schema.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod'
2 |
3 | export const windowIpcSchema = {
4 | 'window-init': {
5 | args: z.tuple([]),
6 | return: z.object({
7 | width: z.number(),
8 | height: z.number(),
9 | minimizable: z.boolean(),
10 | maximizable: z.boolean(),
11 | platform: z.string(),
12 | }),
13 | },
14 | 'window-is-minimizable': {
15 | args: z.tuple([]),
16 | return: z.boolean(),
17 | },
18 | 'window-is-maximizable': {
19 | args: z.tuple([]),
20 | return: z.boolean(),
21 | },
22 | 'window-minimize': {
23 | args: z.tuple([]),
24 | return: z.void(),
25 | },
26 | 'window-maximize': {
27 | args: z.tuple([]),
28 | return: z.void(),
29 | },
30 | 'window-close': {
31 | args: z.tuple([]),
32 | return: z.void(),
33 | },
34 | 'window-maximize-toggle': {
35 | args: z.tuple([]),
36 | return: z.void(),
37 | },
38 |
39 | // Web content operations
40 | 'web-undo': {
41 | args: z.tuple([]),
42 | return: z.void(),
43 | },
44 | 'web-redo': {
45 | args: z.tuple([]),
46 | return: z.void(),
47 | },
48 | 'web-cut': {
49 | args: z.tuple([]),
50 | return: z.void(),
51 | },
52 | 'web-copy': {
53 | args: z.tuple([]),
54 | return: z.void(),
55 | },
56 | 'web-paste': {
57 | args: z.tuple([]),
58 | return: z.void(),
59 | },
60 | 'web-delete': {
61 | args: z.tuple([]),
62 | return: z.void(),
63 | },
64 | 'web-select-all': {
65 | args: z.tuple([]),
66 | return: z.void(),
67 | },
68 | 'web-reload': {
69 | args: z.tuple([]),
70 | return: z.void(),
71 | },
72 | 'web-force-reload': {
73 | args: z.tuple([]),
74 | return: z.void(),
75 | },
76 | 'web-toggle-devtools': {
77 | args: z.tuple([]),
78 | return: z.void(),
79 | },
80 | 'web-actual-size': {
81 | args: z.tuple([]),
82 | return: z.void(),
83 | },
84 | 'web-zoom-in': {
85 | args: z.tuple([]),
86 | return: z.void(),
87 | },
88 | 'web-zoom-out': {
89 | args: z.tuple([]),
90 | return: z.void(),
91 | },
92 | 'web-toggle-fullscreen': {
93 | args: z.tuple([]),
94 | return: z.void(),
95 | },
96 | 'web-open-url': {
97 | args: z.tuple([z.string()]),
98 | return: z.void(),
99 | },
100 | }
101 |
--------------------------------------------------------------------------------
/lib/main/app.ts:
--------------------------------------------------------------------------------
1 | import { BrowserWindow, app } from 'electron'
2 | import { join } from 'path'
3 | import appIcon from '@/resources/build/icon.png?asset'
4 | import {
5 | registerFaceProtocol,
6 | registerResourcesProtocol,
7 | registerThumbnailProtocol,
8 | registerUnknownFaceProtocol,
9 | } from './protocols'
10 | import { registerWindowHandlers } from '@/lib/conveyor/handlers/window-handler'
11 | import { registerAppHandlers } from '@/lib/conveyor/handlers/app-handler'
12 |
13 | export function createAppWindow(): void {
14 | // Register custom protocol for resources
15 | registerResourcesProtocol()
16 | registerThumbnailProtocol()
17 | registerFaceProtocol()
18 | registerUnknownFaceProtocol()
19 |
20 | // Create the main window.
21 | const mainWindow = new BrowserWindow({
22 | width: 1200,
23 | height: 1000,
24 | show: false,
25 | backgroundColor: '#1c1c1c',
26 | icon: appIcon,
27 | titleBarStyle: 'hiddenInset',
28 | title: 'Edit Mind',
29 | webPreferences: {
30 | preload: join(__dirname, '../preload/preload.js'),
31 | sandbox: false,
32 | },
33 | })
34 |
35 | // Register IPC events for the main window.
36 | registerWindowHandlers(mainWindow)
37 | registerAppHandlers(app, mainWindow.webContents)
38 |
39 | mainWindow.on('ready-to-show', () => {
40 | mainWindow.show()
41 | })
42 |
  43 |   // HMR for renderer based on the electron-vite cli.
44 | // Load the remote URL for development or the local html file for production.
45 | if (!app.isPackaged && process.env['ELECTRON_RENDERER_URL']) {
46 | mainWindow.loadURL(process.env['ELECTRON_RENDERER_URL'])
47 | } else {
48 | mainWindow.loadFile(join(__dirname, '../renderer/index.html'))
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/lib/main/index.d.ts:
--------------------------------------------------------------------------------
1 | ///
2 |
3 | declare module '*.css' {
4 | const content: string
5 | export default content
6 | }
7 |
8 | declare module '*.png' {
9 | const content: string
10 | export default content
11 | }
12 |
13 | declare module '*.jpg' {
14 | const content: string
15 | export default content
16 | }
17 |
18 | declare module '*.jpeg' {
19 | const content: string
20 | export default content
21 | }
22 |
23 | declare module '*.svg' {
24 | const content: string
25 | export default content
26 | }
27 |
28 | declare module '*.webp' {
29 | const content: string
30 | export default content
31 | }
32 |
--------------------------------------------------------------------------------
/lib/main/main.ts:
--------------------------------------------------------------------------------
1 | import { app, BrowserWindow } from 'electron'
2 | import { electronApp, optimizer } from '@electron-toolkit/utils'
3 | import { createAppWindow } from './app'
4 | import { pythonService } from '../services/pythonService'
5 |
6 | // This method will be called when Electron has finished
7 | // initialization and is ready to create browser windows.
8 | // Some APIs can only be used after this event occurs.
9 | app.whenReady().then(async () => {
10 | // Set app user model id for windows
11 | electronApp.setAppUserModelId('com.electron')
12 |
13 | // Start the Python analysis service
14 | try {
15 | await pythonService.start()
16 | } catch (error) {
17 | console.error('Failed to start Python service on launch, app will continue without it.', error)
18 | // Optionally, show a dialog to the user
19 | // dialog.showErrorBox('Critical Error', 'Python analysis service failed to start.');
20 | }
21 |
22 |
23 | // Create app window
24 | createAppWindow()
25 |
26 | // Default open or close DevTools by F12 in development
27 | // and ignore CommandOrControl + R in production.
28 | // see https://github.com/alex8088/electron-toolkit/tree/master/packages/utils
29 | app.on('browser-window-created', (_, window) => {
30 | optimizer.watchWindowShortcuts(window)
31 | })
32 |
33 | app.on('activate', function () {
34 | // On macOS it's common to re-create a window in the app when the
35 | // dock icon is clicked and there are no other windows open.
36 | if (BrowserWindow.getAllWindows().length === 0) {
37 | createAppWindow()
38 | }
39 | })
40 | })
41 |
42 | // Quit when all windows are closed, except on macOS. There, it's common
43 | // for applications and their menu bar to stay active until the user quits
44 | // explicitly with Cmd + Q.
45 | app.on('window-all-closed', () => {
46 | if (process.platform !== 'darwin') {
47 | app.quit()
48 | }
49 | })
50 |
51 | // Stop the python service before quitting
52 | app.on('quit', async () => {
53 | await pythonService.stop()
54 | })
55 |
56 | // In this file, you can include the rest of your app's specific main process
57 | // code. You can also put them in separate files and import them here.
58 |
--------------------------------------------------------------------------------
/lib/main/protocols.ts:
--------------------------------------------------------------------------------
1 | import { protocol, net } from 'electron'
2 | import { join } from 'path'
3 | import { pathToFileURL } from 'url'
4 | import fs from 'fs'
5 | import { UNKNOWN_FACES_DIR, THUMBNAILS_DIR } from '@/lib/constants'
6 | export function registerThumbnailProtocol() {
7 | protocol.handle('thumbnail', (request) => {
8 | const url = request.url.split('thumbnail://')[1]
9 | const thumbnailPath = join(THUMBNAILS_DIR, url)
10 |
11 | if (fs.existsSync(thumbnailPath) && fs.statSync(thumbnailPath).size > 0) {
12 | return net.fetch(pathToFileURL(thumbnailPath).toString())
13 | } else {
14 | const fallbackPath = join(__dirname, '../../app/assets/default-fallback-image.png')
15 | return net.fetch(pathToFileURL(fallbackPath).toString())
16 | }
17 | })
18 | }
19 |
20 | export function registerFaceProtocol() {
21 | protocol.handle('face', (request) => {
22 | const url = request.url.split('face://')[1]
23 | const imagePath = url
24 |
25 | if (fs.existsSync(imagePath)) {
26 | return net.fetch(pathToFileURL(imagePath).toString())
27 | } else {
28 | const fallbackPath = join(__dirname, '../../app/assets/default-fallback-image.png')
29 | return net.fetch(pathToFileURL(fallbackPath).toString())
30 | }
31 | })
32 | }
33 | export function registerUnknownFaceProtocol() {
34 | protocol.handle('unknown', (request) => {
35 | const url = request.url.split('unknown://')[1]
36 | const imagePath = join(UNKNOWN_FACES_DIR, url)
37 |
38 | if (fs.existsSync(imagePath)) {
39 | return net.fetch(pathToFileURL(imagePath).toString())
40 | } else {
41 | const fallbackPath = join(__dirname, '../../app/assets/default-fallback-image.png')
42 | return net.fetch(pathToFileURL(fallbackPath).toString())
43 | }
44 | })
45 | }
46 |
47 | export function registerResourcesProtocol() {
48 | protocol.handle('res', async (request) => {
49 | try {
50 | const url = new URL(request.url)
51 | const fullPath = join(url.hostname, url.pathname.slice(1))
52 | const filePath = join(__dirname, '../../resources', fullPath)
53 | return net.fetch(pathToFileURL(filePath).toString())
54 | } catch (error) {
55 | console.error('Protocol error:', error)
56 | return new Response('Resource not found', { status: 404 })
57 | }
58 | })
59 | }
60 |
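
Note: the handlers above map custom URL schemes onto files on disk: `thumbnail://<file>` resolves inside THUMBNAILS_DIR, `unknown://<file>` inside UNKNOWN_FACES_DIR, `face://<absolute path>` directly, and `res://host/path` under the packaged resources directory; missing or empty files fall back to the bundled placeholder image. A hedged sketch of renderer-side usage (the helper names are invented for illustration; the real components live under app/components):

// Hypothetical helpers that build protocol URLs consumed by <img src=...> in the renderer.
const thumbnailSrc = (fileName: string): string => `thumbnail://${fileName}`
const unknownFaceSrc = (fileName: string): string => `unknown://${fileName}`

// e.g. thumbnailSrc('GOPR0001.jpg') is served by registerThumbnailProtocol(),
// falling back to app/assets/default-fallback-image.png when the file is absent.
console.log(thumbnailSrc('GOPR0001.jpg'), unknownFaceSrc('face_001.jpg'))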
--------------------------------------------------------------------------------
/lib/main/shared.ts:
--------------------------------------------------------------------------------
1 | import { ipcMain, type WebContents } from 'electron'
2 | import { ipcSchemas, validateArgs, validateReturn, type ChannelArgs, type ChannelReturn } from '@/lib/conveyor/schemas'
3 |
4 | /**
5 | * Helper to create a sender function for a specific webContents
6 | * @param webContents - The webContents to send the message to
7 | * @returns A function to send a message to the specified webContents
8 | */
9 | export const sender = (webContents: WebContents) => {
  10 |   return <T extends keyof typeof ipcSchemas>(channel: T, ...args: ChannelArgs<T>) => {
11 | webContents.send(channel, ...args)
12 | }
13 | }
14 |
15 | /**
16 | * Helper to register IPC handlers
17 | * @param channel - The IPC channel to register the handler for
18 | * @param handler - The handler function to register
19 | * @returns void
20 | */
  21 | export const handle = <T extends keyof typeof ipcSchemas>(
  22 |   channel: T,
  23 |   handler: (...args: ChannelArgs<T>) => ChannelReturn<T> | Promise<ChannelReturn<T>>
24 | ) => {
25 | ipcMain.handle(channel, async (_, ...args) => {
26 | try {
27 | const validatedArgs = validateArgs(channel, args)
28 | const result = await handler(...validatedArgs)
29 |
30 | return validateReturn(channel, result)
31 | } catch (error) {
32 | console.error(`IPC Error in ${channel}:`, error)
33 | throw error
34 | }
35 | })
36 | }
37 |
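
Note: `handle` wraps `ipcMain.handle` with schema validation on both the arguments and the return value, and `sender` produces a typed fire-and-forget emitter toward a renderer. A minimal sketch of wiring both, assuming the existing 'window-minimize' and 'indexing-progress' channels (the `wireExampleIpc` helper is invented for illustration):

import { BrowserWindow } from 'electron'
import { handle, sender } from '@/lib/main/shared'

export const wireExampleIpc = (window: BrowserWindow) => {
  // Arguments and return value are validated against window-schema.ts.
  handle('window-minimize', () => window.minimize())

  // sender() binds a typed emitter to this window's webContents.
  const send = sender(window.webContents)
  send('indexing-progress', {
    video: 'GOPR0001.MP4',
    step: 'transcription',
    progress: 0.25,
    success: true,
    stepIndex: 0,
    thumbnailUrl: 'thumbnail://GOPR0001.jpg',
  })
}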
--------------------------------------------------------------------------------
/lib/preload/preload.ts:
--------------------------------------------------------------------------------
1 | import { contextBridge } from 'electron'
2 | import { conveyor } from '@/lib/conveyor/api'
3 |
4 | // Use `contextBridge` APIs to expose APIs to
5 | // renderer only if context isolation is enabled, otherwise
6 | // just add to the DOM global.
7 | if (process.contextIsolated) {
8 | try {
9 | contextBridge.exposeInMainWorld('conveyor', conveyor)
10 | } catch (error) {
11 | console.error(error)
12 | }
13 | } else {
14 | window.conveyor = conveyor
15 | }
16 |
--------------------------------------------------------------------------------
/lib/preload/shared.ts:
--------------------------------------------------------------------------------
1 | import type { ElectronAPI, IpcRenderer } from '@electron-toolkit/preload'
2 | import type { ChannelName, ChannelArgs, ChannelReturn } from '@/lib/conveyor/schemas'
3 |
4 | export abstract class ConveyorApi {
5 | protected renderer: IpcRenderer
6 |
7 | constructor(electronApi: ElectronAPI) {
8 | this.renderer = electronApi.ipcRenderer
9 | }
10 |
  11 |   invoke = async <T extends ChannelName>(channel: T, ...args: ChannelArgs<T>): Promise<ChannelReturn<T>> => {
12 | // Call the IPC method without runtime validation in preload
13 | // Validation happens on the main process side
  14 |     return this.renderer.invoke(channel, ...args) as Promise<ChannelReturn<T>>
15 | }
16 |
  17 |   on = <T extends ChannelName>(channel: T, callback: (...args: ChannelArgs<T>) => void) => {
18 | const subscription = (_: unknown, ...args: any[]) => {
  19 |       callback(...(args as ChannelArgs<T>))
20 | }
21 | this.renderer.on(channel, subscription)
22 |
23 | return () => {
24 | this.renderer.removeListener(channel, subscription)
25 | }
26 | }
27 | }
28 |
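
Note: `ConveyorApi` is the abstract base that the concrete API slices in lib/conveyor/api extend; `invoke` forwards to `ipcRenderer.invoke` with channel-level types, and `on` returns an unsubscribe function. A hedged sketch of a subclass (the class and method names here are invented for illustration, not the repository's actual API surface):

import { electronAPI } from '@electron-toolkit/preload'
import { ConveyorApi } from '@/lib/preload/shared'

// Hypothetical API slice; the real ones live under lib/conveyor/api.
class ExampleWindowApi extends ConveyorApi {
  minimize = () => this.invoke('window-minimize')
  toggleMaximize = () => this.invoke('window-maximize-toggle')
  openUrl = (url: string) => this.invoke('web-open-url', url)
}

const exampleApi = new ExampleWindowApi(electronAPI)
// invoke() resolves with the channel's validated return type (void for these channels).
void exampleApi.openUrl('https://github.com/iliashad/edit-mind')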
--------------------------------------------------------------------------------
/lib/types/analysis.ts:
--------------------------------------------------------------------------------
1 | export interface Face {
2 | name: string
3 | location: [number, number, number, number]
  4 |   emotion?: Record<string, number>
5 | }
6 |
7 | export interface DetectedObject {
8 | label: string
9 | confidence: number
10 | box: [number, number, number, number]
11 | }
12 |
13 | export interface FrameAnalysis {
14 | timestamp_seconds: number
15 | objects: DetectedObject[]
16 | faces: Face[]
17 | detected_text?: DetectedText[]
18 | start_time_ms: number
19 | end_time_ms: number
20 | scene_description: string
21 | shot_type: string
22 | dominant_color: {
23 | name: string
24 | hex: string
25 | percentage: number
26 | is_vibrant: boolean
27 | is_muted: boolean
28 | }
29 | }
30 |
31 | export interface DetectedText {
32 | text: string
33 | confidence: number
34 | bounding_box: [[number, number], [number, number], [number, number], [number, number]]
35 | }
36 |
37 | export interface SceneAnalysis {
38 | environment: string
39 | environment_confidence: number
40 | object_distribution: { [key: string]: number }
41 | total_frames: number
42 | }
43 |
44 | export interface DetectedActivity {
45 | activity: string
46 | confidence: number
47 | indicators: string[]
48 | primary_objects: string[]
49 | }
50 |
51 | export interface FaceRecognitionSummary {
52 | known_people_identified: string[]
53 | unknown_faces_detected: number
54 | total_faces_detected: number
55 | all_faces: {
56 | timestamp: number
57 | name: string
58 | }[]
59 | unknown_faces_timestamps: number[]
60 | }
61 |
62 | export interface AnalysisSummary {
63 | total_frames_analyzed: number
64 | primary_activity: string
65 | confidence: number
66 | }
67 |
68 | export interface Analysis {
69 | video_file: string
70 | scene_analysis: SceneAnalysis
71 | detected_activities: DetectedActivity[]
72 | face_recognition_summary: FaceRecognitionSummary
73 | frame_analysis: FrameAnalysis[]
74 | summary: AnalysisSummary
75 | }
76 |
77 | export interface AnalysisProgress {
78 | plugin: string
79 | progress: number
80 | message: string
81 | elapsed: string
82 | frames_analyzed: number
83 | total_frames: number
84 | }
--------------------------------------------------------------------------------
/lib/types/face.ts:
--------------------------------------------------------------------------------
1 | import z from "zod";
2 | import { unknownFace } from "../conveyor/schemas/app-schema";
3 |
 4 | export type UnknownFace = z.infer<typeof unknownFace>
5 |
6 |
7 | export interface FaceIndexingProgress {
8 | progress: number
9 | elapsed: string
10 | }
--------------------------------------------------------------------------------
/lib/types/gopro.ts:
--------------------------------------------------------------------------------
1 | export interface GoProMetadata {
2 | [key: string]: any;
3 | 'device name'?: string;
4 | }
--------------------------------------------------------------------------------
/lib/types/index.ts:
--------------------------------------------------------------------------------
1 | export type ShotType = 'medium-shot' | 'long-shot' | 'close-up'
2 | export type AspectRatio = '16:9' | '9:16' | '1:1' | '4:3' | '8:7'
--------------------------------------------------------------------------------
/lib/types/scene.ts:
--------------------------------------------------------------------------------
1 | import z from 'zod'
2 | import { exportedSceneSchema, sceneSchema } from '../conveyor/schemas/app-schema'
3 |
 4 | export type Scene = z.infer<typeof sceneSchema>
5 |
 6 | export type ExportedScene = z.infer<typeof exportedSceneSchema>
7 |
--------------------------------------------------------------------------------
/lib/types/search.ts:
--------------------------------------------------------------------------------
1 | import z from 'zod'
2 | import { searchSuggestionSchema, VideoMetadataSummarySchema } from '../conveyor/schemas/app-schema'
3 | import { ShotType, AspectRatio } from '.'
4 |
5 | export interface VideoSearchParams {
6 | action: string
7 | emotions: string[]
8 | shot_type: ShotType | null
9 | aspect_ratio: AspectRatio | null
10 | duration: number | null
11 | description: string
12 | outputFilename: string
13 | objects: string[]
14 | camera?: string
15 | transcriptionQuery?: string
16 | detectedText?: string
17 | }
18 |
19 | export type SearchQuery = {
20 | faces?: string[]
21 | emotions?: string[]
22 | shot_type?: ShotType | null
23 | aspect_ratio?: AspectRatio | null
24 | description?: string
25 | objects?: string[]
26 | camera?: string
27 | transcriptionQuery?: string
28 | detectedText?: string
29 | }
30 |
 31 | export type SearchSuggestion = z.infer<typeof searchSuggestionSchema>
32 |
 33 | export type VideoMetadataSummary = z.infer<typeof VideoMetadataSummarySchema>
34 |
35 | export interface FaceData {
36 | name: string
37 | count: number
38 | thumbnail?: string
39 | }
 40 | export type LoadedFaces = Record<string, FaceData>
41 |
42 | export interface GenerationResult {
43 | message: string
44 | videoPath: string
45 | fcpxmlPath: string
46 | }
47 |
48 | export interface VideoConfig {
49 | aspectRatio: string
50 | fps: number
51 | }
52 |
53 | export interface SearchMetadata {
54 | aspectRatio?: AspectRatio
55 | faces?: string[]
56 | }
57 |
--------------------------------------------------------------------------------
/lib/types/settings.ts:
--------------------------------------------------------------------------------
1 |
2 | export type SettingsConfig = {
3 | sample_interval_seconds: number;
4 | max_workers: number;
5 | batch_size: number;
6 | yolo_confidence: number;
7 | yolo_iou: number;
8 | resize_to_720p: boolean;
9 | yolo_model: string;
10 | output_dir: string;
11 | };
12 |
--------------------------------------------------------------------------------
/lib/types/transcription.ts:
--------------------------------------------------------------------------------
1 | interface TranscriptionWord {
2 | word: string;
3 | start: number;
4 | end: number;
5 | probability: number;
6 | }
7 |
8 | interface TranscriptionSegment {
9 | id: number;
10 | seek: number;
11 | start: number;
12 | end: number;
13 | text: string;
14 | tokens: number[];
15 | temperature: number;
16 | avg_logprob: number;
17 | compression_ratio: number;
18 | no_speech_prob: number;
19 | words: TranscriptionWord[];
20 | }
21 |
22 | export interface Transcription {
23 | text: string;
24 | segments: TranscriptionSegment[];
25 | language: string;
26 | }
27 | export type TranscriptionProgress = {
28 | progress: number
29 | elapsed: string
30 | }
--------------------------------------------------------------------------------
/lib/types/vector.ts:
--------------------------------------------------------------------------------
1 | interface Metadata {
2 | [key: string]: string | number | boolean | undefined
3 | }
4 |
5 | export interface AddDocumentsData {
6 | ids: string[]
7 | documents: string[]
8 | metadatas: Metadata[]
9 | }
10 |
11 | export interface FilterData {
12 | filters?: {
13 | faces?: string[]
14 | objects?: string[]
15 | emotions?: string[]
16 | }
17 | }
18 |
19 | export interface EmbeddingInput {
20 | id: string
21 | text: string
 22 |   metadata?: Record<string, string | number | boolean | undefined>
23 | }
24 |
25 | export interface CollectionStatistics {
26 | name: string
27 | totalDocuments: number
28 | embeddingDimension: number | null
29 | metadataKeys: string[]
30 | documentIds: string[]
31 | }
32 |
--------------------------------------------------------------------------------
/lib/types/video.ts:
--------------------------------------------------------------------------------
1 | import z from "zod"
2 | import { Scene } from "./scene"
3 | import { videoSchema } from "../conveyor/schemas/app-schema"
4 |
5 |
 6 | export type Video = z.infer<typeof videoSchema>
7 |
8 | export interface VideoWithScenes extends Video {
9 | scenes: Scene[]
10 | sceneCount: number
11 | }
12 | export interface VideoMetadata {
13 | duration: number
14 | fps: number
15 | width: number
16 | height: number
17 | totalFrames: number
18 | }
19 |
20 | export interface VideoFile {
21 | path: string
22 | mtime: Date
23 | }
24 |
25 | export interface CameraInfo {
26 | camera: string
27 | createdAt: string
28 | }
29 |
30 | export interface GeoLocation {
31 | latitude?: number
32 | longitude?: number
33 | altitude?: number
34 | }
35 |
36 | export interface FFmpegError extends Error {
37 | code?: number
38 | stderr?: string
39 | }
40 |
41 | export interface Dimensions {
42 | width: number
43 | height: number
44 | }
45 |
46 | export interface FFmpegProcessResult {
47 | code: number
48 | stderr: string
49 | }
50 |
51 | export interface VideoMetadataMap {
 52 |   faces: Map<string, number>;
 53 |   objects: Map<string, number>;
 54 |   emotions: Map<string, number>;
 55 |   shotTypes: Map<string, number>;
 56 |   aspectRatios: Map<string, number>;
 57 |   cameras: Map<string, number>;
 58 |   descriptions: string[];
 59 |   totalScenes: number;
 60 |   colors: Map<string, number>;
61 | }
--------------------------------------------------------------------------------
/lib/utils.ts:
--------------------------------------------------------------------------------
1 | import { clsx, type ClassValue } from 'clsx'
2 | import { twMerge } from 'tailwind-merge'
3 |
4 | export function cn(...inputs: ClassValue[]) {
5 | return twMerge(clsx(inputs))
6 | }
7 |
8 | export function gcd(a: number, b: number): number {
9 | return b === 0 ? a : gcd(b, a % b)
10 | }
--------------------------------------------------------------------------------
/lib/utils/fcpxml.ts:
--------------------------------------------------------------------------------
1 | import { spawn } from "child_process";
2 | import path from "path";
3 |
4 | const PYTHON_EXECUTABLE = path.resolve("../.venv/bin/python");
5 | const TIMELINE_GENERATOR_PATH = path.resolve("../python/timeline_generator.py");
6 |
7 | /**
8 | * Generates an FCPXML file from a clips.json file using the Python service.
9 | * @param clipsJsonPath The absolute path to the clips.json file.
 10 |  * @param outputFilename The path where the generated FCPXML file should be saved.
11 | * @returns The path to the generated FCPXML file.
12 | */
13 | export function exportToFcpXml(
14 | clipsJsonPath: string,
15 | outputFilename: string
 16 | ): Promise<string> {
17 | return new Promise((resolve, reject) => {
18 | const pyProcess = spawn(PYTHON_EXECUTABLE, [
19 | TIMELINE_GENERATOR_PATH,
20 | clipsJsonPath,
21 | outputFilename,
22 | ]);
23 |
24 | let stderr = "";
25 | pyProcess.stderr.on("data", (data: any) => {
26 | stderr += data.toString();
27 | });
28 |
29 | pyProcess.on("close", (code: any) => {
30 | if (code === 0) {
31 | resolve(outputFilename);
32 | } else {
33 | reject(
34 | new Error(
35 | `Timeline generator exited with code ${code}. Stderr: ${stderr}`
36 | )
37 | );
38 | }
39 | });
40 | });
41 | }
42 |
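
Note: `exportToFcpXml` shells out to the Python timeline generator and resolves with the output path once the process exits cleanly, rejecting with the exit code and stderr otherwise. A minimal usage sketch (the paths below are placeholders, not files from the repository):

import { exportToFcpXml } from '@/lib/utils/fcpxml'

const run = async () => {
  try {
    const fcpxmlPath = await exportToFcpXml('/tmp/clips.json', '/tmp/rough-cut.fcpxml')
    console.log('Timeline written to', fcpxmlPath)
  } catch (error) {
    // The rejection message carries the exit code and stderr from timeline_generator.py.
    console.error('FCPXML export failed:', error)
  }
}

void run()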
--------------------------------------------------------------------------------
/lib/utils/ffmpeg.ts:
--------------------------------------------------------------------------------
1 | import { spawn, ChildProcess } from 'child_process'
2 | import ffprobeStatic from 'ffmpeg-ffprobe-static'
3 | import fs from 'fs'
4 |
5 | export const validateBinaries = (): void => {
6 | if (!ffprobeStatic.ffmpegPath) {
  7 |     throw new Error('FFmpeg binary not found. Please ensure ffmpeg-ffprobe-static is properly installed.')
8 | }
9 | if (!ffprobeStatic?.ffprobePath) {
 10 |     throw new Error('FFprobe binary not found. Please ensure ffmpeg-ffprobe-static is properly installed.')
11 | }
12 | }
13 |
14 | export const spawnFFmpeg = (args: string[]): ChildProcess => {
15 | validateBinaries()
16 |
17 | const ffmpegPath = ffprobeStatic.ffmpegPath!
18 | if (!fs.existsSync(ffmpegPath)) {
19 | throw new Error(`FFmpeg binary not found at path: ${ffmpegPath}`)
20 | }
21 |
22 | return spawn(ffmpegPath, args)
23 | }
24 |
--------------------------------------------------------------------------------
/lib/utils/file.ts:
--------------------------------------------------------------------------------
1 | import fs from 'fs'
2 |
  3 | export const validateFile = async (filePath: string): Promise<void> => {
4 | try {
5 | await fs.promises.access(filePath, fs.constants.R_OK)
6 | } catch {
7 | throw new Error(`Cannot access file: ${filePath}`)
8 | }
9 | }
10 |
11 | export const cleanupFiles = (files: string[]): void => {
12 | files.forEach((file) => {
13 | try {
14 | if (fs.existsSync(file)) {
15 | fs.unlinkSync(file)
16 | }
17 | } catch (error) {
18 | console.warn(`Failed to delete file ${file}:`, error instanceof Error ? error.message : 'Unknown error')
19 | }
20 | })
21 | }
22 |
23 |
24 | export const ensureDirectoryExists = (dirPath: string): void => {
25 | if (!fs.existsSync(dirPath)) {
26 | fs.mkdirSync(dirPath, { recursive: true })
27 | }
28 | }
--------------------------------------------------------------------------------
/lib/utils/frameAnalyze.ts:
--------------------------------------------------------------------------------
1 | import { Analysis, AnalysisProgress } from '../types/analysis'
2 | import { pythonService } from '../services/pythonService'
3 |
4 | /**
5 | * Analyzes a video file using the persistent Python analysis service.
6 | * @param videoPath The full path to the video file.
7 | * @param onProgress Callback for progress updates.
8 | * @param onResult Callback for when the analysis is complete.
9 | * @param onError Callback for any errors that occur.
10 | */
11 | export function analyzeVideo(
12 | videoPath: string,
13 | onProgress: (progress: AnalysisProgress) => void,
14 | onResult: (result: { analysis: Analysis; category: string }) => void,
15 | onError: (error: Error) => void
16 | ): void {
17 | try {
18 | pythonService.analyzeVideo(
19 | videoPath,
20 | (progress) => {
21 | onProgress(progress)
22 | },
23 | (result) => {
24 | let category = 'Uncategorized'
25 | if (result?.scene_analysis?.environment) {
26 | const env = result.scene_analysis.environment
27 | category = env.charAt(0).toUpperCase() + env.slice(1).replace(/_/g, ' ')
28 | }
29 | onResult({ analysis: result, category })
30 | },
31 | (error) => {
32 | console.error('Video analysis failed:', error)
33 | onError(error)
34 | }
35 | )
36 | } catch (error) {
37 | console.error('Video analysis failed:', error)
38 | onError(error as Error)
39 | }
40 | }
41 |
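
Note: `analyzeVideo` is callback-based; a common pattern at the call site is to wrap it in a promise. A hedged sketch of such a wrapper (the `analyzeVideoAsync` name is invented for illustration):

import { analyzeVideo } from '@/lib/utils/frameAnalyze'
import type { Analysis } from '@/lib/types/analysis'

// Hypothetical promise wrapper around the callback API above.
const analyzeVideoAsync = (videoPath: string) =>
  new Promise<{ analysis: Analysis; category: string }>((resolve, reject) => {
    analyzeVideo(
      videoPath,
      (progress) => console.log(progress.plugin, progress.message), // progress updates per plugin
      resolve, // receives { analysis, category } when the service finishes
      reject // receives the Error from the Python service
    )
  })

void analyzeVideoAsync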
--------------------------------------------------------------------------------
/lib/utils/gopro.ts:
--------------------------------------------------------------------------------
1 | import goproTelemetry from 'gopro-telemetry'
2 | import gpmfExtract from 'gpmf-extract'
3 | import { readFileSync } from 'node:fs'
4 | import { GoProMetadata } from '../types/gopro'
5 |
6 | export async function getGoProVideoMetadata(videoFullPath: string) {
  7 |   return new Promise<GoProMetadata>((res, rej) => {
8 | const file = readFileSync(videoFullPath)
9 | gpmfExtract(file)
10 | .then((extracted) => {
11 | goproTelemetry(extracted, {}, (telemetry) => {
12 | res(telemetry)
13 | })
14 | })
15 | .catch(rej)
16 | })
17 | }
18 | export function getGoProDeviceName(metadata: GoProMetadata): string {
19 | for (const key in metadata) {
20 | if (metadata[key]?.['device name']) {
21 | return metadata[key]['device name']
22 | }
23 | }
24 | return 'Unknown GoPro Device'
25 | }
26 |
27 | export function extractGPS(metadata: GoProMetadata): { lat: number; lon: number; alt?: number }[] {
28 | const gpsData: { lat: number; lon: number; alt?: number }[] = []
29 |
30 | if (metadata.streams?.GPS5) {
31 | const gps5 = metadata.streams.GPS5
32 |
33 | gps5.samples.slice(0, 5).forEach((sample: any) => {
34 | if (Array.isArray(sample.value)) {
35 | const [lat, lon, alt] = sample.value
36 | gpsData.push({ lat, lon, alt })
37 | }
38 | })
39 | }
40 |
41 | return gpsData
42 | }
43 |
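
Note: the telemetry returned by `getGoProVideoMetadata` feeds both `getGoProDeviceName` and `extractGPS`, and the coordinates pair naturally with `formatLocation` from lib/utils/location.ts. A hedged sketch combining them (the `describeGoProClip` helper and the sample device name are invented for illustration):

import { getGoProVideoMetadata, getGoProDeviceName, extractGPS } from '@/lib/utils/gopro'
import { formatLocation } from '@/lib/utils/location'
import type { GoProMetadata } from '@/lib/types/gopro'

const describeGoProClip = async (videoPath: string) => {
  const metadata = (await getGoProVideoMetadata(videoPath)) as GoProMetadata
  const device = getGoProDeviceName(metadata) // e.g. a HERO model name, or 'Unknown GoPro Device'
  const [firstFix] = extractGPS(metadata) // first of up to five GPS5 samples
  const where = firstFix ? formatLocation(firstFix.lat, firstFix.lon, firstFix.alt) : 'unknown location'
  return `${device} clip recorded at ${where}`
}

void describeGoProClip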
--------------------------------------------------------------------------------
/lib/utils/location.ts:
--------------------------------------------------------------------------------
1 | import fetch from 'node-fetch'
2 | import * as fs from 'fs/promises'
3 |
4 | interface LocationCache {
5 | [key: string]: {
6 | name: string
7 | timestamp: number
8 | }
9 | }
10 |
11 | import { CACHE_FILE, CACHE_DURATION } from '@/lib/constants';
12 |
13 | export const formatLocation = (lat: number | undefined, lon: number | undefined, alt: number | undefined): string => {
14 | if (lat === undefined || lon === undefined) return ''
15 |
16 | const latDir = lat >= 0 ? 'N' : 'S'
17 | const lonDir = lon >= 0 ? 'E' : 'W'
18 | const latAbs = Math.abs(lat).toFixed(6)
19 | const lonAbs = Math.abs(lon).toFixed(6)
20 |
21 | let locationStr = `${latAbs}°${latDir}, ${lonAbs}°${lonDir}`
22 |
23 | if (alt !== undefined) {
24 | const altNum = typeof alt === 'number' ? alt : Number(alt)
25 | if (!isNaN(altNum)) {
26 | locationStr += ` (${altNum.toFixed(1)}m)`
27 | }
28 | }
29 |
30 | return locationStr
31 | }
32 |
 33 | /**
 34 |  *
35 | * Parse formatted location string back to coordinates
36 | * Supports formats:
37 | * - "37.123456°N, 122.123456°W"
38 | * - "37.123456°N, 122.123456°W (100.0m)"
39 | * - "37.123456, -122.123456"
40 | */
41 | export function parseLocation(locationStr: string): { lat: number; lon: number } | null {
42 | if (!locationStr) return null
43 |
44 | // Try format with N/S/E/W
45 | const degreeMatch = locationStr.match(/(-?\d+\.?\d*)°([NS]),\s*(-?\d+\.?\d*)°([EW])/i)
46 | if (degreeMatch) {
47 | let lat = parseFloat(degreeMatch[1])
48 | let lon = parseFloat(degreeMatch[3])
49 |
50 | // Apply direction
51 | if (degreeMatch[2].toUpperCase() === 'S') lat = -lat
52 | if (degreeMatch[4].toUpperCase() === 'W') lon = -lon
53 |
54 | return { lat, lon }
55 | }
56 |
57 | // Try simple decimal format
58 | const decimalMatch = locationStr.match(/^(-?\d+\.?\d*),\s*(-?\d+\.?\d*)/)
59 | if (decimalMatch) {
60 | return {
61 | lat: parseFloat(decimalMatch[1]),
62 | lon: parseFloat(decimalMatch[2]),
63 | }
64 | }
65 |
66 | return null
67 | }
68 |
 69 | async function loadCache(): Promise<LocationCache> {
70 | try {
71 | const data = await fs.readFile(CACHE_FILE, 'utf-8')
72 | return JSON.parse(data)
73 | } catch {
74 | return {}
75 | }
76 | }
77 |
 78 | async function saveCache(cache: LocationCache): Promise<void> {
79 | await fs.writeFile(CACHE_FILE, JSON.stringify(cache, null, 2), 'utf-8')
80 | }
81 |
 82 | export async function getLocationName(location: string): Promise<string> {
83 | const locationData = parseLocation(location)
84 |
85 | if (!locationData || !locationData.lat || !locationData.lon) {
86 | return location
87 | }
88 |
89 | const lat = Number(locationData.lat)
90 | const lon = Number(locationData.lon)
91 |
92 | if (isNaN(lat) || isNaN(lon) || lat < -90 || lat > 90 || lon < -180 || lon > 180) {
93 | return location
94 | }
95 |
96 | const cacheKey = `${lat.toFixed(6)},${lon.toFixed(6)}`
97 |
98 | const cache = await loadCache()
99 | if (cache[cacheKey]) {
100 | const cached = cache[cacheKey]
101 | const age = Date.now() - cached.timestamp
102 | if (age < CACHE_DURATION) {
103 | return cached.name
104 | }
105 | }
106 |
107 | const url = `https://nominatim.openstreetmap.org/reverse?format=json&lat=${lat}&lon=${lon}&zoom=14&accept-language=en`
108 |
109 | const response = await fetch(url, {
110 | headers: {
111 | 'User-Agent': 'EditMind-VideoEditor/1.0',
112 | },
113 | })
114 |
115 | if (!response.ok) {
116 | const errorBody = await response.text()
117 | console.error('Nominatim error:', errorBody)
118 | throw new Error(`Geocoding failed: ${response.status} - ${errorBody}`)
119 | }
120 |
121 | const data = await response.json()
122 |
123 | let locationName: string
124 |
125 | if (data.address) {
126 | const addr = data.address
127 | const city = addr.city || addr.town || addr.village || addr.hamlet
128 | const country = addr.country
129 |
130 | if (city && country) {
131 | locationName = `${city}, ${country}`
132 | } else if (country) {
133 | locationName = country
134 | } else {
135 | locationName = data.display_name
136 | }
137 | } else {
138 | locationName = formatLocation(lat, lon, undefined)
139 | }
140 |
141 | cache[cacheKey] = {
142 | name: locationName,
143 | timestamp: Date.now(),
144 | }
145 | await saveCache(cache)
146 |
147 | return locationName
148 | }
149 |
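
Note: `formatLocation` and `parseLocation` are designed to round-trip, and `getLocationName` layers cached Nominatim reverse geocoding on top. A minimal sketch (the coordinates and the place-name comment are illustrative):

import { formatLocation, parseLocation, getLocationName } from '@/lib/utils/location'

const example = async () => {
  const formatted = formatLocation(37.7749, -122.4194, 16)
  // '37.774900°N, 122.419400°W (16.0m)'
  const coords = parseLocation(formatted)
  // { lat: 37.7749, lon: -122.4194 }
  if (coords) {
    // Checks the on-disk cache first, then queries Nominatim (e.g. 'San Francisco, United States').
    const name = await getLocationName(formatted)
    console.log(coords, name)
  }
}

void example()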
--------------------------------------------------------------------------------
/lib/utils/scenes.ts:
--------------------------------------------------------------------------------
1 | import path from 'path'
2 | import { Analysis, DetectedObject, Face } from '../types/analysis'
3 | import { Scene } from '../types/scene'
4 | import { Transcription } from '../types/transcription'
5 |
6 | export const generateSceneDescription = (objects: DetectedObject[], faces: Face[]): string => {
  7 |   const objectCounts: Record<string, number> = {}
8 | if (!Array.isArray(objects)) {
9 | return ''
10 | }
11 | for (const obj of objects) {
12 | objectCounts[obj.label] = (objectCounts[obj.label] || 0) + 1
13 | }
14 |
15 | if (faces.length > 0 && objectCounts['person']) {
16 | delete objectCounts['person']
17 | }
18 |
19 | const descriptionParts: string[] = []
20 |
21 | for (const [obj, count] of Object.entries(objectCounts)) {
22 | if (count > 1) {
23 | const pluralObj = obj.endsWith('s') ? obj : `${obj}s`
24 | descriptionParts.push(`${count} ${pluralObj}`)
25 | } else {
26 | descriptionParts.push(`a ${obj}`)
27 | }
28 | }
29 |
30 | if (faces && faces.length > 0) {
31 | if (faces.length === 1) {
32 | descriptionParts.push('a person')
33 | } else {
34 | descriptionParts.push(`${faces.length} people`)
35 | }
36 | }
37 |
38 | if (descriptionParts.length === 0) {
39 | return 'No objects or people detected.'
40 | }
41 |
42 | let description: string
43 | if (descriptionParts.length > 1) {
44 | description = descriptionParts.slice(0, -1).join(', ') + ` and ${descriptionParts[descriptionParts.length - 1]}`
45 | } else {
46 | description = descriptionParts[0]
47 | }
48 |
49 | return `A scene with ${description}.`
50 | }
51 | export const createScenes = async (
52 | analysis: Analysis,
53 | transcription: Transcription | null,
54 | videoPath: string
 55 | ): Promise<Scene[]> => {
56 | const scenes: Scene[] = []
57 |
58 | const getTranscriptionForTimeRange = (startTime: number, endTime: number): string => {
59 | if (!transcription?.segments) return ''
60 |
61 | const words: string[] = []
62 | for (const segment of transcription.segments) {
63 | for (const word of segment.words) {
64 | if (
65 | (word.start >= startTime && word.start <= endTime) ||
66 | (word.end >= startTime && word.end <= endTime) ||
67 | (word.start <= startTime && word.end >= endTime)
68 | ) {
69 | words.push(word.word.trim())
70 | }
71 | }
72 | }
73 |
74 | return words.join(' ')
75 | }
76 |
77 | for (const frame of analysis.frame_analysis) {
78 | const startTime = frame.start_time_ms / 1000
79 | const endTime = frame.end_time_ms / 1000
80 |
81 | const currentScene: Scene = {
82 | id: 'scene_' + (scenes.length + 1) + '_' + path.basename(videoPath),
83 | startTime,
84 | endTime,
85 | objects: frame.objects?.map((obj: DetectedObject) => obj.label) || [],
86 | faces: frame.faces?.map((face: Face) => face.name) || [],
87 | transcription: getTranscriptionForTimeRange(startTime, endTime),
88 | description: generateSceneDescription(frame.objects, frame.faces),
89 | shot_type: frame.shot_type,
90 | emotions: [],
91 | source: videoPath,
92 | camera: '',
93 | createdAt: '',
94 | dominantColorHex: frame.dominant_color.hex,
95 | dominantColorName: frame.dominant_color.name,
96 | detectedText: frame.detected_text?.map((item) => item.text) || [],
97 | location: '',
98 | duration: 0,
99 | }
100 |
101 | scenes.push(currentScene)
102 | }
103 |
104 | return scenes
105 | }
106 |
--------------------------------------------------------------------------------
/lib/utils/search.ts:
--------------------------------------------------------------------------------
1 | import { SearchSuggestion, VideoMetadataSummary } from '../types/search'
2 |
3 | export const generateSearchSuggestions = (metadataSummary: VideoMetadataSummary): SearchSuggestion[] => {
4 | const suggestions: SearchSuggestion[] = []
5 |
6 | if (metadataSummary.topFaces.length > 0) {
7 | suggestions.push({
8 | text: `scenes with @${metadataSummary.topFaces[0].name}`,
9 | icon: '👤',
10 | category: 'people',
11 | })
12 | }
13 | if (metadataSummary.topColors.length > 0) {
14 | suggestions.push({
15 | text: `scenes with ${metadataSummary.topColors[0].name}'s color`,
16 | icon: '🎨',
17 | category: 'color',
18 | })
19 | }
20 |
21 | if (metadataSummary.topEmotions.length > 0) {
22 | suggestions.push({
23 | text: `${metadataSummary.topEmotions[0].name} moments`,
24 | icon: '😊',
25 | category: 'emotion',
26 | })
27 | }
28 |
29 | if (metadataSummary.shotTypes.length > 0) {
30 | suggestions.push({
31 | text: `${metadataSummary.shotTypes[0].name.replace('-', ' ')}s`,
32 | icon: '🎬',
33 | category: 'scene',
34 | })
35 | }
36 |
37 | if (metadataSummary.topObjects.length > 0) {
38 | suggestions.push({
39 | text: `scenes with ${metadataSummary.topObjects[0].name}`,
40 | icon: '📍',
41 | category: 'scene',
42 | })
43 | }
44 |
45 | return suggestions.slice(0, 5)
46 | }
47 |
--------------------------------------------------------------------------------
/lib/utils/time.ts:
--------------------------------------------------------------------------------
1 | import { parse } from 'date-fns';
2 |
3 | export function convertTimeToWords(time?: string | number): string {
4 | if (time == null) return '0 seconds';
5 |
6 | let totalSeconds: number;
7 |
8 | if (typeof time === 'number') {
9 | totalSeconds = time;
10 | } else if (typeof time === 'string') {
11 | const parsedDate = parse(time, 'mm:ss', new Date(0));
12 | totalSeconds = parsedDate.getMinutes() * 60 + parsedDate.getSeconds();
13 | } else {
14 | return '0 seconds';
15 | }
16 |
17 | const minutes = Math.floor(totalSeconds / 60);
18 | const seconds = Math.floor(totalSeconds % 60);
19 |
20 | const minuteText = minutes === 1 ? 'minute' : 'minutes';
21 | const secondText = seconds === 1 ? 'second' : 'seconds';
22 |
23 | if (minutes > 0 && seconds > 0) {
24 | return `${minutes} ${minuteText} and ${seconds} ${secondText}`;
25 | } else if (minutes > 0) {
26 | return `${minutes} ${minuteText}`;
27 | } else if (seconds > 0) {
28 | return `${seconds} ${secondText}`;
29 | } else {
30 | return '0 seconds';
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/lib/utils/transcribe.ts:
--------------------------------------------------------------------------------
1 | import { pythonService } from '../services/pythonService'
2 | import { TranscriptionProgress } from '../types/transcription'
3 |
4 |
5 |
6 | type ProgressCallback = (progress: TranscriptionProgress) => void
7 |
8 | export async function transcribeAudio(
9 | videoFilePath: string,
10 | transcriptionPath: string,
11 | onProgress?: ProgressCallback
12 | ): Promise<{ path: string } | undefined> {
13 | return new Promise((resolve) => {
14 | try {
15 | pythonService.transcribe(
16 | videoFilePath,
17 | transcriptionPath,
18 | (progress) => {
19 | if (onProgress) onProgress(progress)
20 | },
 21 |         (_result) => {
 22 |           resolve({ path: transcriptionPath })
 23 |         },
 24 |         (error) => {
 25 |           console.error('Video transcription failed:', error)
 26 |           resolve(undefined)
 27 |         }
 28 |       )
 29 |     } catch (error) {
 30 |       console.error('Video transcription failed:', error)
 31 |       resolve(undefined)
 32 |     }
 33 |   })
 34 | }
 35 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "edit-mind",
3 | "version": "0.1.0",
4 | "description": "AI-Powered Video Indexing and Search",
5 | "main": "./out/main/main.js",
6 | "license": "MIT",
7 | "author": {
8 | "name": "ilias",
9 | "url": "https://github.com/iliashad"
10 | },
11 | "scripts": {
12 | "dev": "cross-env ELECTRON_DISABLE_SANDBOX=1 electron-vite dev -w",
13 | "format": "prettier --write .",
14 | "lint": "eslint . --ext .ts,.tsx --fix",
15 | "start": "electron-vite preview",
16 | "postinstall": "electron-builder install-app-deps",
17 | "vite:build:app": "electron-vite build",
18 | "electron:build:win": "electron-builder --win",
19 | "electron:build:mac": "electron-builder --mac",
20 | "electron:build:linux": "electron-builder --linux",
21 | "electron:build:dir": "electron-builder --dir",
22 | "build:unpack": "run-s vite:build:app electron:build:dir",
23 | "build:win": "run-s vite:build:app electron:build:win",
24 | "build:mac": "run-s vite:build:app electron:build:mac",
25 | "build:linux": "run-s vite:build:app electron:build:linux",
26 | "test": "vitest"
27 | },
28 | "repository": {
29 | "type": "git",
30 | "url": "https://github.com/iliashad/edit-mind"
31 | },
32 | "dependencies": {
33 | "@chroma-core/default-embed": "^0.1.8",
34 | "@electron-toolkit/preload": "^3.0.2",
35 | "@electron-toolkit/utils": "^4.0.0",
36 | "@google/genai": "^1.25.0",
37 | "@google/generative-ai": "^0.24.1",
38 | "@radix-ui/react-label": "^2.1.7",
39 | "@radix-ui/react-progress": "^1.1.7",
40 | "@radix-ui/react-scroll-area": "^1.2.10",
41 | "@radix-ui/react-select": "^2.2.6",
42 | "@radix-ui/react-slot": "^1.2.3",
43 | "@radix-ui/react-switch": "^1.2.6",
44 | "@radix-ui/react-tabs": "^1.1.13",
45 | "@types/fluent-ffmpeg": "^2.1.27",
46 | "canvas-confetti": "^1.9.3",
47 | "chromadb": "^3.0.17",
48 | "class-variance-authority": "^0.7.1",
49 | "clsx": "^2.1.1",
50 | "date-fns": "^4.1.0",
51 | "exiftool-vendored": "^31.1.0",
52 | "ffmpeg-ffprobe-static": "^6.1.2-rc.1",
53 | "fluent-ffmpeg": "^2.1.3",
54 | "gopro-telemetry": "^1.2.11",
55 | "gpmf-extract": "^0.3.2",
56 | "langchain": "^0.1.36",
57 | "lucide-react": "^0.541.0",
58 | "node-whisper": "^2024.11.13",
59 | "radix-ui": "^1.4.3",
60 | "react-error-boundary": "^6.0.0",
61 | "react-router-dom": "^7.9.4",
62 | "tailwind-merge": "^3.3.1",
63 | "tw-animate-css": "^1.3.7",
64 | "ws": "^8.18.3",
65 | "zod": "^4.1.3"
66 | },
67 | "devDependencies": {
68 | "@electron-toolkit/eslint-config-prettier": "^3.0.0",
69 | "@electron-toolkit/tsconfig": "^1.0.1",
70 | "@eslint/js": "^9.34.0",
71 | "@rushstack/eslint-patch": "^1.12.0",
72 | "@tailwindcss/vite": "^4.1.12",
73 | "@types/react": "^19.1.11",
74 | "@types/react-dom": "^19.1.8",
75 | "@types/ws": "^8.18.1",
76 | "@vitejs/plugin-react": "^5.0.1",
77 | "cross-env": "^10.0.0",
78 | "dotenv": "^17.2.3",
79 | "electron": "^37.3.1",
80 | "electron-builder": "^26.0.12",
81 | "electron-vite": "^4.0.0",
82 | "eslint": "^9.34.0",
83 | "eslint-plugin-react": "^7.37.5",
84 | "eslint-plugin-react-hooks": "^5.2.0",
85 | "framer-motion": "^12.23.12",
86 | "npm-run-all": "^4.1.5",
87 | "prettier": "^3.6.2",
88 | "react": "^19.1.1",
89 | "react-dom": "^19.1.1",
90 | "tailwindcss": "^4.1.12",
91 | "typescript": "^5.9.2",
92 | "typescript-eslint": "^8.41.0",
93 | "vite": "^7.1.3",
94 | "vitest": "^3.2.4"
95 | }
96 | }
97 |
--------------------------------------------------------------------------------
/python/add_face.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | import json
4 | import face_recognition
5 |
6 | def add_face(name, image_path, known_faces_file):
7 | """
8 | Extracts a face encoding from an image and adds it to the known faces file.
9 | """
10 | if not os.path.exists(image_path):
11 | print(json.dumps({"status": "error", "message": "Image file not found."}), file=sys.stderr)
12 | sys.exit(1)
13 |
14 | try:
15 | image = face_recognition.load_image_file(image_path)
16 | face_encodings = face_recognition.face_encodings(image)
17 |
18 | if len(face_encodings) == 0:
19 | print(json.dumps({"status": "warning", "message": "No face detected in image."}), file=sys.stderr)
20 | sys.exit(0) # Not an error, just no face found
21 |
22 | if len(face_encodings) > 1:
23 | print(json.dumps({"status": "warning", "message": "Multiple faces found. Using the first one."}), file=sys.stderr)
24 |
25 | new_encoding = face_encodings[0].tolist()
26 |
27 | # Load existing known faces
28 | known_faces = []
29 | if os.path.exists(known_faces_file):
30 | try:
31 | with open(known_faces_file, 'r') as f:
32 | content = f.read().strip()
33 | if content: # Only parse if file is not empty
 34 |                         known_faces = json.loads(content)
35 | else:
36 | known_faces = []
37 | except json.JSONDecodeError as e:
38 | print(json.dumps({
39 | "status": "error",
40 | "message": f"Corrupted known_faces.json: {e.msg} at line {e.lineno}"
41 | }), file=sys.stderr)
42 | sys.exit(1)
43 |
44 | # Add new face encoding in the format expected by FaceRecognizer
45 | # Format: [{"name": "Person Name", "encoding": [...]}, ...]
46 | known_faces.append({
47 | "name": name,
48 | "encoding": new_encoding
49 | })
50 |
51 | # Write back to file with proper formatting
52 | with open(known_faces_file, 'w') as f:
53 | json.dump(known_faces, f, indent=2)
54 |
55 | print(json.dumps({
56 | "status": "success",
57 | "message": f"Face for {name} added successfully.",
58 | "total_encodings": len(known_faces)
59 | }))
60 | sys.exit(0)
61 |
62 | except Exception as e:
63 | print(json.dumps({
64 | "status": "error",
65 | "message": f"Error processing image: {str(e)}"
66 | }), file=sys.stderr)
67 | import traceback
68 | traceback.print_exc(file=sys.stderr)
69 | sys.exit(1)
70 |
71 | if __name__ == "__main__":
72 | if len(sys.argv) != 4:
73 | print(json.dumps({
74 | "status": "error",
75 | "message": "Usage: python add_face.py "
76 | }), file=sys.stderr)
77 | sys.exit(1)
78 |
79 | name = sys.argv[1]
80 | image_path = sys.argv[2]
81 | known_faces_file = sys.argv[3]
82 | add_face(name, image_path, known_faces_file)
--------------------------------------------------------------------------------
/python/plugins/base.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Dict, Any
3 | import numpy as np
4 |
5 |
6 | class AnalyzerPlugin(ABC):
7 | """
8 | Base class for all video analysis plugins.
9 |
10 | Plugins extend the video analysis pipeline by processing frames
11 | and extracting specific types of information (objects, faces, etc).
12 | """
13 |
14 | def __init__(self, config: Dict[str, Any]):
15 | """
16 | Initialize plugin with configuration.
17 |
18 | Args:
19 | config: Configuration dictionary containing plugin settings
20 | """
21 | self.config = config
22 |
23 | @abstractmethod
24 | def setup(self) -> None:
25 | """
26 | Perform one-time initialization (load models, resources, etc).
27 | Called once before frame processing begins.
28 | """
29 | pass
30 |
31 | @abstractmethod
32 | def analyze_frame(
33 | self,
34 | frame: np.ndarray,
35 | frame_analysis: Dict[str, Any],
36 | video_path: str
37 | ) -> Dict[str, Any]:
38 | """
39 | Analyze a single video frame.
40 |
41 | Args:
42 | frame: Video frame as NumPy array (BGR format)
43 | frame_analysis: Existing analysis data for this frame
44 | video_path: Path to the video being analyzed
45 |
46 | Returns:
47 | Updated frame_analysis dictionary with plugin results
48 | """
49 | pass
50 |
51 | @abstractmethod
52 | def get_results(self) -> Any:
53 | """
54 | Return accumulated results from all processed frames.
55 | Called after all frames have been analyzed.
56 | """
57 | pass
58 |
59 | @abstractmethod
60 | def get_summary(self) -> Any:
61 | """
62 | Return high-level summary of analysis results.
63 | Called after processing is complete.
64 | """
65 | pass
--------------------------------------------------------------------------------
/python/plugins/emotion_detection.py:
--------------------------------------------------------------------------------
1 |
2 | import sys
3 | import warnings
4 | from typing import List, Dict, Any
5 |
6 | from fer import FER
7 |
8 | from plugins.base import AnalyzerPlugin
9 |
10 |
11 | import numpy as np
12 |
13 | class EmotionDetectionPlugin(AnalyzerPlugin):
14 | """
15 | A plugin for detecting emotions in faces.
16 | """
17 |
18 | def __init__(self, config: Dict[str, Any]):
19 | super().__init__(config)
20 | self.emotion_detector = None
21 |
22 | def setup(self):
23 | """
24 | Initializes the FER emotion detector.
25 | """
26 | self.emotion_detector = FER(mtcnn=True)
27 |
28 | def analyze_frame(self, frame: np.ndarray, frame_analysis: Dict[str, Any], video_path: str) -> Dict[str, Any]:
29 | """
30 | Analyzes a single frame for emotions.
31 |
32 | :param frame: The frame to analyze.
33 | :param frame_analysis: A dictionary containing the analysis results so far for the current frame.
34 | :return: An updated dictionary with the analysis results from this plugin.
35 | """
36 | if 'faces' in frame_analysis and frame_analysis['faces']:
37 | self._add_emotions(frame, frame_analysis['faces'])
38 | return frame_analysis
39 |
40 | def _add_emotions(self, frame: Any, faces: List[Dict]) -> None:
41 | """
42 | Add emotion data to recognized faces.
43 | """
44 | if not faces:
45 | return
46 |
47 | with warnings.catch_warnings():
48 | warnings.simplefilter("ignore")
49 | try:
50 | emotions_results = self.emotion_detector.detect_emotions(frame)
51 | except Exception as e:
52 | print(f"DEBUG: Error detecting emotions for frame: {e}", file=sys.stderr)
53 | emotions_results = []
54 |
55 | if emotions_results:
56 | for face in faces:
57 | ft, fr, fb, fl = face['location']
58 | face_area = (fr - fl) * (fb - ft)
59 |
60 | best_match_emotion = None
61 | max_iou = 0.0
62 |
63 | for emotion_res in emotions_results:
64 | el, et, ew, eh = emotion_res['box']
65 | er = el + ew
66 | eb = et + eh
67 | emotion_area = ew * eh
68 |
69 | x_overlap = max(0, min(fr, er) - max(fl, el))
70 | y_overlap = max(0, min(fb, eb) - max(ft, et))
71 | intersection_area = x_overlap * y_overlap
72 |
73 | union_area = face_area + emotion_area - intersection_area
74 | iou = intersection_area / union_area if union_area > 0 else 0
75 |
76 | if iou > max_iou:
77 | max_iou = iou
78 | best_match_emotion = emotion_res['emotions']
79 |
80 | if max_iou > 0.4:
81 | face['emotion'] = best_match_emotion
82 | else:
83 | face['emotion'] = None
84 | else:
85 | for face in faces:
86 | face['emotion'] = None
87 |
88 | def get_results(self) -> Any:
89 | """
90 | Returns the final analysis results from the plugin.
91 | """
92 | return None
93 |
94 | def get_summary(self) -> Any:
95 | """
96 | Returns a summary of the analysis results.
97 | """
98 | return None
99 |
--------------------------------------------------------------------------------
/python/plugins/object_detection.py:
--------------------------------------------------------------------------------
1 |
2 | from typing import List, Dict, Any
3 | import numpy as np
4 | import torch
5 | from ultralytics import YOLO
6 |
7 | from plugins.base import AnalyzerPlugin
8 |
9 |
10 |
11 | class ObjectDetectionPlugin(AnalyzerPlugin):
12 | """
13 | A plugin for detecting objects in video frames using YOLO.
14 | """
15 |
16 | def __init__(self, config: Dict[str, Any]):
17 | super().__init__(config)
18 | self.yolo_model = None
19 |
20 | def setup(self):
21 | """
22 | Initializes the YOLO model.
23 | """
24 | self.yolo_model = YOLO(self.config['yolo_model'])
25 | self.yolo_model.to(self.config['device'])
26 | self.yolo_model.fuse()
27 |
28 | def analyze_frame(self, frame: np.ndarray, frame_analysis: Dict[str, Any], video_path: str) -> Dict[str, Any]:
29 | """
30 | Analyzes a single frame for objects.
31 |
32 | :param frame: The frame to analyze.
33 | :param frame_analysis: A dictionary containing the analysis results so far for the current frame.
34 | :return: An updated dictionary with the analysis results from this plugin.
35 | """
36 | detections_results = self._run_object_detection([frame])
37 | frame_objects = []
38 | if detections_results:
39 | detections = detections_results[0]
40 | if detections.boxes:
41 | for det in detections.boxes:
42 | label = self.yolo_model.names[int(det.cls[0])]
43 | confidence = float(det.conf[0])
44 | box = det.xyxy[0].tolist()
45 |
46 | frame_objects.append({
47 | "label": label,
48 | "confidence": confidence,
49 | "box": box
50 | })
51 | frame_analysis['objects'] = frame_objects
52 | return frame_analysis
53 |
54 | def _run_object_detection(self, frames: List[Any]) -> List:
55 | """
56 | Run YOLO object detection on a batch of frames.
57 | """
58 | with torch.no_grad():
59 | return self.yolo_model.predict(
60 | frames,
61 | verbose=False,
62 | device=self.config['device'],
63 | conf=self.config['yolo_confidence'],
64 | iou=self.config['yolo_iou'],
65 | half=True
66 | )
67 |
68 | def get_results(self) -> Any:
69 | """
70 | Returns the final analysis results from the plugin.
71 | """
72 | return None
73 |
74 | def get_summary(self) -> Any:
75 | """
76 | Returns a summary of the analysis results.
77 | """
78 | return None
79 |
--------------------------------------------------------------------------------
/python/plugins/shot_type.py:
--------------------------------------------------------------------------------
1 | from typing import List, Dict, Any
2 | import numpy as np
3 | from collections import deque
4 | from plugins.base import AnalyzerPlugin
5 |
6 | class ShotTypePlugin(AnalyzerPlugin):
7 | """
8 | A plugin for classifying the shot type of video frames.
9 | Depends on face detection data from FaceRecognitionPlugin.
10 | """
11 |
12 | def __init__(self, config: Dict[str, Any]):
13 | super().__init__(config)
14 | self.CLOSE_UP_THRESHOLD = config.get("close_up_threshold", 0.3)
15 | self.MEDIUM_SHOT_THRESHOLD = config.get("medium_shot_threshold", 0.1)
16 | self.ratio_window = deque(maxlen=config.get("smoothing_window", 5))
17 |
18 | def setup(self):
19 | pass
20 |
21 | def analyze_frame(self, frame: np.ndarray, frame_analysis: Dict[str, Any], video_path: str) -> Dict[str, Any]:
22 | frame_height, frame_width = frame.shape[:2]
23 | faces = frame_analysis.get("faces", [])
24 | shot_type = self.classify(frame_width, frame_height, faces)
25 | frame_analysis["shot_type"] = shot_type
26 | return frame_analysis
27 |
28 | def classify(self, frame_width: int, frame_height: int, faces: List[Dict]) -> str:
29 | if not faces:
30 | return "long-shot"
31 |
32 | frame_area = frame_width * frame_height
33 |
34 | total_face_area = sum(
35 | abs(r - l) * abs(b - t)
36 | for face in faces
37 | if (loc := face.get("location")) and len(loc) == 4
38 | for t, r, b, l in [loc]
39 | )
40 |
41 | ratio = total_face_area / frame_area if frame_area else 0.0
42 | self.ratio_window.append(ratio)
43 | smoothed_ratio = np.mean(self.ratio_window)
44 |
45 | if smoothed_ratio > self.CLOSE_UP_THRESHOLD:
46 | return "close-up"
47 | elif smoothed_ratio > self.MEDIUM_SHOT_THRESHOLD:
48 | return "medium-shot"
49 | return "long-shot"
50 |
51 | def get_results(self) -> Any:
52 | return None
53 |
54 | def get_summary(self) -> Any:
55 | return None
56 |
--------------------------------------------------------------------------------
/python/plugins/text_detection.py:
--------------------------------------------------------------------------------
1 | from .base import AnalyzerPlugin
2 | from typing import Dict, Any
3 | import numpy as np
4 | import logging
5 |
6 |
7 | logger = logging.getLogger(__name__)
8 |
9 | try:
10 | import easyocr
11 | EASYOCR_AVAILABLE = True
12 | except ImportError:
13 | EASYOCR_AVAILABLE = False
14 |
15 | class TextDetectionPlugin(AnalyzerPlugin):
16 | """
17 | Analyzes frames to detect and recognize text using EasyOCR.
18 | """
19 |
20 | def __init__(self, config: Dict[str, Any]):
21 | super().__init__(config)
22 | self.reader = None
23 |
24 | def setup(self):
25 | """
26 | Initializes the EasyOCR reader. This is done once when the plugin is loaded.
27 | """
28 | if not EASYOCR_AVAILABLE:
29 | logger.warning("EasyOCR not installed. Text detection will be skipped. Please run: pip install easyocr")
30 | return
31 |
32 | try:
33 | # Initialize the reader for English. It will download the model on the first run.
34 | self.reader = easyocr.Reader(['en'], gpu=self.config.get('device') != 'cpu')
35 | logger.info("EasyOCR reader initialized for text detection.")
36 | except Exception as e:
37 | logger.error(f"Failed to initialize EasyOCR reader: {e}")
38 | self.reader = None
39 |
40 | def analyze_frame(self, frame: np.ndarray, frame_analysis: Dict[str, Any], video_path: str) -> Dict[str, Any]:
41 | """
42 | Detects text in a single frame.
43 |
44 | :param frame: The frame to analyze (as a NumPy array).
45 | :param frame_analysis: Existing analysis for this frame.
46 | :return: Updated analysis dictionary with detected text.
47 | """
48 | # Check if reader is initialized
49 | if self.reader is None:
50 | return {}
51 |
52 | try:
53 | # EasyOCR expects images in RGB format
54 | frame_rgb = frame[:, :, ::-1]
55 |
56 | # Perform text detection and recognition
57 | results = self.reader.readtext(frame_rgb, detail=1)
58 |
59 | detected_texts = []
60 | for (bbox, text, prob) in results:
61 | # bbox is a list of 4 points (x, y)
62 | # Convert numpy types to native Python types for JSON serialization
63 | detected_texts.append({
64 | 'text': text,
65 | 'confidence': float(prob),
66 | 'bounding_box': [[int(p[0]), int(p[1])] for p in bbox]
67 | })
68 |
69 |
70 | if detected_texts:
71 | return {'detected_text': detected_texts}
72 |
73 | except Exception as e:
74 | logger.error(f"Error during text detection: {e}")
75 |
76 | return {}
77 |
78 | def get_results(self) -> Any:
79 | """
80 | This plugin does not aggregate scene-level results.
81 | """
82 | return None
83 |
84 | def get_summary(self) -> Any:
85 | """
86 | This plugin does not provide a summary.
87 | """
88 | return None
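
A minimal standalone sketch of this plugin, assuming it is run from the `python/` directory and that `sign.jpg` is a hypothetical test image; it mainly shows the shape of the dictionary that `analyze_frame` merges into the frame analysis:

```python
import cv2
from plugins.text_detection import TextDetectionPlugin

plugin = TextDetectionPlugin({"device": "cpu"})
plugin.setup()                  # logs a warning and no-ops if easyocr is missing

frame = cv2.imread("sign.jpg")  # OpenCV loads images as BGR, matching the frames above
result = plugin.analyze_frame(frame, {}, "sign.jpg")

# On success, `result` looks like:
# {"detected_text": [{"text": "STOP",
#                     "confidence": 0.97,
#                     "bounding_box": [[x1, y1], [x2, y1], [x2, y2], [x1, y2]]}]}
print(result.get("detected_text", []))
```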
--------------------------------------------------------------------------------
/python/requirements.txt:
--------------------------------------------------------------------------------
1 | # Core dependencies
2 | opencv-python>=4.8.0
3 | numpy>=1.24.0,<2.0.0
4 | tqdm>=4.66.0
5 |
6 | # ML/AI frameworks
7 | torch>=2.1.0
8 | torchvision>=0.16.0
9 | ultralytics>=8.0.0
10 |
11 | # Face recognition
12 | face-recognition>=1.3.0
13 | dlib>=19.24.0
14 |
15 | # Emotion detection (optional - can be heavy)
16 | fer>=22.5.0
17 | tensorflow>=2.13.0
18 |
19 | # Vector database
20 | chromadb>=0.4.0
21 |
22 | # Audio transcription
23 | openai-whisper>=20230314
24 |
25 | # Color analysis
26 | scikit-learn>=1.3.0
27 |
28 | # OCR
29 | easyocr>=1.7.0
30 | Pillow>=9.3.0
31 |
32 | # Monitoring
33 | psutil>=5.9.0
34 |
35 | # Websockets
36 | websockets>=12.0
37 | python-multipart>=0.0.6
38 |
39 | # Testing
40 | pytest>=7.4.0
41 | pytest-mock>=3.12.0
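
The dependencies install with `pip install -r python/requirements.txt`; note that several import names differ from the package names (opencv-python imports as `cv2`, openai-whisper as `whisper`, face-recognition as `face_recognition`). As a small sketch in the same guarded-import spirit the plugins use, one could check which optional features will actually be available (this helper is hypothetical, not part of the repo):

```python
# Hypothetical helper: report which analyzer dependencies resolve in this environment.
import importlib.util

REQUIRED = ["cv2", "numpy", "torch", "ultralytics", "chromadb", "whisper"]
OPTIONAL = ["easyocr", "fer", "face_recognition"]  # their features are skipped when missing

for name in REQUIRED + OPTIONAL:
    found = importlib.util.find_spec(name) is not None
    kind = "required" if name in REQUIRED else "optional"
    print(f"{name:20s} {kind:8s} {'ok' if found else 'MISSING'}")
```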
--------------------------------------------------------------------------------
/resources/build/entitlements.mac.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 |   <dict>
5 |     <key>com.apple.security.cs.allow-jit</key>
6 |     <true/>
7 |     <key>com.apple.security.cs.allow-unsigned-executable-memory</key>
8 |     <true/>
9 |     <key>com.apple.security.cs.allow-dyld-environment-variables</key>
10 |     <true/>
11 |   </dict>
12 | </plist>
13 |
--------------------------------------------------------------------------------
/resources/build/icon.icns:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IliasHad/edit-mind/a5be7ff9527f4a85857ec5fd443b5d06ab821007/resources/build/icon.icns
--------------------------------------------------------------------------------
/resources/build/icon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IliasHad/edit-mind/a5be7ff9527f4a85857ec5fd443b5d06ab821007/resources/build/icon.ico
--------------------------------------------------------------------------------
/resources/build/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IliasHad/edit-mind/a5be7ff9527f4a85857ec5fd443b5d06ab821007/resources/build/icon.png
--------------------------------------------------------------------------------
/resources/build/icon.svg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IliasHad/edit-mind/a5be7ff9527f4a85857ec5fd443b5d06ab821007/resources/build/icon.svg
--------------------------------------------------------------------------------
/resources/icons/electron.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IliasHad/edit-mind/a5be7ff9527f4a85857ec5fd443b5d06ab821007/resources/icons/electron.png
--------------------------------------------------------------------------------
/resources/icons/era.svg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IliasHad/edit-mind/a5be7ff9527f4a85857ec5fd443b5d06ab821007/resources/icons/era.svg
--------------------------------------------------------------------------------
/resources/icons/react.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IliasHad/edit-mind/a5be7ff9527f4a85857ec5fd443b5d06ab821007/resources/icons/react.png
--------------------------------------------------------------------------------
/resources/icons/shadcn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IliasHad/edit-mind/a5be7ff9527f4a85857ec5fd443b5d06ab821007/resources/icons/shadcn.png
--------------------------------------------------------------------------------
/resources/icons/tailwind.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IliasHad/edit-mind/a5be7ff9527f4a85857ec5fd443b5d06ab821007/resources/icons/tailwind.png
--------------------------------------------------------------------------------
/resources/icons/vite.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IliasHad/edit-mind/a5be7ff9527f4a85857ec5fd443b5d06ab821007/resources/icons/vite.png
--------------------------------------------------------------------------------
/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "num_dominant_colors": 2,
3 | "color_sample_size": 1000,
4 | "sample_interval_seconds": 2.0,
5 | "max_workers": 4,
6 | "batch_size": 16,
7 | "yolo_confidence": 0.35,
8 | "yolo_iou": 0.45,
9 | "resize_to_720p": true,
10 | "yolo_model": "yolov8n.pt",
11 | "output_dir": "analysis_results",
12 | "enable_streaming": true,
13 | "enable_aggressive_gc": true,
14 | "frame_buffer_limit": 8
15 | }
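
The keys here mirror what the analyzer plugins read from `self.config` (e.g. `yolo_confidence`, `yolo_iou`, `batch_size`). As a minimal, hypothetical loader sketch, merging the file over defaults so that keys the plugins expect but the file omits (such as `device`) still resolve:

```python
import json
from pathlib import Path

# Defaults chosen to match the values shipped in settings.json; "device" is an
# assumption, since the plugins read config["device"] but the file omits it.
DEFAULTS = {"device": "cpu", "yolo_confidence": 0.35, "yolo_iou": 0.45, "batch_size": 16}

def load_settings(path: str = "settings.json") -> dict:
    config = dict(DEFAULTS)
    file = Path(path)
    if file.exists():
        config.update(json.loads(file.read_text()))
    return config

config = load_settings()
print(config["yolo_model"], config["device"])    # -> yolov8n.pt cpu with the file above
```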
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "baseUrl": ".",
4 | "paths": {
5 | "@/*": ["./*"],
6 | "@/app/*": ["./app/*"],
7 | "@/lib/*": ["./lib/*"],
8 | "@/resources/*": ["./resources/*"]
9 | }
10 | },
11 | "files": [],
12 | "references": [{ "path": "./tsconfig.node.json" }, { "path": "./tsconfig.web.json" }]
13 | }
14 |
--------------------------------------------------------------------------------
/tsconfig.node.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "@electron-toolkit/tsconfig/tsconfig.node.json",
3 | "include": ["lib/main/index.d.ts", "electron.vite.config.*", "lib/**/*", "resources/**/*", "app/**/*"],
4 | "compilerOptions": {
5 | "composite": true,
6 | "moduleResolution": "bundler",
7 | "types": ["electron-vite/node"],
8 | "baseUrl": ".",
9 | "paths": {
10 | "@/*": ["./*"]
11 | }
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/tsconfig.web.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "@electron-toolkit/tsconfig/tsconfig.web.json",
3 | "include": [
4 | "app/index.d.ts",
5 | "app/**/*",
6 | "lib/**/*",
7 | "lib/conveyor/*.d.ts",
8 | "resources/**/*"
9 | ],
10 | "compilerOptions": {
11 | "composite": true,
12 | "jsx": "react-jsx",
13 | "baseUrl": ".",
14 | "types": [
15 | "electron-vite/node"
16 | ],
17 | "paths": {
18 | "@/*": [
19 | "./*"
20 | ]
21 | }
22 | }
23 | }
--------------------------------------------------------------------------------