├── .claude └── settings.local.json ├── .cursor └── rules │ ├── add-new-llm-provider.mdc │ ├── api-management.mdc │ ├── code-conventions.mdc │ ├── development-process.mdc │ ├── project-structure.mdc │ ├── task-guides.mdc │ ├── tech-stack.mdc │ └── test.mdc ├── .cursorrules ├── .dockerignore ├── .env.example ├── .eslintrc.json ├── .github ├── FUNDING.yml └── workflows │ ├── claude.yml │ ├── lint-and-format.yml │ ├── locale-updater.yml │ ├── old │ ├── auto-translate.yml │ └── issue-analyzer.yml │ └── test.yml ├── .gitignore ├── .prettierignore ├── .prettierrc.json ├── .vscode └── settings.json ├── CLAUDE.md ├── Dockerfile ├── LICENSE ├── README.md ├── cursor.txt ├── docker-compose.yml ├── docs ├── README_en.md ├── README_ko.md ├── README_pl.md ├── README_zh.md ├── auto_translate.md ├── character_model_licence.md ├── character_model_licence_en.md ├── character_model_licence_ko.md ├── character_model_licence_pl.md ├── character_model_licence_zh.md ├── images │ ├── architecture.svg │ └── architecture_en.svg ├── license-faq.md ├── license-faq_en.md ├── license.md ├── license_en.md ├── logo.png ├── logo_licence.md ├── logo_licence_en.md ├── logo_licence_ko.md ├── logo_licence_pl.md └── logo_licence_zh.md ├── electron.mjs ├── jest.config.js ├── jest.setup.canvas.js ├── jest.setup.js ├── locales ├── ar │ └── translation.json ├── de │ └── translation.json ├── en │ └── translation.json ├── es │ └── translation.json ├── fr │ └── translation.json ├── hi │ └── translation.json ├── it │ └── translation.json ├── ja │ └── translation.json ├── ko │ └── translation.json ├── pl │ └── translation.json ├── pt │ └── translation.json ├── ru │ └── translation.json ├── th │ └── translation.json ├── vi │ └── translation.json └── zh │ └── translation.json ├── next.config.js ├── package-lock.json ├── package.json ├── postcss.config.js ├── public ├── backgrounds │ └── bg-c.png ├── bg-c.png ├── favicon.ico ├── github-mark-white.svg ├── idle_loop.vrma ├── images │ ├── ai-logos │ │ ├── 
anthropic.svg │ │ ├── azure.svg │ │ ├── cohere.svg │ │ ├── custom-api.svg │ │ ├── deepseek.svg │ │ ├── dify.svg │ │ ├── fireworks.svg │ │ ├── google.svg │ │ ├── groq.svg │ │ ├── lmstudio.svg │ │ ├── local.svg │ │ ├── mistralai.svg │ │ ├── ollama.svg │ │ ├── openai.svg │ │ ├── openrouter.svg │ │ ├── perplexity.svg │ │ └── xai.svg │ ├── icons │ │ ├── external-link.svg │ │ ├── screen-share.svg │ │ └── stop.svg │ └── setting-icons │ │ ├── ai-settings.svg │ │ ├── basic-settings.svg │ │ ├── character-settings.svg │ │ ├── conversation-history.svg │ │ ├── description.svg │ │ ├── logo2-2favicon.svg │ │ ├── microphone-settings.svg │ │ ├── other-settings.svg │ │ ├── slide-settings.svg │ │ ├── voice-settings.svg │ │ └── youtube-settings.svg ├── live2d │ └── nike01 │ │ ├── expressions │ │ ├── Angry.exp3.json │ │ ├── Focus.exp3.json │ │ ├── HairpinChange.exp3.json │ │ ├── Happy.exp3.json │ │ ├── Happy2.exp3.json │ │ ├── Neutral.exp3.json │ │ ├── NoSmile.exp3.json │ │ ├── Sad.exp3.json │ │ ├── Sad2.exp3.json │ │ ├── Sleep.exp3.json │ │ ├── Think.exp3.json │ │ ├── Think2.exp3.json │ │ ├── Troubled.exp3.json │ │ └── Zitome.exp3.json │ │ ├── items_pinned_to_model.json │ │ ├── motions │ │ ├── Motion1.motion3.json │ │ ├── Motion2.motion3.json │ │ ├── Motion3.motion3.json │ │ ├── Motion4.motion3.json │ │ ├── Motion5.motion3.json │ │ ├── Motion6.motion3.json │ │ ├── Motion7.motion3.json │ │ ├── Motion8.motion3.json │ │ └── Motion9.motion3.json │ │ ├── nike01.8192 │ │ └── texture_00.png │ │ ├── nike01.cdi3.json │ │ ├── nike01.moc3 │ │ ├── nike01.model3.json │ │ └── nike01.physics3.json ├── ogp.png ├── scripts │ └── .gitkeep ├── slides │ ├── demo │ │ ├── images │ │ │ ├── demo-folder.png │ │ │ ├── file-structure.png │ │ │ ├── logo.png │ │ │ ├── settings-screen.png │ │ │ └── start-button.png │ │ ├── scripts.json │ │ ├── slides.md │ │ ├── supplement.txt │ │ └── theme.css │ └── sample.txt ├── speakers_aivis.json ├── voice_test.wav └── vrm │ ├── AvatarSample_A.vrm │ ├── AvatarSample_B.vrm │ 
├── AvatarSample_C.vrm │ ├── nikechan_v1.vrm │ ├── nikechan_v2.vrm │ └── nikechan_v2_outerwear.vrm ├── requirements.txt ├── scripts ├── .gitignore ├── analyze_issue.py ├── auto_translate.py └── requirements.txt ├── src ├── __mocks__ │ ├── canvas.js │ ├── canvasMock.js │ ├── node-canvas.js │ ├── openai.js │ ├── readableStream.js │ └── three │ │ └── examples │ │ └── jsm │ │ ├── controls │ │ └── OrbitControls.js │ │ └── loaders │ │ └── GLTFLoader.js ├── __tests__ │ ├── features │ │ ├── chat │ │ │ ├── aiChatFactory.test.ts │ │ │ ├── difyChat.test.ts │ │ │ ├── handlers.test.ts │ │ │ ├── openAIAudioChat.test.ts │ │ │ └── vercelAIChat.test.ts │ │ └── messages │ │ │ ├── messageSelectors.test.ts │ │ │ └── speakCharacter.test.ts │ ├── testUtils.ts │ └── utils │ │ └── textProcessing.test.ts ├── components │ ├── Live2DComponent.tsx │ ├── assistantText.tsx │ ├── capture.tsx │ ├── characterPresetMenu.tsx │ ├── chatLog.tsx │ ├── common │ │ └── VideoDisplay.tsx │ ├── form.tsx │ ├── githubLink.tsx │ ├── iconButton.tsx │ ├── introduction.tsx │ ├── link.tsx │ ├── live2DViewer.tsx │ ├── menu.tsx │ ├── messageInput.tsx │ ├── messageInputContainer.tsx │ ├── messageReceiver.tsx │ ├── meta.tsx │ ├── modalImage.tsx │ ├── presetQuestionButtons.tsx │ ├── realtimeAPITools.json │ ├── realtimeAPITools.tsx │ ├── realtimeAPIUtils.tsx │ ├── settings │ │ ├── advancedSettings.tsx │ │ ├── ai.tsx │ │ ├── based.tsx │ │ ├── character.tsx │ │ ├── description.tsx │ │ ├── externalLinkage.tsx │ │ ├── index.tsx │ │ ├── log.tsx │ │ ├── messageReceiver.tsx │ │ ├── modelProvider.tsx │ │ ├── other.tsx │ │ ├── presetQuestions.tsx │ │ ├── slide.tsx │ │ ├── slideConvert.tsx │ │ ├── speechInput.tsx │ │ ├── voice.tsx │ │ └── youtube.tsx │ ├── slideContent.tsx │ ├── slideControls.tsx │ ├── slideText.tsx │ ├── slides.tsx │ ├── speakers.json │ ├── textButton.tsx │ ├── toast.tsx │ ├── toasts.tsx │ ├── useExternalLinkage.tsx │ ├── useRealtimeAPI.tsx │ ├── useYoutube.tsx │ ├── vrmViewer.tsx │ ├── webcam.tsx │ ├── 
websocketManager.tsx │ └── youtubeManager.tsx ├── features │ ├── chat │ │ ├── aiChatFactory.ts │ │ ├── difyChat.ts │ │ ├── handlers.ts │ │ ├── openAIAudioChat.ts │ │ └── vercelAIChat.ts │ ├── constants │ │ ├── aiModels.ts │ │ ├── koeiroParam.ts │ │ ├── settings.ts │ │ └── systemPromptConstants.ts │ ├── emoteController │ │ ├── autoBlink.ts │ │ ├── autoLookAt.ts │ │ ├── emoteConstants.ts │ │ ├── emoteController.ts │ │ └── expressionController.ts │ ├── lipSync │ │ ├── lipSync.ts │ │ └── lipSyncAnalyzeResult.ts │ ├── messages │ │ ├── live2dHandler.ts │ │ ├── messageSelectors.ts │ │ ├── messages.ts │ │ ├── speakCharacter.ts │ │ ├── speakQueue.ts │ │ ├── synthesizeStyleBertVITS2.ts │ │ ├── synthesizeVoiceAivisSpeech.ts │ │ ├── synthesizeVoiceAzureOpenAI.ts │ │ ├── synthesizeVoiceElevenlabs.ts │ │ ├── synthesizeVoiceGSVI.ts │ │ ├── synthesizeVoiceGoogle.ts │ │ ├── synthesizeVoiceKoeiromap.ts │ │ ├── synthesizeVoiceNijivoice.ts │ │ ├── synthesizeVoiceOpenAI.ts │ │ └── synthesizeVoiceVoicevox.ts │ ├── slide │ │ └── slideAIHelpers.ts │ ├── stores │ │ ├── home.ts │ │ ├── menu.ts │ │ ├── settings.ts │ │ ├── slide.ts │ │ ├── toast.ts │ │ └── websocketStore.ts │ ├── vrmViewer │ │ ├── model.ts │ │ └── viewer.ts │ └── youtube │ │ ├── conversationContinuityFunctions.ts │ │ └── youtubeComments.ts ├── hooks │ ├── useAudioProcessing.ts │ ├── useBrowserSpeechRecognition.ts │ ├── useDraggable.ts │ ├── useRealtimeVoiceAPI.ts │ ├── useResizable.ts │ ├── useSilenceDetection.ts │ ├── useVoiceRecognition.ts │ └── useWhisperRecognition.ts ├── lib │ ├── VRMAnimation │ │ ├── VRMAnimation.ts │ │ ├── VRMAnimationLoaderPlugin.ts │ │ ├── VRMAnimationLoaderPluginOptions.ts │ │ ├── VRMCVRMAnimation.ts │ │ ├── loadVRMAnimation.ts │ │ └── utils │ │ │ ├── arrayChunk.ts │ │ │ ├── linearstep.ts │ │ │ └── saturate.ts │ ├── VRMLookAtSmootherLoaderPlugin │ │ ├── VRMLookAtSmoother.ts │ │ └── VRMLookAtSmootherLoaderPlugin.ts │ └── i18n.js ├── pages │ ├── _app.tsx │ ├── _document.tsx │ ├── api │ │ ├── ai │ │ │ 
├── custom.ts │ │ │ └── vercel.ts │ │ ├── azureOpenAITTS.ts │ │ ├── convertMarkdown.ts │ │ ├── convertSlide.ts │ │ ├── difyChat.ts │ │ ├── elevenLabs.ts │ │ ├── get-background-list.ts │ │ ├── get-live2d-list.ts │ │ ├── get-nijivoice-actors.ts │ │ ├── get-vrm-list.ts │ │ ├── getSlideFolders.ts │ │ ├── getSupplement.ts │ │ ├── messages.ts │ │ ├── openAITTS.ts │ │ ├── save-chat-log.ts │ │ ├── services │ │ │ ├── customApi.ts │ │ │ ├── utils.ts │ │ │ └── vercelAi.ts │ │ ├── stylebertvits2.ts │ │ ├── tts-aivisspeech.ts │ │ ├── tts-google.ts │ │ ├── tts-koeiromap.ts │ │ ├── tts-nijivoice.ts │ │ ├── tts-voicevox.ts │ │ ├── update-aivis-speakers.ts │ │ ├── updateSlideData.ts │ │ ├── upload-background.ts │ │ ├── upload-vrm-list.ts │ │ └── whisper.ts │ ├── index.tsx │ ├── send-message.tsx │ └── slide-editor │ │ └── [slideName].tsx ├── styles │ └── globals.css ├── types │ └── charcoal-ui.d.ts └── utils │ ├── WebSocketManager.ts │ ├── audioBufferManager.ts │ ├── audioProcessing.ts │ ├── buildUrl.ts │ ├── englishToJapanese.json │ ├── messageUtils.ts │ ├── migrateStore.ts │ ├── modelMigration.ts │ ├── reduceTalkStyle.ts │ ├── textProcessing.ts │ ├── voiceLanguage.ts │ └── wait.ts ├── tailwind.config.js ├── tsconfig.json └── watch.json /.claude/settings.local.json: -------------------------------------------------------------------------------- 1 | { 2 | "permissions": { 3 | "allow": [ 4 | "Bash(rg:*)", 5 | "Bash(npm run lint)", 6 | "Bash(grep:*)", 7 | "Bash(npm run build:*)", 8 | "Bash(npx prettier:*)", 9 | "Bash(npm test:*)", 10 | "Bash(rm:*)", 11 | "Bash(mkdir:*)", 12 | "Bash(npm rebuild:*)", 13 | "mcp__ide__getDiagnostics", 14 | "Bash(npm run dev:*)", 15 | "Bash(npm run typecheck:*)" 16 | ], 17 | "deny": [] 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /.cursor/rules/api-management.mdc: -------------------------------------------------------------------------------- 1 | --- 2 | description: 3 | globs: 4 | alwaysApply: false 
5 | --- 6 | # API バージョン管理 7 | 8 | ## 重要な制約事項 9 | - **クライアントの一元管理:** すべてのAIサービス統合ロジックは [`src/pages/api/services/`](mdc:src/pages/api/services/) 配下で一元管理されます。 10 | - **厳格なバージョン管理:** AIモデルのデフォルトバージョンは各サービスファイル内で厳密に管理されます(例: [`vercelAi.ts`](mdc:src/pages/api/services/vercelAi.ts) など)。 11 | - **コアファイルの変更禁止:** 以下の設定ファイルは**変更禁止**です(変更には承認が必要): 12 | - [`src/features/constants/settings.ts`](mdc:src/features/constants/settings.ts) - 共通設定と型定義。 13 | 14 | ## 実装規則 15 | - **モデルバージョン:** AIモデルのバージョンは**必ず** サービスごとのソース (例: `vercelAi.ts`) に定義してください。 16 | - **型定義:** 型定義には**必ず** [`src/features/constants/settings.ts`](mdc:src/features/constants/settings.ts) を参照してください。 17 | - **環境変数:** 環境変数へのアクセスは `.env` または `process.env` を使用し、必要なら専用ヘルパーを追加してください。 18 | - **配置場所:** すべてのAIサービス関連処理は [`src/pages/api/services/`](mdc:src/pages/api/services) ディレクトリ内に配置してください。 19 | -------------------------------------------------------------------------------- /.cursor/rules/code-conventions.mdc: -------------------------------------------------------------------------------- 1 | --- 2 | description: 3 | globs: 4 | alwaysApply: false 5 | --- 6 | # コーディング規約 7 | 8 | ## 重複防止 9 | - **実装前の確認:** 新しいコードを書く前に、以下を徹底的に確認してください: 10 | - 既存の類似機能や機能性。 11 | - 同じまたは類似の名前を持つ関数、コンポーネント、またはクラス。 12 | - 重複するAPIエンドポイント。 13 | - 共通ロジックを抽出して再利用する機会。 14 | 15 | ## 実装ガイドライン 16 | - **ディレクトリ構造:** [`project-structure.mdc`](mdc:.cursor/rules/project-structure.mdc) で概説されている定義済みのプロジェクト構造に厳密に従ってください。 17 | - **命名規則:** ファイル、変数、関数、クラス、コンポーネントの命名において一貫性を維持してください。 18 | - **共通ロジック:** 再利用可能なユーティリティ関数やロジックは、適切な共有ディレクトリ(例: [`src/utils/`](mdc:src/utils))に配置してください。 19 | 20 | ## UI/UXの変更 21 | - **厳格な禁止事項:** 以下の場合を除き、UI/UXデザインの変更(レイアウト、色、フォント、間隔など)は**行わないでください**: 22 | 1. 変更の明確な理由が提示されている。 23 | 2. 
変更を行う**前に**、**明示的な承認**が得られている。 24 | -------------------------------------------------------------------------------- /.cursor/rules/development-process.mdc: -------------------------------------------------------------------------------- 1 | --- 2 | description: 3 | globs: 4 | alwaysApply: false 5 | --- 6 | # 開発プロセスとガイドライン 7 | 8 | すべてのタスクにおいて、以下の構造化されたプロセスに従ってください: 9 | 10 | ## 1. 分析と計画 (<タスク分析>) 11 | - 主要なタスクを要約する。 12 | - 技術スタックを確認し、制約内で実装を計画する。 13 | - 主要な要件と制約を特定する。 14 | - 潜在的な課題をリストアップする。 15 | - 具体的な実行ステップを詳細化する。 16 | - 最適なステップの順序を決定する。 17 | - 必要なツールやリソースを考慮する。 18 | - **重複実装の防止:** 実装前に、既存の類似機能、類似名の関数/コンポーネント、重複するAPIエンドポイント、共通ロジック抽出の機会を確認する。 19 | 20 | ## 2. タスク実行 21 | - ステップを順次実行する。 22 | - 各ステップ完了後、進捗を簡潔に報告する。 23 | - ディレクトリ構造、命名規則、共通ロジックの適切な配置を遵守する。 24 | - 問題が発生した場合は即座に報告し、解決策を提案する。 25 | 26 | ## 3. 品質管理と問題解決 27 | - 結果を迅速に検証する。 28 | - エラー発生時: 29 | a. 問題を切り分け、根本原因を特定する(ログ、デバッグ)。 30 | b. 解決策を作成し、実行する。 31 | c. 修正を検証する。 32 | d. 必要に応じてデバッグログを分析する。 33 | - 検証結果を記録する: 34 | a. テスト項目と期待される結果。 35 | b. 実際の結果と差異。 36 | c. 必要なアクション(該当する場合)。 37 | - コマンド出力を確認し、結果を報告する。 38 | 39 | ## 4. 最終確認 40 | - 完了時に成果物全体を評価する。 41 | - 元の指示との整合性を確認し、必要に応じて調整する。 42 | - 実装の重複がないか最終確認を行う。 43 | 44 | ## 5. 結果報告 45 | 最終報告には以下の形式を使用する: 46 | ```markdown 47 | # 実行結果報告 48 | 49 | ## 概要 50 | [全体の要約] 51 | 52 | ## 実行ステップ 53 | 1. [ステップ1の説明と結果] 54 | 2. [ステップ2の説明と結果] 55 | ... 
56 | 57 | ## 最終成果物 58 | [最終成果物の詳細、該当する場合はリンク] 59 | 60 | ## 課題対応(該当する場合) 61 | - 発生した問題と対応内容 62 | - 今後の考慮事項 63 | 64 | ## 注意点・改善提案 65 | - [気づいた点や改善提案] 66 | ``` 67 | 68 | ## 重要な注意点 69 | - **明確化:** 不明な点があれば、開始前に質問する。 70 | - **承認:** 重要な決定については報告し、承認を得る。 71 | - **問題:** 予期せぬ問題が発生した場合は即座に報告し、解決策を提案する。 72 | - **未指定の変更禁止:** 明示的に要求されていない変更は**行わない**。必要な変更は提案し、まず承認を得る。 73 | - **UI/UXの凍結:** 理由が提示され、**事前に明示的な承認**が得られない限り、UI/UXデザインの変更(レイアウト、色、フォント、間隔など)は**厳禁**とする。 74 | - **技術スタックのバージョン:** 明確な理由と**明示的な承認**なしに、技術スタックのバージョン(API、フレームワーク、ライブラリ)を変更**しない**。 75 | -------------------------------------------------------------------------------- /.cursor/rules/project-structure.mdc: -------------------------------------------------------------------------------- 1 | --- 2 | description: 3 | globs: 4 | alwaysApply: false 5 | --- 6 | # プロジェクト構成とファイル配置 7 | 8 | すべての実装において、以下のディレクトリ構造に従ってください: 9 | 10 | ``` 11 | aituber-kit/ 12 | ├── src/ # ソースコード 13 | │ ├── pages/ # Next.js pages ルーティング 14 | │ │ ├── api/ # APIエンドポイント 15 | │ │ │ ├── services/ # 共通APIサービスロジック 16 | │ │ │ │ ├── vercelAi.ts 17 | │ │ │ │ ├── customApi.ts 18 | │ │ │ │ └── utils.ts 19 | │ │ │ └── [...].ts # その他 API ルート 20 | │ │ └── [...].tsx # ページコンポーネント 21 | │ ├── components/ # 共有 React コンポーネント 22 | │ ├── features/ # 機能モジュール 23 | │ │ ├── chat/ # チャット関連機能 24 | │ │ ├── messages/ # メッセージ関連機能 25 | │ │ ├── stores/ # 状態管理 26 | │ │ └── ... 
# その他ドメイン 27 | │ ├── hooks/ # カスタムフック 28 | │ ├── utils/ # 共通ユーティリティ 29 | │ ├── lib/ # 外部ライブラリラッパー等 30 | │ └── styles/ # スタイル定義 31 | ├── public/ # 静的ファイル 32 | ├── locales/ # 多言語リソース 33 | ├── docs/ # ドキュメント 34 | └── scripts/ # ユーティリティスクリプト 35 | ``` 36 | 37 | ## 配置ルール 38 | - **UIコンポーネント:** 共有Reactコンポーネントは [`src/components/`](mdc:src/components) に配置してください。 39 | - **APIエンドポイント:** APIルートは [`src/pages/api/`](mdc:src/pages/api) で定義してください。 40 | - **共通ユーティリティ:** 共有関数は [`src/utils/`](mdc:src/utils) に配置してください。 41 | - **APIサービスロジック:** AIサービス統合などの処理は [`src/pages/api/services/`](mdc:src/pages/api/services) に実装してください。 42 | - **機能モジュール:** 機能固有のコード(チャット、メッセージ、状態管理など)は [`src/features/`](mdc:src/features) 以下にグループ化してください。 43 | - **カスタムフック:** カスタムReactフックは [`src/hooks/`](mdc:src/hooks) に配置してください。 44 | -------------------------------------------------------------------------------- /.cursor/rules/task-guides.mdc: -------------------------------------------------------------------------------- 1 | --- 2 | description: 3 | globs: 4 | alwaysApply: false 5 | --- 6 | # タスク別ファイルガイド 7 | 8 | 一般的なタスクを実行する際は、以下のファイルリストを参照してください: 9 | 10 | ## 新しいLLMサービスの追加 11 | - [`src/features/chat/handlers.ts`](mdc:src/features/chat/handlers.ts) 12 | - [`src/components/settings/modelProvider.tsx`](mdc:src/components/settings/modelProvider.tsx) 13 | - [`src/features/stores/settings.ts`](mdc:src/features/stores/settings.ts) 14 | - [`locales/`](mdc:locales/) 内のローカライゼーションファイル(例: [`locales/ja/translation.json`](mdc:locales/ja/translation.json), [`locales/en/translation.json`](mdc:locales/en/translation.json) など) 15 | - [`src/features/chat/vercelAIChat.ts`](mdc:src/features/chat/vercelAIChat.ts) (または新しいVercel AI SDK統合ファイルを作成) 16 | - [`src/pages/api/aiChat.ts`](mdc:src/pages/api/aiChat.ts) 17 | - [`src/features/chat/aiChatFactory.ts`](mdc:src/features/chat/aiChatFactory.ts) 18 | - [`.env.example`](mdc:.env.example) (新しい環境変数用) 19 | 20 | ## 入力フォーム -> LLMワークフローの実装 21 | - 
[`src/components/form.tsx`](mdc:src/components/form.tsx) (または関連するフォームコンポーネント) 22 | - [`src/features/chat/handlers.ts`](mdc:src/features/chat/handlers.ts) 23 | - [`src/features/chat/vercelAIChat.ts`](mdc:src/features/chat/vercelAIChat.ts) (または特定のLLM統合) 24 | - [`src/features/chat/aiChatFactory.ts`](mdc:src/features/chat/aiChatFactory.ts) 25 | - [`src/pages/api/aiChat.ts`](mdc:src/pages/api/aiChat.ts) 26 | 27 | ## 新しい設定の追加 28 | - [`src/components/settings/`](mdc:src/components/settings/) 内の設定コンポーネント(例: [`advancedSettings.tsx`](mdc:src/components/settings/advancedSettings.tsx), [`ai.tsx`](mdc:src/components/settings/ai.tsx) など) 29 | - 設定ストア: [`src/features/stores/settings.ts`](mdc:src/features/stores/settings.ts) 30 | - メイン設定インデックス: [`src/components/settings/index.tsx`](mdc:src/components/settings/index.tsx) 31 | 32 | ## ライセンスドキュメントの変更 33 | - [`README.md`](mdc:README.md) 34 | - [`LICENSE`](mdc:LICENSE) 35 | - [`docs/`](mdc:docs/) 内のコアライセンスドキュメント(例: [`license.md`](mdc:docs/license.md), [`license_en.md`](mdc:docs/license_en.md) など) 36 | - [`docs/`](mdc:docs/) 内のライセンスFAQドキュメント(例: [`license-faq.md`](mdc:docs/license-faq.md), [`license-faq_en.md`](mdc:docs/license-faq_en.md) など) 37 | 38 | ## 音声関連機能の処理 39 | - 音声言語設定: [`src/utils/voiceLanguage.ts`](mdc:src/utils/voiceLanguage.ts) 40 | - メッセージ入力コンポーネント: [`src/components/messageInputContainer.tsx`](mdc:src/components/messageInputContainer.tsx), [`src/components/messageInput.tsx`](mdc:src/components/messageInput.tsx) 41 | - 音声認識フック: [`src/hooks/useSilenceDetection.ts`](mdc:src/hooks/useSilenceDetection.ts), [`src/hooks/useVoiceRecognition.ts`](mdc:src/hooks/useVoiceRecognition.ts) 42 | - 音声処理ユーティリティ: [`src/utils/audioProcessing.ts`](mdc:src/utils/audioProcessing.ts), [`src/utils/audioBufferManager.ts`](mdc:src/utils/audioBufferManager.ts) 43 | -------------------------------------------------------------------------------- /.cursor/rules/tech-stack.mdc: 
-------------------------------------------------------------------------------- 1 | --- 2 | description: 3 | globs: 4 | alwaysApply: false 5 | --- 6 | # 技術スタック 7 | 8 | 以下の技術については、指定されたバージョンに従ってください。**事前の明示的な承認なしにバージョンを変更しないでください。** 9 | 10 | ## コア技術 11 | - TypeScript: ^5.0.2 12 | - Node.js: ^20.0.0 13 | - @anthropic-ai/sdk: ^0.20.8 14 | 15 | ## フロントエンド 16 | - Next.js: ^14.2.5 17 | - React: ^18.3.1 18 | - Tailwind CSS: ^3.4.14 19 | - @headlessui/react: ^2.1.2 20 | 21 | ## バックエンド 22 | - @supabase/supabase-js: ^2.46.2 23 | - zod: ^3.23.8 24 | 25 | ## 開発ツール 26 | - npm: ^10.0.0 27 | - ESLint: ^8.57.0 28 | - TypeScript: ^5.0.2 29 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": ["next/core-web-vitals", "prettier"], 3 | "plugins": ["prettier"], 4 | "rules": { 5 | "prettier/prettier": "error" 6 | }, 7 | "ignorePatterns": ["public/scripts/*", "scripts/**", ".mypy_cache/**"] 8 | } 9 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: tegnike 2 | -------------------------------------------------------------------------------- /.github/workflows/claude.yml: -------------------------------------------------------------------------------- 1 | name: Claude Code 2 | 3 | on: 4 | issue_comment: 5 | types: [created] 6 | pull_request_review_comment: 7 | types: [created] 8 | issues: 9 | types: [opened, assigned] 10 | pull_request_review: 11 | types: [submitted] 12 | 13 | jobs: 14 | claude: 15 | if: | 16 | (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) || 
17 | (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) || 18 | (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) || 19 | (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude'))) 20 | runs-on: ubuntu-latest 21 | permissions: 22 | contents: read 23 | pull-requests: read 24 | issues: read 25 | id-token: write 26 | steps: 27 | - name: Checkout repository 28 | uses: actions/checkout@v4 29 | with: 30 | fetch-depth: 1 31 | 32 | - name: Run Claude Code 33 | id: claude 34 | uses: anthropics/claude-code-action@beta 35 | with: 36 | anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} 37 | -------------------------------------------------------------------------------- /.github/workflows/lint-and-format.yml: -------------------------------------------------------------------------------- 1 | name: Lint and Format 2 | 3 | on: 4 | pull_request: 5 | types: [opened, synchronize] 6 | 7 | jobs: 8 | lint-and-format: 9 | runs-on: ubuntu-latest 10 | 11 | steps: 12 | - name: Checkout repository 13 | uses: actions/checkout@v4 14 | 15 | - name: Set up Node.js 16 | uses: actions/setup-node@v4 17 | with: 18 | node-version: '20' 19 | 20 | - name: Install dependencies 21 | run: npm install 22 | 23 | - name: Run ESLint 24 | run: | 25 | npm run lint 26 | git add . 27 | if ! git diff --exit-code; then 28 | echo "ESLint changes detected. Please commit the changes." 29 | exit 1 30 | fi 31 | 32 | - name: Run Prettier 33 | run: | 34 | npm run format 35 | git add . 36 | if ! git diff --exit-code; then 37 | echo "Prettier changes detected. Please commit the changes." 
38 | exit 1 39 | fi 40 | -------------------------------------------------------------------------------- /.github/workflows/old/auto-translate.yml: -------------------------------------------------------------------------------- 1 | name: Auto Translate 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - develop 7 | types: [opened, synchronize, reopened] 8 | 9 | jobs: 10 | auto-translate: 11 | runs-on: ubuntu-latest 12 | permissions: 13 | contents: write 14 | pull-requests: write 15 | steps: 16 | - name: Checkout repository 17 | uses: actions/checkout@v3 18 | with: 19 | fetch-depth: 0 20 | 21 | - name: Set up Python 22 | uses: actions/setup-python@v4 23 | with: 24 | python-version: '3.10' 25 | 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | pip install requests langgraph==0.2.60 pydantic langchain-openai==0.2.14 30 | 31 | - name: Run translation script 32 | env: 33 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 34 | OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} 35 | PR_NUMBER: ${{ github.event.pull_request.number }} 36 | REPO_FULL_NAME: ${{ github.repository }} 37 | run: python scripts/auto_translate.py 38 | -------------------------------------------------------------------------------- /.github/workflows/old/issue-analyzer.yml: -------------------------------------------------------------------------------- 1 | name: Issue Analyzer 2 | 3 | on: 4 | issues: 5 | types: [opened] 6 | workflow_dispatch: 7 | inputs: 8 | issue_number: 9 | description: 'Issue Number' 10 | required: true 11 | issue_title: 12 | description: 'Issue Title' 13 | required: true 14 | issue_body: 15 | description: 'Issue Body' 16 | required: true 17 | 18 | jobs: 19 | analyze-issue: 20 | runs-on: ubuntu-latest 21 | permissions: 22 | issues: write 23 | steps: 24 | - name: Checkout repository 25 | uses: actions/checkout@v2 26 | 27 | - name: Set up Python 28 | uses: actions/setup-python@v2 29 | with: 30 | python-version: '3.x' 31 | 32 | - name: Install 
dependencies 33 | run: | 34 | python -m pip install --upgrade pip 35 | pip install requests anthropic==0.47.2 36 | 37 | - name: Analyze issue 38 | env: 39 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 40 | ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} 41 | ISSUE_NUMBER: ${{ github.event.inputs.issue_number || github.event.issue.number }} 42 | ISSUE_TITLE: ${{ github.event.inputs.issue_title || github.event.issue.title }} 43 | ISSUE_BODY: ${{ github.event.inputs.issue_body || github.event.issue.body }} 44 | run: python scripts/analyze_issue.py 45 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Run Tests 2 | 3 | on: 4 | push: 5 | branches: [main, develop] 6 | pull_request: 7 | branches: [main, develop] 8 | 9 | jobs: 10 | test: 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - uses: actions/checkout@v4 15 | 16 | - name: Setup Node.js 17 | uses: actions/setup-node@v3 18 | with: 19 | node-version: '20' 20 | cache: 'npm' 21 | 22 | - name: Install dependencies 23 | run: npm ci 24 | 25 | - name: Run linter 26 | run: npm run lint 27 | 28 | - name: Run tests 29 | run: npm test 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # next.js 12 | /.next/ 13 | /out/ 14 | 15 | # production 16 | /build 17 | 18 | # misc 19 | .DS_Store 20 | *.pem 21 | 22 | # debug 23 | npm-debug.log* 24 | yarn-debug.log* 25 | yarn-error.log* 26 | .pnpm-debug.log* 27 | 28 | # env files 29 | .env 30 | .env*.local 31 | 32 | # vercel 33 | .vercel 34 | 35 | # typescript 36 | *.tsbuildinfo 37 | next-env.d.ts 38 | 39 | credentials.json 40 | 41 | # asdf 42 | .tool-versions 43 | 44 | certificates 45 | 46 | /public/slides/* 47 | !/public/slides/demo/ 48 | 49 | /logs/* 50 | 51 | # Live2D files 52 | public/live2d/* 53 | !public/live2d/nike01 54 | public/scripts/* 55 | !public/scripts/.gitkeep 56 | 57 | # VRM files 58 | /public/vrm/* 59 | !/public/vrm/AvatarSample_A.vrm 60 | !/public/vrm/AvatarSample_B.vrm 61 | !/public/vrm/AvatarSample_C.vrm 62 | !/public/vrm/nikechan_v1.vrm 63 | !/public/vrm/nikechan_v2.vrm 64 | !/public/vrm/nikechan_v2_outerwear.vrm 65 | 66 | # Background files 67 | /public/backgrounds/* 68 | !/public/backgrounds/bg-c.png 69 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | # ビルド成果物 2 | .next/ 3 | build/ 4 | dist/ 5 | out/ 6 | 7 | # 依存関係 8 | node_modules/ 9 | 10 | # キャッシュ 11 | .mypy_cache/ 12 | .cache/ 13 | 14 | # スクリプト 15 | scripts/ 16 | 17 | # その他 18 | .git/ 19 | .DS_Store 20 | -------------------------------------------------------------------------------- /.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "singleQuote": true, 3 | "trailingComma": "es5", 4 | "semi": false, 5 | "endOfLine": "auto" 6 | } 7 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"editor.formatOnSave": true, 3 | "editor.codeActionsOnSave": { 4 | "source.fixAll.eslint": "explicit" 5 | }, 6 | "editor.defaultFormatter": "esbenp.prettier-vscode" 7 | } 8 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # ベースイメージとしてNode.js 20を使用 2 | FROM node:20 3 | 4 | # 必要なシステムライブラリをインストール 5 | RUN apt-get update && apt-get install -y \ 6 | libcairo2-dev \ 7 | libpango1.0-dev \ 8 | libjpeg-dev \ 9 | libgif-dev \ 10 | librsvg2-dev \ 11 | pkg-config \ 12 | && rm -rf /var/lib/apt/lists/* 13 | 14 | # 作業ディレクトリを設定 15 | WORKDIR /app 16 | 17 | # package.jsonとpackage-lock.jsonをコピー 18 | COPY package*.json ./ 19 | 20 | # 依存関係をインストール 21 | RUN npm ci 22 | 23 | # アプリケーションのソースコードをコピー 24 | COPY . . 25 | 26 | # 3000番ポートを公開 27 | EXPOSE 3000 28 | 29 | # 開発モードでアプリケーションを起動 30 | CMD ["npm", "run", "dev"] 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Custom License 2 | 3 | This software is licensed under either of: 4 | 5 | 1. Non-Commercial Use License 6 | 2. Commercial License 7 | 8 | =============================================================================== 9 | Non-Commercial Use License 10 | =============================================================================== 11 | 12 | Permission is hereby granted, free of charge, to any person obtaining a copy 13 | of this software and associated documentation files (the "Software"), to use, 14 | copy, modify, and distribute copies of the Software for **non-commercial purposes**, subject to the following conditions: 15 | 16 | - The above copyright notice and this permission notice shall be included in all 17 | copies or substantial portions of the Software. 
18 | - The Software is provided "as is", without warranty of any kind, express or 19 | implied, including but not limited to the warranties of merchantability, 20 | fitness for a particular purpose, and noninfringement. 21 | 22 | *Note: This license applies only to non-commercial use. For commercial use, 23 | please refer to the Commercial License below.* 24 | 25 | =============================================================================== 26 | Commercial License 27 | =============================================================================== 28 | 29 | For commercial use of this software, a separate commercial license is required. 30 | 31 | Please contact support@aituberkit.com for details about commercial licensing. 32 | 33 | For detailed licensing information, terms and conditions, and FAQ, please visit: 34 | https://github.com/tegnike/aituber-kit/docs/license_en.md 35 | 36 | For commercial licensing inquiries: 37 | Email: support@aituberkit.com 38 | -------------------------------------------------------------------------------- /cursor.txt: -------------------------------------------------------------------------------- 1 | ## 新しいLLMサービスを追加する 2 | - src/features/chat/handlers.ts 3 | - src/components/settings/modelProvider.tsx 4 | - src/features/stores/settings.ts 5 | - locales/ja/translation.json 6 | - locales/ko/translation.json 7 | - locales/zh/translation.json 8 | - locales/en/translation.json 9 | - locales/vi/translation.json 10 | - locales/fr/translation.json 11 | - locales/es/translation.json 12 | - locales/pt/translation.json 13 | - locales/de/translation.json 14 | - locales/ru/translation.json 15 | - locales/it/translation.json 16 | - locales/ar/translation.json 17 | - locales/hi/translation.json 18 | - src/features/chat/vercelAIChat.ts 19 | - src/pages/api/aiChat.ts 20 | - src/features/chat/aiChatFactory.ts 21 | - .env.example 22 | 23 | ## 入力フォームからLLMを介して回答を得るまで 24 | src/components/form.tsx 25 | src/features/chat/handlers.ts 26 | 
src/features/chat/vercelAIChat.ts 27 | src/features/chat/aiChatFactory.ts 28 | src/pages/api/aiChat.ts 29 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | app: 5 | build: . 6 | ports: 7 | - '3000:3000' 8 | volumes: 9 | - .:/app 10 | - /app/node_modules 11 | env_file: 12 | - .env 13 | command: npm run dev 14 | -------------------------------------------------------------------------------- /docs/auto_translate.md: -------------------------------------------------------------------------------- 1 | # 自動翻訳機能 2 | 3 | このドキュメントでは、GitHub Actionsを使用した自動翻訳機能の概要と設定方法について説明します。 4 | 5 | ## 機能概要 6 | 7 | この自動翻訳機能は、特定のファイルが変更された際に、対応する多言語版ファイルを自動的に更新するものです。翻訳にはAnthropicのClaude AIを使用しています。 8 | 9 | ### 主な特徴 10 | 11 | - 指定された元ファイルの変更を検知 12 | - 対応する翻訳先ファイルを自動的に更新 13 | - 翻訳が必要かどうかをAIが判断 14 | - 翻訳結果をPRにコメントとして追加 15 | 16 | ## 対象ファイルと翻訳先 17 | 18 | 現在、以下のファイルが自動翻訳の対象となっています: 19 | 20 | 1. 元ファイル: `README.md` 21 | 22 | - 翻訳先: `docs/README_*.md`(各言語版) 23 | 24 | 2. 元ファイル: `docs/logo_license.md` 25 | 26 | - 翻訳先: `docs/logo_license_*.md`(各言語版) 27 | 28 | 3. 元ファイル: `docs/license-faq.md` 29 | 30 | - 翻訳先: `docs/license-faq_*.md`(各言語版) 31 | 32 | 4. 元ファイル: `docs/license.md` 33 | 34 | - 翻訳先: `docs/license_*.md`(各言語版) 35 | 36 | 5. 元ファイル: `docs/character_model_licence.md` 37 | 38 | - 翻訳先: `docs/character_model_licence_*.md`(各言語版) 39 | 40 | 6. 
元ファイル: `locales/ja/translation.json` 41 | - 翻訳先: `locales/*/translation.json`(各言語版) 42 | 43 | 対象言語は現在、英語(en)、中国語(zh)、韓国語(ko)です。 44 | 45 | ## 実行タイミング 46 | 47 | 自動翻訳は以下のタイミングで実行されます: 48 | 49 | - developブランチへのプルリクエストが作成された時 50 | - developブランチへのプルリクエストが更新された時 51 | - developブランチへのプルリクエストが再オープンされた時 52 | 53 | ## 設定方法 54 | 55 | ### 必要な環境変数 56 | 57 | 自動翻訳機能を使用するには、以下の環境変数をGitHub Secretsに設定する必要があります: 58 | 59 | - `ANTHROPIC_API_KEY`: AnthropicのAPI Key 60 | 61 | ### 対象ファイルの追加・変更 62 | 63 | 対象ファイルを追加・変更するには、`scripts/auto_translate.py`の`FILE_MAPPINGS`変数を編集します: 64 | 65 | ```python 66 | FILE_MAPPINGS = { 67 | "元ファイルのパス": { 68 | "pattern": "翻訳先ファイルのパターン({}は言語コードに置換されます)", 69 | "type": "ファイルタイプ(markdown または json)" 70 | }, 71 | # 他のファイルマッピングを追加 72 | } 73 | ``` 74 | 75 | ### 対象言語の追加・変更 76 | 77 | 対象言語を追加・変更するには、`scripts/auto_translate.py`の`TARGET_LANGUAGES`変数を編集します: 78 | 79 | ```python 80 | TARGET_LANGUAGES = ["en", "zh", "ko"] # 言語コードを追加・変更 81 | ``` 82 | 83 | ## 動作の仕組み 84 | 85 | 1. PRで変更されたファイルを検出 86 | 2. 対象の元ファイルが変更されているか確認 87 | 3. 変更された元ファイルに対応する翻訳先ファイルを特定 88 | 4. 元ファイルと翻訳先ファイルの内容を比較 89 | - 翻訳先ファイルが存在しない場合は翻訳を実行 90 | - 翻訳先ファイルが存在する場合は、AIが翻訳の必要性を判断 91 | 5. 必要に応じてAIを使用して翻訳を生成 92 | 6. 翻訳先ファイルを更新 93 | 7. 
翻訳結果をPRにコメントとして追加 94 | 95 | ## トラブルシューティング 96 | 97 | ### 翻訳が実行されない 98 | 99 | - PRが正しくdevelopブランチに向いているか確認してください 100 | - 対象ファイルが正しく設定されているか確認してください 101 | - GitHub Actionsのログを確認して、エラーが発生していないか確認してください 102 | 103 | ### 翻訳品質に問題がある 104 | 105 | - 翻訳プロンプトを調整することで、翻訳品質を向上させることができます 106 | - `scripts/auto_translate.py`の`translate_markdown`関数と`translate_json`関数のプロンプトを編集してください 107 | -------------------------------------------------------------------------------- /docs/character_model_licence.md: -------------------------------------------------------------------------------- 1 | # VRMモデルおよびLive2Dモデルの利用規約 2 | 3 | ## 概要 4 | 5 | この文書は、[aituber-kit](https://github.com/tegnike/aituber-kit) リポジトリで提供されている以下のモデルの利用規約を定めます: 6 | 7 | ### VRMモデル 8 | 9 | - [ニケちゃんVRMモデルv1](https://github.com/tegnike/aituber-kit/blob/main/public/vrm/nikechan_v1.vrm) 10 | - [ニケちゃんVRMモデルv2](https://github.com/tegnike/aituber-kit/blob/main/public/vrm/nikechan_v2.vrm) 11 | - [ニケちゃんVRMモデルv2(アウター)](https://github.com/tegnike/aituber-kit/blob/main/public/vrm/nikechan_v2_outerwear.vrm) 12 | 13 | ### Live2Dモデル 14 | 15 | - [ニケちゃんLive2Dモデル](https://github.com/tegnike/aituber-kit/blob/main/public/live2d/nike01) 16 | 17 | ## 著作権 18 | 19 | ### VRMモデル 20 | 21 | VRMモデルの著作権は開発者に帰属します。ただし、著作人格権は各モデルの制作者に帰属します。 22 | 23 | ### Live2Dモデル 24 | 25 | Live2Dモデルの著作権は、モデルの制作者に帰属します。 26 | 27 | ## 許可される利用 28 | 29 | - 個人使用目的での使用 30 | - 非商用のプロジェクトでのデモンストレーション目的での使用 31 | - 本リポジトリやモデルの紹介を目的とした使用 32 | 33 | ## 禁止される利用 34 | 35 | 以下の行為は原則として禁止されますが、VRMモデルについては開発者の事前の許諾を得ることで、部分的に許可される可能性があります: 36 | 37 | - 商業目的での使用、販売、またはレンタル 38 | - モデルの改変や派生作品の作成とそれらの配布 39 | - 第三者に対する再配布 40 | - 本リポジトリやモデルの所有者の名誉や信用を毀損する方法での使用 41 | - 公序良俗に反する方法での使用 42 | 43 | なお、Live2Dモデルについては、上記の禁止事項は一切の例外なく適用されます。 44 | 45 | ## 免責事項 46 | 47 | これらのVRMモデルおよびLive2Dモデルは「現状有姿」で提供され、特定の目的への適合性、非侵害性の保証はありません。モデル使用によって生じたいかなる損害に対しても、リポジトリの所有者は一切の責任を負いません。 48 | 49 | ## 規約の変更 50 | 51 | リポジトリの所有者は、予告なしにこの利用規約を変更する権利を有します。利用者は、定期的に規約を確認し、変更点を把握する責任があります。 52 | 53 | ## 連絡先 54 | 55 | 
モデルの使用に関して不明点がある場合は、リポジトリのイシューを通じてお問い合わせください。 56 | 57 | ## モデル制作者情報 58 | 59 | ### VRMモデル 60 | 61 | - ニケちゃんVRMモデルv1:[琳 様](https://x.com/rin_tyn25) 62 | - ニケちゃんVRMモデルv2:[たまごん 様](https://x.com/_TAMA_GON_) 63 | - ニケちゃんVRMモデルv2(アウター):[たまごん 様](https://x.com/_TAMA_GON_) 64 | 65 | ### Live2Dモデル 66 | 67 | - イラストレーター:[綾川まとい 様](https://x.com/matoi_e_ma) 68 | - モデラー:[チッパー 様](https://x.com/Chipper_tyvt) 69 | -------------------------------------------------------------------------------- /docs/character_model_licence_ko.md: -------------------------------------------------------------------------------- 1 | # VRM 및 Live2D 모델 이용 약관 2 | 3 | ## 개요 4 | 5 | 이 문서는 [aituber-kit](https://github.com/tegnike/aituber-kit) 리포지토리에서 제공되는 다음 모델들의 이용 약관을 규정합니다: 6 | 7 | ### VRM 모델 8 | 9 | - [니케짱 VRM 모델 v1](https://github.com/tegnike/aituber-kit/blob/main/public/vrm/nikechan_v1.vrm) 10 | - [니케짱 VRM 모델 v2](https://github.com/tegnike/aituber-kit/blob/main/public/vrm/nikechan_v2.vrm) 11 | - [니케짱 VRM 모델 v2 (아우터)](https://github.com/tegnike/aituber-kit/blob/main/public/vrm/nikechan_v2_outerwear.vrm) 12 | 13 | ### Live2D 모델 14 | 15 | - [니케짱 Live2D 모델](https://github.com/tegnike/aituber-kit/blob/main/public/live2d/nike01) 16 | 17 | ## 저작권 18 | 19 | ### VRM 모델 20 | 21 | VRM 모델의 저작권은 개발자에게 귀속됩니다. 단, 저작인격권은 각 모델의 제작자에게 귀속됩니다. 22 | 23 | ### Live2D 모델 24 | 25 | Live2D 모델의 저작권은 모델 제작자에게 귀속됩니다. 26 | 27 | ## 허가된 사용 28 | 29 | - 개인 사용 목적 30 | - 비상업적 프로젝트에서의 데모 목적 사용 31 | - 본 리포지토리와 모델 소개를 위한 사용 32 | 33 | ## 금지된 사용 34 | 35 | 다음 행위는 원칙적으로 금지되나, VRM 모델의 경우 개발자의 사전 허가를 받으면 부분적으로 허가될 수 있습니다: 36 | 37 | - 상업적 목적의 사용, 판매 또는 대여 38 | - 모델의 수정이나 파생 작품의 제작 및 배포 39 | - 제3자에 대한 재배포 40 | - 본 리포지토리나 모델 소유자의 명예나 신용을 훼손하는 방식의 사용 41 | - 공서양속에 반하는 방식의 사용 42 | 43 | 단, Live2D 모델의 경우, 위의 금지 사항은 어떠한 예외도 없이 적용됩니다. 44 | 45 | ## 면책 조항 46 | 47 | 이러한 VRM 및 Live2D 모델은 "있는 그대로" 제공되며, 특정 목적에 대한 적합성, 비침해성에 대한 보증은 없습니다. 모델 사용으로 인해 발생하는 어떠한 손해에 대해서도 리포지토리 소유자는 일체의 책임을 지지 않습니다. 48 | 49 | ## 규약 변경 50 | 51 | 저장소 소유자는 사전 고지 없이 이 이용 약관을 변경할 권리가 있습니다. 
이용자는 정기적으로 규약을 확인하고 변경 사항을 숙지할 책임이 있습니다. 52 | 53 | ## 연락처 54 | 55 | 모델 사용에 관해 문의사항이 있으시면 저장소의 이슈를 통해 문의해 주시기 바랍니다. 56 | 57 | ## 모델 제작자 정보 58 | 59 | ### VRM 모델 60 | 61 | - Nikechan VRM Model v1: [琳 様](https://x.com/rin_tyn25) 62 | - Nikechan VRM Model v2: [たまごん 様](https://x.com/_TAMA_GON_) 63 | - Nikechan VRM Model v2 (Outerwear): [たまごん 様](https://x.com/_TAMA_GON_) 64 | 65 | ### Live2D 모델 66 | 67 | - 일러스트레이터: [綾川まとい 様](https://x.com/matoi_e_ma) 68 | - 모델러: [チッパー 様](https://x.com/Chipper_tyvt) 69 | -------------------------------------------------------------------------------- /docs/character_model_licence_zh.md: -------------------------------------------------------------------------------- 1 | # VRM和Live2D模型使用条款 2 | 3 | ## 概述 4 | 5 | 本文档规定了[aituber-kit](https://github.com/tegnike/aituber-kit)仓库中提供的以下模型的使用条款: 6 | 7 | ### VRM模型 8 | 9 | - [妮可酱VRM模型v1](https://github.com/tegnike/aituber-kit/blob/main/public/vrm/nikechan_v1.vrm) 10 | - [妮可酱VRM模型v2](https://github.com/tegnike/aituber-kit/blob/main/public/vrm/nikechan_v2.vrm) 11 | - [妮可酱VRM模型v2(外套版)](https://github.com/tegnike/aituber-kit/blob/main/public/vrm/nikechan_v2_outerwear.vrm) 12 | 13 | ### Live2D模型 14 | 15 | - [妮可酱Live2D模型](https://github.com/tegnike/aituber-kit/blob/main/public/live2d/nike01) 16 | 17 | ## 版权 18 | 19 | ### VRM模型 20 | 21 | VRM模型的版权归开发者所有。但是,精神权利归各模型的创作者所有。 22 | 23 | ### Live2D模型 24 | 25 | Live2D模型的版权归模型创作者所有。 26 | 27 | ## 允许的使用 28 | 29 | - 个人使用目的 30 | - 非商业项目中的演示目的使用 31 | - 用于介绍本仓库和模型 32 | 33 | ## 禁止的使用 34 | 35 | 以下行为原则上禁止,但对于VRM模型,在获得开发者事先同意的情况下可能会部分允许: 36 | 37 | - 商业目的的使用、销售或租赁 38 | - 修改模型或创建和分发衍生作品 39 | - 向第三方再分发 40 | - 以损害本仓库或模型所有者名誉和信用的方式使用 41 | - 以违反公序良俗的方式使用 42 | 43 | 对于Live2D模型,上述禁止事项无任何例外地适用。 44 | 45 | ## 免责声明 46 | 47 | 这些VRM和Live2D模型按"现状"提供,不保证适合特定目的或不侵权。对于因使用模型而产生的任何损害,仓库所有者概不负责。 48 | 49 | ## 协议的更改 50 | 51 | 存储库的所有者有权在不事先通知的情况下更改此使用协议。用户有责任定期检查协议并了解更改。 52 | 53 | ## 联系方式 54 | 55 | 如果您对模型的使用有任何疑问,请通过存储库的问题(issues)与我们联系。 56 | 57 | ## 模型制作者信息 58 | 59 | ### VRM模型 60 | 61 | - Nikechan VRM 
Model v1: [琳 様](https://x.com/rin_tyn25) 62 | - Nikechan VRM Model v2: [たまごん 様](https://x.com/_TAMA_GON_) 63 | - Nikechan VRM Model v2 (Outerwear): [たまごん 様](https://x.com/_TAMA_GON_) 64 | 65 | ### Live2D模型 66 | 67 | - 插画师:[綾川まとい 様](https://x.com/matoi_e_ma) 68 | - 建模师:[チッパー 様](https://x.com/Chipper_tyvt) 69 | -------------------------------------------------------------------------------- /docs/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/docs/logo.png -------------------------------------------------------------------------------- /docs/logo_licence.md: -------------------------------------------------------------------------------- 1 | # ロゴの利用規約 2 | 3 | ## 概要 4 | 5 | この文書は、[aituber-kit](https://github.com/tegnike/aituber-kit) リポジトリで使用されているロゴの利用規約を定めます。 6 | 7 | ## 著作権 8 | 9 | このロゴの著作権は、ロゴの制作者に帰属します。 10 | 11 | ## 許可される利用 12 | 13 | - 本リポジトリに関連する非商用目的での使用 14 | - 本リポジトリを紹介する際のロゴの使用 15 | - リンク目的での使用(本リポジトリに関するWebサイトへのリンクのみ) 16 | 17 | ## 禁止される利用 18 | 19 | - 商業目的での使用(事前の書面による許可がない場合) 20 | - ロゴの改変や派生作品の作成 21 | - 第三者に対する再配布 22 | - 本リポジトリや所有者の名誉や信用を毀損する方法での使用 23 | 24 | ## 免責事項 25 | 26 | このロゴは「現状有姿」で提供され、特定の目的への適合性、非侵害性の保証はありません。ロゴ使用によって生じたいかなる損害に対しても、リポジトリの所有者は一切の責任を負いません。 27 | 28 | ## 規約の変更 29 | 30 | リポジトリの所有者は、予告なしにこの利用規約を変更する権利を有します。利用者は、定期的に規約を確認し、変更点を把握する責任があります。 31 | 32 | ## 連絡先 33 | 34 | ロゴの使用に関して不明点がある場合は、リポジトリのイシューを通じてお問い合わせください。 35 | 36 | ## ロゴ制作者情報 37 | 38 | - ロゴの制作者:[Ruka Design](https://coconala.com/users/2208636) 39 | -------------------------------------------------------------------------------- /docs/logo_licence_en.md: -------------------------------------------------------------------------------- 1 | # Terms of Use for the Logo 2 | 3 | ## Overview 4 | 5 | This document sets out the terms of use for the logo used in the [aituber-kit](https://github.com/tegnike/aituber-kit) repository. 
6 | 7 | ## Copyright 8 | 9 | The copyright of this logo belongs to the creator of the logo. 10 | 11 | ## Permitted Use 12 | 13 | - Non-commercial use related to this repository 14 | - Use of the logo when introducing this repository 15 | - Use for linking purposes (only to websites related to this repository) 16 | 17 | ## Prohibited Use 18 | 19 | - Commercial use (without prior written permission) 20 | - Modification of the logo or creation of derivative works 21 | - Redistribution to third parties 22 | - Use in a manner that damages the reputation or credit of this repository or its owner 23 | 24 | ## Disclaimer 25 | 26 | This logo is provided "as is" and there is no guarantee of fitness for a particular purpose or non-infringement. The owner of the repository is not responsible for any damage caused by the use of the logo. 27 | 28 | ## Changes to the Agreement 29 | 30 | The owner of the repository has the right to change this agreement without notice. Users are responsible for regularly checking the agreement and understanding the changes. 31 | 32 | ## Contact 33 | 34 | If you have any questions about the use of the logo, please contact us through the repository's issues. 35 | 36 | ## Logo Creator Information 37 | 38 | - Logo Creator: [Ruka Design](https://coconala.com/users/2208636) 39 | -------------------------------------------------------------------------------- /docs/logo_licence_ko.md: -------------------------------------------------------------------------------- 1 | # 로고 이용 약관 2 | 3 | ## 개요 4 | 5 | 이 문서는 [aituber-kit](https://github.com/tegnike/aituber-kit) 저장소에서 사용되는 로고의 이용 약관을 정의합니다. 6 | 7 | ## 저작권 8 | 9 | 이 로고의 저작권은 로고 제작자에게 있습니다. 
10 | 11 | ## 허용되는 이용 12 | 13 | - 본 저장소와 관련된 비상업적 목적으로 사용 14 | - 본 저장소를 소개할 때 로고 사용 15 | - 링크 목적으로 사용(본 저장소 관련 웹사이트로의 링크만 해당) 16 | 17 | ## 금지되는 이용 18 | 19 | - 상업적 목적으로 사용(사전 서면 허가가 없는 경우) 20 | - 로고 수정 또는 파생 작품 제작 21 | - 제3자에 대한 재배포 22 | - 본 저장소나 소유자의 명예나 신용을 훼손하는 방식으로 사용 23 | 24 | ## 면책 조항 25 | 26 | 이 로고는 "있는 그대로" 제공되며, 특정 목적에 대한 적합성이나 비침해성에 대한 보장은 없습니다. 로고 사용으로 인해 발생하는 손해에 대해 저장소 소유자는 어떠한 책임도 지지 않습니다. 27 | 28 | ## 규약 변경 29 | 30 | 저장소 소유자는 사전 고지 없이 이 이용 약관을 변경할 권리가 있습니다. 이용자는 정기적으로 규약을 확인하고 변경 사항을 숙지할 책임이 있습니다. 31 | 32 | ## 연락처 33 | 34 | 로고 사용과 관련하여 궁금한 점이 있으면 저장소의 이슈를 통해 문의하시기 바랍니다. 35 | 36 | ## 로고 제작자 정보 37 | 38 | - 로고 제작자: [Ruka Design](https://coconala.com/users/2208636) 39 | -------------------------------------------------------------------------------- /docs/logo_licence_pl.md: -------------------------------------------------------------------------------- 1 | # Warunki użytkowania logo 2 | 3 | ## Przegląd 4 | 5 | Ten dokument określa warunki użytkowania logo używanego w repozytorium [aituber-kit](https://github.com/tegnike/aituber-kit). 6 | 7 | ## Prawa autorskie 8 | 9 | Prawa autorskie do tego logo należą do jego twórcy. 10 | 11 | ## Dozwolone użycie 12 | 13 | - Użycie niekomercyjne związane z tym repozytorium 14 | - Użycie logo podczas prezentacji tego repozytorium 15 | - Użycie w celach linkowania (tylko do stron internetowych związanych z tym repozytorium) 16 | 17 | ## Zabronione użycie 18 | 19 | - Użycie komercyjne (bez uprzedniej pisemnej zgody) 20 | - Modyfikacja logo lub tworzenie dzieł pochodnych 21 | - Redystrybucja do osób trzecich 22 | - Użycie w sposób naruszający honor lub reputację tego repozytorium lub jego właściciela 23 | 24 | ## Wyłączenie odpowiedzialności 25 | 26 | To logo jest dostarczane "tak jak jest", bez żadnych gwarancji przydatności do określonego celu czy nienaruszania praw. Właściciel repozytorium nie ponosi żadnej odpowiedzialności za jakiekolwiek szkody wynikające z użycia logo. 
27 | 28 | ## Zmiany warunków 29 | 30 | Właściciel repozytorium zastrzega sobie prawo do zmiany tych warunków użytkowania bez uprzedzenia. Użytkownicy są odpowiedzialni za regularne sprawdzanie warunków i zapoznawanie się ze zmianami. 31 | 32 | ## Kontakt 33 | 34 | W przypadku pytań dotyczących użycia logo, prosimy o kontakt poprzez system zgłoszeń (issues) w repozytorium. 35 | 36 | ## Informacje o twórcy logo 37 | 38 | - Twórca logo: [Ruka Design](https://coconala.com/users/2208636) 39 | -------------------------------------------------------------------------------- /docs/logo_licence_zh.md: -------------------------------------------------------------------------------- 1 | # 商标使用条款 2 | 3 | ## 概述 4 | 5 | 本文档规定了在[aituber-kit](https://github.com/tegnike/aituber-kit)存储库中使用的商标的使用条款。 6 | 7 | ## 版权 8 | 9 | 此商标的版权属于标志的创作者。 10 | 11 | ## 允许使用 12 | 13 | - 与本存储库相关的非商业目的使用 14 | - 在介绍本存储库时使用商标 15 | - 用于链接目的(仅限于与本存储库相关的网站) 16 | 17 | ## 禁止使用 18 | 19 | - 商业目的使用(未经事先书面许可) 20 | - 修改商标或创建衍生作品 21 | - 分发给第三方 22 | - 以损害本存储库或其所有者的名誉或信用的方式使用 23 | 24 | ## 免责声明 25 | 26 | 此商标以“现状有姿”提供,不保证适用于特定目的或不侵权。对于使用商标造成的任何损害,存储库的所有者不承担任何责任。 27 | 28 | ## 协议的更改 29 | 30 | 存储库的所有者有权在不事先通知的情况下更改此使用协议。用户有责任定期检查协议并了解更改。 31 | 32 | ## 联系方式 33 | 34 | 如果对商标的使用有任何疑问,请通过存储库的问题联系我们。 35 | 36 | ## 标志创作者信息 37 | 38 | - 标志创作者: [Ruka Design](https://coconala.com/users/2208636) 39 | -------------------------------------------------------------------------------- /electron.mjs: -------------------------------------------------------------------------------- 1 | import { app, BrowserWindow, screen } from 'electron' 2 | import path from 'path' 3 | import { fileURLToPath } from 'url' 4 | import isDev from 'electron-is-dev' 5 | import waitOn from 'wait-on' 6 | 7 | const __dirname = path.dirname(fileURLToPath(import.meta.url)) 8 | 9 | let mainWindow 10 | 11 | async function createWindow() { 12 | const { width, height } = screen.getPrimaryDisplay().workAreaSize 13 | 14 | mainWindow = new BrowserWindow({ 15 | width: width, 16 | 
height: height, 17 | show: false, // ウィンドウを即表示せず、準備ができるまで待ちます 18 | webPreferences: { 19 | nodeIntegration: false, // セキュリティ向上のために false に 20 | contextIsolation: true, // セキュリティ向上のために true に 21 | webSecurity: false, // CORSエラーを回避 22 | preload: path.join(__dirname, 'preload.js'), // もしプリロードスクリプトがあれば 23 | devTools: false, // 開発者モードを表示させない 24 | }, 25 | transparent: true, // ウィンドウを透明にする 26 | // frame: false, // フレームをなくす 27 | // resizable: false, // 画面の大きさを変えさせない 28 | hasShadow: false, // ウィンドウに影をつけない 29 | }) 30 | 31 | if (isDev) { 32 | // 開発モードの場合、ローカルサーバーが準備できるのを待ちます 33 | await waitOn({ resources: ['http://localhost:3000'] }) 34 | mainWindow.loadURL('http://localhost:3000') 35 | } else { 36 | // 本番モードの場合、ファイルから読み込みます 37 | mainWindow.loadFile('path/to/your/production/index.html') 38 | } 39 | 40 | // 'ready-to-show' イベントが発火したらウィンドウを表示 41 | mainWindow.once('ready-to-show', () => { 42 | mainWindow.show() 43 | mainWindow.webContents.openDevTools() 44 | }) 45 | } 46 | 47 | app.on('ready', createWindow) 48 | -------------------------------------------------------------------------------- /jest.config.js: -------------------------------------------------------------------------------- 1 | const nextJest = require('next/jest') 2 | 3 | const createJestConfig = nextJest({ 4 | dir: './', 5 | }) 6 | 7 | const customJestConfig = { 8 | setupFilesAfterEnv: ['/jest.setup.js'], 9 | testEnvironment: 'jest-environment-jsdom', 10 | moduleNameMapper: { 11 | '^@/(.*)$': '/src/$1', 12 | '^canvas$': '/src/__mocks__/node-canvas.js', 13 | '^three/examples/jsm/(.*)$': 14 | '/src/__mocks__/three/examples/jsm/$1', 15 | }, 16 | testMatch: ['**/__tests__/**/*.test.[jt]s?(x)'], 17 | modulePathIgnorePatterns: [ 18 | 'node_modules/canvas', 19 | 'node_modules/@ffmpeg-installer', 20 | 'node_modules/fluent-ffmpeg', 21 | ], 22 | transformIgnorePatterns: [ 23 | 'node_modules/(?!(@pixiv/three-vrm|three/examples/jsm))', 24 | ], 25 | moduleDirectories: ['node_modules', '/src/__mocks__'], 26 | 
testPathIgnorePatterns: ['/node_modules/', '/\\.next/'], 27 | setupFiles: ['/jest.setup.canvas.js'], 28 | } 29 | 30 | module.exports = createJestConfig(customJestConfig) 31 | -------------------------------------------------------------------------------- /jest.setup.js: -------------------------------------------------------------------------------- 1 | import '@testing-library/jest-dom' 2 | import './src/__mocks__/readableStream.js' 3 | import 'web-streams-polyfill/dist/polyfill.js' 4 | -------------------------------------------------------------------------------- /next.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('next').NextConfig} */ 2 | const nextConfig = { 3 | reactStrictMode: true, 4 | assetPrefix: process.env.BASE_PATH || '', 5 | basePath: process.env.BASE_PATH || '', 6 | trailingSlash: true, 7 | publicRuntimeConfig: { 8 | root: process.env.BASE_PATH || '', 9 | }, 10 | optimizeFonts: false, 11 | } 12 | 13 | module.exports = nextConfig 14 | -------------------------------------------------------------------------------- /postcss.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | plugins: { 3 | tailwindcss: {}, 4 | autoprefixer: {}, 5 | }, 6 | } 7 | -------------------------------------------------------------------------------- /public/backgrounds/bg-c.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/backgrounds/bg-c.png -------------------------------------------------------------------------------- /public/bg-c.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/bg-c.png 
-------------------------------------------------------------------------------- /public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/favicon.ico -------------------------------------------------------------------------------- /public/github-mark-white.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /public/idle_loop.vrma: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/idle_loop.vrma -------------------------------------------------------------------------------- /public/images/ai-logos/anthropic.svg: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 15 | 16 | -------------------------------------------------------------------------------- /public/images/ai-logos/azure.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /public/images/ai-logos/cohere.svg: -------------------------------------------------------------------------------- 1 | 2 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 20 | 21 | 23 | 24 | 26 | 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /public/images/ai-logos/custom-api.svg: -------------------------------------------------------------------------------- 1 | 2 | API -------------------------------------------------------------------------------- /public/images/ai-logos/deepseek.svg: -------------------------------------------------------------------------------- 1 | DeepSeek 
-------------------------------------------------------------------------------- /public/images/ai-logos/dify.svg: -------------------------------------------------------------------------------- 1 | Dify -------------------------------------------------------------------------------- /public/images/ai-logos/fireworks.svg: -------------------------------------------------------------------------------- 1 | 2 | 4 | 7 | 8 | 10 | 13 | 16 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /public/images/ai-logos/google.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /public/images/ai-logos/groq.svg: -------------------------------------------------------------------------------- 1 | Groq -------------------------------------------------------------------------------- /public/images/ai-logos/local.svg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/images/ai-logos/local.svg -------------------------------------------------------------------------------- /public/images/ai-logos/openai.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /public/images/ai-logos/openrouter.svg: -------------------------------------------------------------------------------- 1 | OpenRouter -------------------------------------------------------------------------------- /public/images/ai-logos/perplexity.svg: -------------------------------------------------------------------------------- 1 | Perplexity -------------------------------------------------------------------------------- /public/images/ai-logos/xai.svg: 
-------------------------------------------------------------------------------- 1 | Grok -------------------------------------------------------------------------------- /public/images/icons/external-link.svg: -------------------------------------------------------------------------------- 1 | 8 | 13 | -------------------------------------------------------------------------------- /public/images/icons/screen-share.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | screen_share 5 | Created with Sketch. 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /public/images/icons/stop.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | stop 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /public/images/setting-icons/ai-settings.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | ai 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /public/images/setting-icons/basic-settings.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 8 | 9 | 23 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /public/images/setting-icons/conversation-history.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 8 | 9 | 12 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /public/images/setting-icons/microphone-settings.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 8 | 9 | 10 | 11 | 12 | 14 | 15 | 17 | 19 | 21 | 23 | 24 | 25 | -------------------------------------------------------------------------------- 
/public/images/setting-icons/other-settings.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /public/images/setting-icons/slide-settings.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 8 | 9 | 11 | 17 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /public/images/setting-icons/voice-settings.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 8 | 9 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /public/images/setting-icons/youtube-settings.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 10 | 11 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /public/live2d/nike01/expressions/Angry.exp3.json: -------------------------------------------------------------------------------- 1 | { 2 | "Type": "Live2D Expression", 3 | "FadeInTime": 0.75, 4 | "Parameters": [ 5 | { 6 | "Id": "ParamHairpinChange", 7 | "Value": 1, 8 | "Blend": "Add" 9 | }, 10 | { 11 | "Id": "ParamAngerBrows", 12 | "Value": 0.5, 13 | "Blend": "Add" 14 | }, 15 | { 16 | "Id": "ParamMouthOpenY", 17 | "Value": 0, 18 | "Blend": "Add" 19 | }, 20 | { 21 | "Id": "ParamHeMouth", 22 | "Value": 1, 23 | "Blend": "Add" 24 | } 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /public/live2d/nike01/expressions/Focus.exp3.json: -------------------------------------------------------------------------------- 1 | { 2 | "Type": "Live2D Expression", 3 | "FadeInTime": 0.75, 4 | "Parameters": [ 5 | { 6 | "Id": "ParamEyeLOpen", 7 | "Value": -0.377, 8 | "Blend": "Add" 9 
| }, 10 | { 11 | "Id": "ParamEyeROpen", 12 | "Value": -0.377, 13 | "Blend": "Add" 14 | }, 15 | { 16 | "Id": "ParamMouthOpenY", 17 | "Value": 0.308, 18 | "Blend": "Add" 19 | }, 20 | { 21 | "Id": "ParamAngerBrows", 22 | "Value": 0.663, 23 | "Blend": "Add" 24 | }, 25 | { 26 | "Id": "ParamTroubleBrows", 27 | "Value": 0.288, 28 | "Blend": "Add" 29 | }, 30 | { 31 | "Id": "ParamHairpinChange", 32 | "Value": 1, 33 | "Blend": "Add" 34 | }, 35 | { 36 | "Id": "ParamEyeBallY", 37 | "Value": 0, 38 | "Blend": "Add" 39 | }, 40 | { 41 | "Id": "ParamMouthForm", 42 | "Value": -0.308, 43 | "Blend": "Add" 44 | }, 45 | { 46 | "Id": "ParamBodyAngleY", 47 | "Value": -3.077, 48 | "Blend": "Add" 49 | } 50 | ] 51 | } 52 | -------------------------------------------------------------------------------- /public/live2d/nike01/expressions/HairpinChange.exp3.json: -------------------------------------------------------------------------------- 1 | { 2 | "Type": "Live2D Expression", 3 | "Parameters": [ 4 | { 5 | "Id": "ParamHairpinChange", 6 | "Value": 1, 7 | "Blend": "Add" 8 | } 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /public/live2d/nike01/expressions/Happy.exp3.json: -------------------------------------------------------------------------------- 1 | { 2 | "Type": "Live2D Expression", 3 | "FadeInTime": 0.75, 4 | "Parameters": [ 5 | { 6 | "Id": "ParamMouthForm", 7 | "Value": 1, 8 | "Blend": "Add" 9 | }, 10 | { 11 | "Id": "ParamHairpinChange", 12 | "Value": 1, 13 | "Blend": "Add" 14 | }, 15 | { 16 | "Id": "ParamEyeRSmile", 17 | "Value": 1, 18 | "Blend": "Add" 19 | }, 20 | { 21 | "Id": "ParamEyeLSmile", 22 | "Value": 1, 23 | "Blend": "Add" 24 | } 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /public/live2d/nike01/expressions/Happy2.exp3.json: -------------------------------------------------------------------------------- 1 | { 2 | "Type": "Live2D Expression", 3 | "FadeInTime": 0.75, 4 
| "Parameters": [ 5 | { 6 | "Id": "ParamHairpinChange", 7 | "Value": 1, 8 | "Blend": "Add" 9 | }, 10 | { 11 | "Id": "ParamEyeLSmile", 12 | "Value": 1, 13 | "Blend": "Add" 14 | }, 15 | { 16 | "Id": "ParamEyeLOpen", 17 | "Value": -1, 18 | "Blend": "Add" 19 | }, 20 | { 21 | "Id": "ParamEyeROpen", 22 | "Value": -1, 23 | "Blend": "Add" 24 | }, 25 | { 26 | "Id": "ParamEyeRSmile", 27 | "Value": 1, 28 | "Blend": "Add" 29 | }, 30 | { 31 | "Id": "ParamMouthForm", 32 | "Value": 0.6, 33 | "Blend": "Add" 34 | } 35 | ] 36 | } 37 | -------------------------------------------------------------------------------- /public/live2d/nike01/expressions/Neutral.exp3.json: -------------------------------------------------------------------------------- 1 | { 2 | "Type": "Live2D Expression", 3 | "FadeInTime": 0.75, 4 | "Parameters": [ 5 | { 6 | "Id": "ParamAngerBrows", 7 | "Value": 0, 8 | "Blend": "Add" 9 | }, 10 | { 11 | "Id": "ParamTroubleBrows", 12 | "Value": 0, 13 | "Blend": "Add" 14 | }, 15 | { 16 | "Id": "ParamHeMouth", 17 | "Value": 0, 18 | "Blend": "Add" 19 | }, 20 | { 21 | "Id": "ParamHairpinChange", 22 | "Value": 1, 23 | "Blend": "Add" 24 | } 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /public/live2d/nike01/expressions/NoSmile.exp3.json: -------------------------------------------------------------------------------- 1 | { 2 | "Type": "Live2D Expression", 3 | "FadeInTime": 0.75, 4 | "Parameters": [ 5 | { 6 | "Id": "ParamMouthForm", 7 | "Value": -1, 8 | "Blend": "Add" 9 | }, 10 | { 11 | "Id": "ParamHairpinChange", 12 | "Value": 1, 13 | "Blend": "Add" 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /public/live2d/nike01/expressions/Sad.exp3.json: -------------------------------------------------------------------------------- 1 | { 2 | "Type": "Live2D Expression", 3 | "FadeInTime": 0.75, 4 | "Parameters": [ 5 | { 6 | "Id": "ParamHairpinChange", 7 | "Value": 1, 8 | 
"Blend": "Add" 9 | }, 10 | { 11 | "Id": "ParamTroubleBrows", 12 | "Value": 1, 13 | "Blend": "Add" 14 | }, 15 | { 16 | "Id": "ParamHeMouth", 17 | "Value": 1, 18 | "Blend": "Add" 19 | }, 20 | { 21 | "Id": "ParamMouthOpenY", 22 | "Value": 0.224, 23 | "Blend": "Add" 24 | } 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /public/live2d/nike01/expressions/Sad2.exp3.json: -------------------------------------------------------------------------------- 1 | { 2 | "Type": "Live2D Expression", 3 | "FadeInTime": 0.75, 4 | "Parameters": [ 5 | { 6 | "Id": "ParamAngleZ", 7 | "Value": 14.471, 8 | "Blend": "Add" 9 | }, 10 | { 11 | "Id": "ParamTroubleBrows", 12 | "Value": 1, 13 | "Blend": "Add" 14 | }, 15 | { 16 | "Id": "ParamHeMouth", 17 | "Value": 0.929, 18 | "Blend": "Add" 19 | }, 20 | { 21 | "Id": "ParamMouthOpenY", 22 | "Value": 0.259, 23 | "Blend": "Add" 24 | }, 25 | { 26 | "Id": "ParamHairpinChange", 27 | "Value": 1, 28 | "Blend": "Add" 29 | } 30 | ] 31 | } 32 | -------------------------------------------------------------------------------- /public/live2d/nike01/expressions/Sleep.exp3.json: -------------------------------------------------------------------------------- 1 | { 2 | "Type": "Live2D Expression", 3 | "Parameters": [ 4 | { 5 | "Id": "ParamHairpinChange", 6 | "Value": 1, 7 | "Blend": "Add" 8 | }, 9 | { 10 | "Id": "ParamEyeLOpen", 11 | "Value": -1, 12 | "Blend": "Add" 13 | }, 14 | { 15 | "Id": "ParamEyeROpen", 16 | "Value": -1, 17 | "Blend": "Add" 18 | }, 19 | { 20 | "Id": "ParamEyeLSmile", 21 | "Value": 0, 22 | "Blend": "Add" 23 | }, 24 | { 25 | "Id": "ParamMouthOpenY", 26 | "Value": 0, 27 | "Blend": "Add" 28 | }, 29 | { 30 | "Id": "ParamMouthForm", 31 | "Value": -1, 32 | "Blend": "Add" 33 | } 34 | ] 35 | } 36 | -------------------------------------------------------------------------------- /public/live2d/nike01/expressions/Think.exp3.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "Type": "Live2D Expression", 3 | "FadeInTime": 0.75, 4 | "Parameters": [ 5 | { 6 | "Id": "ParamAngleZ", 7 | "Value": 20.192, 8 | "Blend": "Add" 9 | }, 10 | { 11 | "Id": "ParamEyeLOpen", 12 | "Value": -0.158, 13 | "Blend": "Add" 14 | }, 15 | { 16 | "Id": "ParamEyeLSmile", 17 | "Value": 0, 18 | "Blend": "Add" 19 | }, 20 | { 21 | "Id": "ParamEyeROpen", 22 | "Value": -0.031, 23 | "Blend": "Add" 24 | }, 25 | { 26 | "Id": "ParamEyeRSmile", 27 | "Value": 0, 28 | "Blend": "Add" 29 | }, 30 | { 31 | "Id": "ParamEyeBallX", 32 | "Value": -0.788, 33 | "Blend": "Add" 34 | }, 35 | { 36 | "Id": "ParamEyeBallY", 37 | "Value": 0.558, 38 | "Blend": "Add" 39 | }, 40 | { 41 | "Id": "ParamMouthForm", 42 | "Value": -0.654, 43 | "Blend": "Add" 44 | }, 45 | { 46 | "Id": "ParamTroubleBrows", 47 | "Value": 0, 48 | "Blend": "Add" 49 | }, 50 | { 51 | "Id": "ParamHeMouth", 52 | "Value": 0.5, 53 | "Blend": "Add" 54 | }, 55 | { 56 | "Id": "ParamHairpinChange", 57 | "Value": 1, 58 | "Blend": "Add" 59 | } 60 | ] 61 | } 62 | -------------------------------------------------------------------------------- /public/live2d/nike01/expressions/Think2.exp3.json: -------------------------------------------------------------------------------- 1 | { 2 | "Type": "Live2D Expression", 3 | "FadeInTime": 0.75, 4 | "Parameters": [ 5 | { 6 | "Id": "ParamHairpinChange", 7 | "Value": 1, 8 | "Blend": "Add" 9 | }, 10 | { 11 | "Id": "ParamEyeBallY", 12 | "Value": 0.288, 13 | "Blend": "Add" 14 | }, 15 | { 16 | "Id": "ParamEyeBallX", 17 | "Value": -0.712, 18 | "Blend": "Add" 19 | }, 20 | { 21 | "Id": "ParamHeMouth", 22 | "Value": 0.49, 23 | "Blend": "Add" 24 | }, 25 | { 26 | "Id": "ParamMouthForm", 27 | "Value": -0.635, 28 | "Blend": "Add" 29 | } 30 | ] 31 | } 32 | -------------------------------------------------------------------------------- /public/live2d/nike01/expressions/Troubled.exp3.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "Type": "Live2D Expression", 3 | "FadeInTime": 0.75, 4 | "Parameters": [ 5 | { 6 | "Id": "ParamHairpinChange", 7 | "Value": 1, 8 | "Blend": "Add" 9 | }, 10 | { 11 | "Id": "ParamHeMouth", 12 | "Value": 0.471, 13 | "Blend": "Add" 14 | }, 15 | { 16 | "Id": "ParamTroubleBrows", 17 | "Value": 1, 18 | "Blend": "Add" 19 | }, 20 | { 21 | "Id": "ParamAngerBrows", 22 | "Value": 0.346, 23 | "Blend": "Add" 24 | } 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /public/live2d/nike01/expressions/Zitome.exp3.json: -------------------------------------------------------------------------------- 1 | { 2 | "Type": "Live2D Expression", 3 | "Parameters": [ 4 | { 5 | "Id": "ParamHairpinChange", 6 | "Value": 1, 7 | "Blend": "Add" 8 | }, 9 | { 10 | "Id": "ParamEyeLOpen", 11 | "Value": -0.4, 12 | "Blend": "Add" 13 | }, 14 | { 15 | "Id": "ParamEyeROpen", 16 | "Value": -0.4, 17 | "Blend": "Add" 18 | }, 19 | { 20 | "Id": "ParamMouthForm", 21 | "Value": -1, 22 | "Blend": "Add" 23 | }, 24 | { 25 | "Id": "ParamMouthOpenY", 26 | "Value": 0.317, 27 | "Blend": "Add" 28 | } 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- /public/live2d/nike01/items_pinned_to_model.json: -------------------------------------------------------------------------------- 1 | { 2 | "FileMetadata": { 3 | "LastSavedVTubeStudioVersion": "1.26.11", 4 | "LastSavedPlatform": "Steam", 5 | "LastSavedDateUTC": "Thursday, 07 September 2023, 03:03:14", 6 | "LastSavedDateLocalTime": "Thursday, 07 September 2023, 12:03:14", 7 | "LastSavedDateUnixMillisecondTimestamp": "1694055794809" 8 | }, 9 | "SceneName": "", 10 | "SceneGroupName": "", 11 | "SceneModel": "", 12 | "SceneID": "", 13 | "Items": [] 14 | } 15 | -------------------------------------------------------------------------------- /public/live2d/nike01/motions/Motion9.motion3.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "Version": 3, 3 | "Meta": { 4 | "Duration": 1.633, 5 | "Fps": 30.0, 6 | "Loop": false, 7 | "AreBeziersRestricted": true, 8 | "CurveCount": 1, 9 | "TotalSegmentCount": 5, 10 | "TotalPointCount": 16, 11 | "UserDataCount": 0, 12 | "TotalUserDataSize": 0 13 | }, 14 | "Curves": [ 15 | { 16 | "Target": "Parameter", 17 | "Id": "ParamAngleX", 18 | "Segments": [ 19 | 0, 0, 1, 0.111, 0, 0.222, -15, 0.333, -15, 1, 0.444, -15, 0.556, 15, 20 | 0.667, 15, 1, 0.778, 15, 0.889, -15, 1, -15, 1, 1.111, -15, 1.222, 15, 21 | 1.333, 15, 1, 1.433, 15, 1.533, 2.85, 1.633, 0.42 22 | ] 23 | } 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /public/live2d/nike01/nike01.8192/texture_00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/live2d/nike01/nike01.8192/texture_00.png -------------------------------------------------------------------------------- /public/live2d/nike01/nike01.moc3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/live2d/nike01/nike01.moc3 -------------------------------------------------------------------------------- /public/ogp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/ogp.png -------------------------------------------------------------------------------- /public/scripts/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/scripts/.gitkeep 
-------------------------------------------------------------------------------- /public/slides/demo/images/demo-folder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/slides/demo/images/demo-folder.png -------------------------------------------------------------------------------- /public/slides/demo/images/file-structure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/slides/demo/images/file-structure.png -------------------------------------------------------------------------------- /public/slides/demo/images/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/slides/demo/images/logo.png -------------------------------------------------------------------------------- /public/slides/demo/images/settings-screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/slides/demo/images/settings-screen.png -------------------------------------------------------------------------------- /public/slides/demo/images/start-button.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/slides/demo/images/start-button.png -------------------------------------------------------------------------------- /public/slides/demo/scripts.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "page": 0, 4 | "line": "これからAITuberKitのスライドモードについての解説を始めます。", 5 | 
"notes": "" 6 | }, 7 | { 8 | "page": 1, 9 | "line": "スライドモードの開発を始める前に、AITuberKitで会話できている必要があるので準備しておいてください。ただし、スライドモードはOpenAI, Anthropic Claude, Google Geminiのみ対応しています。以降はこの前提で説明していきたいと思いますので、まだの人は解説したnoteを参考にしてください。AITuberKitで検索するとでてくると思います。", 10 | "notes": "AIサービスはそれ以外にGroq, ローカルLLM, Difyが選択可能です。これらが選択できないのは、回答するのにある程度LLMの性能が必要だからです。Anthropicでもclaude-haikuはあまり効果的な回答ができない可能性があります。ここで選択されたAIサービスは、質問の回答生成時に使用されます。" 11 | }, 12 | { 13 | "page": 2, 14 | "line": "それでは解説を始めます。まずスライドの用意をしておきましょう。最低限必要なのは、scripts.jsonとslides.mdです。scripts.jsonには台本を記述します。slides.mdにはmarpで作成したスライドを記述してください。", 15 | "notes": "scripts.jsonにはAIキャラのセリフを予め記載しておきます。notesには追加の情報を記載してください。これが質問のときに使用されます。スライドとは関係ない追加情報はsupplement.jsonに記載してください。" 16 | }, 17 | { 18 | "page": 3, 19 | "line": "詳細は、publicフォルダにあるdemoを参照して作成してください。このフォルダをそのままコピーすると簡単かもしれません。demoと同じところに任意のフォルダ名で配置してください。", 20 | "notes": "" 21 | }, 22 | { 23 | "page": 4, 24 | "line": "設定画面を開き、スライドモードを有効にしてください。このとき、使用するスライドに先ほど作成したフォルダの名称を記入してください。それでは設定画面を閉じましょう。", 25 | "notes": "OpenAI, Anthropic Claude, Google Gemini以外を選択しているとスライドモードを有効にすることができません。" 26 | }, 27 | { 28 | "page": 5, 29 | "line": "スライドが表示されていたら準備はすでにできています。スライド中央下の丸いボタンを押して開始してください。自動的にスライドの説明が始まります。", 30 | "notes": "戻るボタンと進むボタンを使用することで、開始するスライドを変更することができます。" 31 | }, 32 | { 33 | "page": 6, 34 | "line": "停止ボタンを押すと、次のスライドに進みません。ただし、音声はそのスライドの説明が終わるまで続きます。停止中はチャット欄から質問することができます。", 35 | "notes": "1つのスライドの説明が長すぎると発言がしばらく続いてしまうので、1枚のセリフ量は少なくした方が良いです。" 36 | }, 37 | { 38 | "page": 7, 39 | "line": "以上でスライドモードの簡単な説明を終わります。不明点があったらマスターのDMにお問い合わせください。ご清聴ありがとうございました。", 40 | "notes": "Twitterアカウントの他、Discordサーバーもあるのでそちらも活用してください。" 41 | } 42 | ] 43 | -------------------------------------------------------------------------------- /public/slides/demo/slides.md: -------------------------------------------------------------------------------- 1 | --- 2 | marp: true 3 | theme: custom 4 | paginate: true 5 | --- 6 | 7 | 8 | 9 | # AITuberKitのスライドモード解説 10 | 11 | ![](/slides/demo/images/logo.png) 12 | 
13 | --- 14 | 15 | # 準備 16 | 17 | - AITuberKitで会話できる状態にしておく 18 | - 対応AI: 19 | 20 | - OpenAI 21 | - Anthropic Claude 22 | - Google Gemini 23 | 24 | - ※ まだの方はAITuberKitの解説noteを参照(https://note.com/nike_cha_n/n/ne98acb25e00f) 25 | 26 | --- 27 | 28 | # スライドの用意 29 | 30 |
31 |
32 | 33 | 最低限必要なファイル: 34 | 35 | 1. `scripts.json`(台本) 36 | 2. `slides.md`(Marpスライド) 37 | 38 |
39 |
40 | 41 | ![height:400px](/slides/demo/images/file-structure.png) 42 | 43 |
44 |
45 | 46 | --- 47 | 48 | # デモフォルダの活用 49 | 50 |
51 |
52 | 53 | - `public/demo`フォルダを参照 54 | - デモフォルダをコピーして使用可能 55 | - 任意のフォルダ名で配置 56 | 57 |
58 |
59 | 60 | ![height:300px](/slides/demo/images/demo-folder.png) 61 | 62 |
63 |
64 | 65 | --- 66 | 67 | # スライドモードの有効化 68 | 69 |
70 |
71 | 72 | 1. 設定画面を開く 73 | 2. スライドモードを有効にする 74 | 3. 使用するスライドフォルダ名を記入 75 | 4. 設定画面を閉じる 76 | 77 |
78 |
79 | 80 | ![height:400px](/slides/demo/images/settings-screen.png) 81 | 82 |
83 |
84 | 85 | --- 86 | 87 | # スライドの開始 88 | 89 | - スライドが表示されていることを確認 90 | - 右下のボタンを押して開始 91 | 92 |
93 | 開始ボタン 94 |
95 | 96 | --- 97 | 98 | # 備考 99 | 100 | - 停止ボタンで次のスライドに進まない 101 | - 音声は現在のスライドの説明が終わるまで続く 102 | - 停止中はチャット欄から質問可能 103 | 104 | --- 105 | 106 | 107 | 108 | # ご視聴ありがとうございました 109 | 110 | 不明点は作者にお問い合わせください! 111 | X: @tegnike 112 | -------------------------------------------------------------------------------- /public/slides/sample.txt: -------------------------------------------------------------------------------- 1 | あなたはスライドの発表者です。 2 | 今まさにスライドを発表している最中です。 3 | 4 | 視聴者から質問が来ているので、以下の資料情報を元に回答してください。 5 | ただし、情報は正しく使用し、ハルシネーションはしないでください。 6 | 通常の質問には普通に返してもらっても問題ありません。 7 | 8 | 台本情報 9 | ``` 10 | {{SCRIPTS}} 11 | ``` 12 | 13 | 追加情報 14 | ``` 15 | {{SUPPLEMENT}} 16 | ``` 17 | 18 | なお、回答は会話文の書式は以下の通りで、感情と会話文を組み合わせてください。 19 | [{neutral|happy|angry|sad|relaxed}]{会話文} 20 | 21 | 回答の際には感情の種類には通常を示す"neutral"、喜びを示す"happy",怒りを示す"angry",悲しみを示す"sad",安らぎを示す"relaxed"の5つがあります。 22 | 23 | あなたの発言の例は以下通りです。 24 | [neutral]皆さん、本日はお集まりいただき、ありがとうございます。 25 | [happy]今回のプレゼンテーションでは、興味深いトピックについてお話しできることを嬉しく思います。 26 | [neutral]さて、ただいまのスライドについて、ご質問はありますか? 27 | [happy]素晴らしい質問をありがとうございます! 
28 | [relaxed]その点については、次のスライドで詳しく説明させていただきます。 29 | [sad]申し訳ありません。その情報は現在持ち合わせておりません。 30 | [angry]いいえ、それは誤解です。正確な情報をお伝えしますね。 31 | [neutral]他に質問はございますか?[happy]皆さんの積極的な参加に感謝いたします。 32 | -------------------------------------------------------------------------------- /public/speakers_aivis.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "speaker": "Anneli/ノーマル", 4 | "id": 888753760 5 | }, 6 | { 7 | "speaker": "Anneli/通常", 8 | "id": 888753761 9 | }, 10 | { 11 | "speaker": "Anneli/テンション高め", 12 | "id": 888753762 13 | }, 14 | { 15 | "speaker": "Anneli/落ち着き", 16 | "id": 888753763 17 | }, 18 | { 19 | "speaker": "Anneli/上機嫌", 20 | "id": 888753764 21 | }, 22 | { 23 | "speaker": "Anneli/怒り・悲しみ", 24 | "id": 888753765 25 | }, 26 | { 27 | "speaker": "Anneli (NSFW)/ノーマル", 28 | "id": 1196801504 29 | }, 30 | { 31 | "speaker": "Anneli_Tsukuyomi/ノーマル", 32 | "id": 40618528 33 | }, 34 | { 35 | "speaker": "fumifumi/ノーマル", 36 | "id": 606865152 37 | } 38 | ] 39 | -------------------------------------------------------------------------------- /public/voice_test.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/voice_test.wav -------------------------------------------------------------------------------- /public/vrm/AvatarSample_A.vrm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/vrm/AvatarSample_A.vrm -------------------------------------------------------------------------------- /public/vrm/AvatarSample_B.vrm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/vrm/AvatarSample_B.vrm 
-------------------------------------------------------------------------------- /public/vrm/AvatarSample_C.vrm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/vrm/AvatarSample_C.vrm -------------------------------------------------------------------------------- /public/vrm/nikechan_v1.vrm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/vrm/nikechan_v1.vrm -------------------------------------------------------------------------------- /public/vrm/nikechan_v2.vrm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/vrm/nikechan_v2.vrm -------------------------------------------------------------------------------- /public/vrm/nikechan_v2_outerwear.vrm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tegnike/aituber-kit/9ed2c2412c7072e1f4a3e404a707d92e24842d81/public/vrm/nikechan_v2_outerwear.vrm -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | requests==2.31.0 2 | langchain-openai==0.1.1 3 | langgraph==0.0.27 4 | pydantic==2.5.2 -------------------------------------------------------------------------------- /scripts/.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | .venv 3 | __pycache__ -------------------------------------------------------------------------------- /scripts/requirements.txt: -------------------------------------------------------------------------------- 1 | annotated-types==0.7.0 2 | 
anyio==4.8.0 3 | certifi==2025.1.31 4 | charset-normalizer==3.4.1 5 | distro==1.9.0 6 | h11==0.14.0 7 | httpcore==1.0.7 8 | httpx==0.28.1 9 | idna==3.10 10 | jiter==0.8.2 11 | jsonpatch==1.33 12 | jsonpointer==3.0.0 13 | langchain-core==0.3.40 14 | langchain-openai==0.2.14 15 | langgraph==0.2.60 16 | langgraph-checkpoint==2.0.16 17 | langgraph-sdk==0.1.53 18 | langsmith==0.3.11 19 | msgpack==1.1.0 20 | openai==1.64.0 21 | orjson==3.10.15 22 | packaging==24.2 23 | pydantic==2.10.6 24 | pydantic_core==2.27.2 25 | python-dotenv==1.0.1 26 | PyYAML==6.0.2 27 | regex==2024.11.6 28 | requests==2.32.3 29 | requests-toolbelt==1.0.0 30 | sniffio==1.3.1 31 | tenacity==9.0.0 32 | tiktoken==0.9.0 33 | tqdm==4.67.1 34 | typing_extensions==4.12.2 35 | urllib3==2.3.0 36 | zstandard==0.23.0 37 | -------------------------------------------------------------------------------- /src/__mocks__/canvas.js: -------------------------------------------------------------------------------- 1 | module.exports = {} 2 | 3 | module.exports.createCanvas = jest.fn(() => ({ 4 | getContext: jest.fn(() => ({ 5 | measureText: jest.fn(() => ({ width: 0 })), 6 | fillText: jest.fn(), 7 | fillRect: jest.fn(), 8 | drawImage: jest.fn(), 9 | putImageData: jest.fn(), 10 | getImageData: jest.fn(), 11 | createImageData: jest.fn(), 12 | setTransform: jest.fn(), 13 | save: jest.fn(), 14 | restore: jest.fn(), 15 | translate: jest.fn(), 16 | rotate: jest.fn(), 17 | scale: jest.fn(), 18 | })), 19 | toBuffer: jest.fn(() => Buffer.from([])), 20 | toDataURL: jest.fn(() => ''), 21 | width: 100, 22 | height: 100, 23 | })) 24 | 25 | module.exports.Image = jest.fn(function () { 26 | this.src = '' 27 | this.onload = null 28 | this.width = 0 29 | this.height = 0 30 | }) 31 | 32 | module.exports.loadImage = jest.fn(() => 33 | Promise.resolve(new module.exports.Image()) 34 | ) 35 | module.exports.registerFont = jest.fn() 36 | -------------------------------------------------------------------------------- 
/src/__mocks__/canvasMock.js: -------------------------------------------------------------------------------- 1 | class Canvas { 2 | constructor() { 3 | this.width = 100 4 | this.height = 100 5 | } 6 | 7 | getContext = jest.fn(() => ({ 8 | measureText: jest.fn(() => ({ width: 0 })), 9 | fillText: jest.fn(), 10 | fillRect: jest.fn(), 11 | drawImage: jest.fn(), 12 | putImageData: jest.fn(), 13 | getImageData: jest.fn(), 14 | createImageData: jest.fn(), 15 | setTransform: jest.fn(), 16 | save: jest.fn(), 17 | restore: jest.fn(), 18 | translate: jest.fn(), 19 | rotate: jest.fn(), 20 | scale: jest.fn(), 21 | })) 22 | 23 | toBuffer = jest.fn(() => Buffer.from([])) 24 | toDataURL = jest.fn(() => '') 25 | } 26 | 27 | const createCanvas = jest.fn((width, height) => { 28 | const canvas = new Canvas() 29 | canvas.width = width || 100 30 | canvas.height = height || 100 31 | return canvas 32 | }) 33 | 34 | const Image = jest.fn(function () { 35 | this.src = '' 36 | this.onload = null 37 | this.width = 0 38 | this.height = 0 39 | }) 40 | 41 | const loadImage = jest.fn(() => Promise.resolve(new Image())) 42 | 43 | module.exports = { 44 | Canvas, 45 | createCanvas, 46 | loadImage, 47 | Image, 48 | registerFont: jest.fn(), 49 | parseFont: jest.fn(), 50 | createImageData: jest.fn(), 51 | ImageData: jest.fn(), 52 | PNGStream: jest.fn(), 53 | JPEGStream: jest.fn(), 54 | PDFStream: jest.fn(), 55 | } 56 | -------------------------------------------------------------------------------- /src/__mocks__/node-canvas.js: -------------------------------------------------------------------------------- 1 | const Canvas = jest.fn(() => ({ 2 | getContext: jest.fn(() => ({ 3 | measureText: jest.fn(() => ({ width: 0 })), 4 | fillText: jest.fn(), 5 | fillRect: jest.fn(), 6 | drawImage: jest.fn(), 7 | putImageData: jest.fn(), 8 | getImageData: jest.fn(), 9 | createImageData: jest.fn(), 10 | setTransform: jest.fn(), 11 | save: jest.fn(), 12 | restore: jest.fn(), 13 | translate: jest.fn(), 14 | 
rotate: jest.fn(), 15 | scale: jest.fn(), 16 | })), 17 | toBuffer: jest.fn(() => Buffer.from([])), 18 | toDataURL: jest.fn(() => ''), 19 | width: 100, 20 | height: 100, 21 | })) 22 | 23 | const createCanvas = jest.fn((width, height) => { 24 | const canvas = new Canvas() 25 | canvas.width = width || 100 26 | canvas.height = height || 100 27 | return canvas 28 | }) 29 | 30 | const Image = jest.fn(function () { 31 | this.src = '' 32 | this.onload = null 33 | this.width = 0 34 | this.height = 0 35 | }) 36 | 37 | const loadImage = jest.fn(() => Promise.resolve(new Image())) 38 | 39 | module.exports = { 40 | Canvas, 41 | createCanvas, 42 | loadImage, 43 | Image, 44 | registerFont: jest.fn(), 45 | parseFont: jest.fn(), 46 | createImageData: jest.fn(), 47 | ImageData: jest.fn(), 48 | PNGStream: jest.fn(), 49 | JPEGStream: jest.fn(), 50 | PDFStream: jest.fn(), 51 | } 52 | -------------------------------------------------------------------------------- /src/__mocks__/openai.js: -------------------------------------------------------------------------------- 1 | const mockOpenAI = jest.fn().mockImplementation(() => ({ 2 | chat: { 3 | completions: { 4 | create: jest.fn(), 5 | }, 6 | }, 7 | })) 8 | 9 | export default mockOpenAI 10 | -------------------------------------------------------------------------------- /src/__mocks__/readableStream.js: -------------------------------------------------------------------------------- 1 | class MockReadableStream { 2 | constructor(options) { 3 | this._startFn = options.start 4 | this.locked = false 5 | } 6 | 7 | getReader() { 8 | this.locked = true 9 | return { 10 | read: async () => ({ done: true, value: undefined }), 11 | releaseLock: () => { 12 | this.locked = false 13 | }, 14 | } 15 | } 16 | } 17 | 18 | global.ReadableStream = MockReadableStream 19 | -------------------------------------------------------------------------------- /src/__mocks__/three/examples/jsm/controls/OrbitControls.js: 
-------------------------------------------------------------------------------- 1 | export class OrbitControls { 2 | constructor(camera, domElement) { 3 | this.camera = camera 4 | this.domElement = domElement 5 | this.enabled = true 6 | this.target = { set: () => {}, copy: () => {} } 7 | } 8 | 9 | update() {} 10 | dispose() {} 11 | reset() {} 12 | saveState() {} 13 | 14 | addEventListener() {} 15 | removeEventListener() {} 16 | dispatchEvent() {} 17 | } 18 | -------------------------------------------------------------------------------- /src/__mocks__/three/examples/jsm/loaders/GLTFLoader.js: -------------------------------------------------------------------------------- 1 | export class GLTFLoader { 2 | load(url, onLoad, onProgress, onError) { 3 | setTimeout(() => { 4 | onLoad({ 5 | scene: {}, 6 | scenes: [], 7 | animations: [], 8 | cameras: [], 9 | asset: {}, 10 | parser: {}, 11 | userData: {}, 12 | }) 13 | }, 0) 14 | } 15 | 16 | loadAsync(url) { 17 | return Promise.resolve({ 18 | scene: {}, 19 | scenes: [], 20 | animations: [], 21 | cameras: [], 22 | asset: {}, 23 | parser: {}, 24 | userData: {}, 25 | }) 26 | } 27 | 28 | setDRACOLoader(dracoLoader) { 29 | return this 30 | } 31 | 32 | setKTX2Loader(ktx2Loader) { 33 | return this 34 | } 35 | 36 | setMeshoptDecoder(decoder) { 37 | return this 38 | } 39 | 40 | register(plugin) { 41 | return this 42 | } 43 | 44 | unregister(plugin) { 45 | return this 46 | } 47 | 48 | setPath(path) { 49 | return this 50 | } 51 | 52 | setResourcePath(path) { 53 | return this 54 | } 55 | 56 | setCrossOrigin(crossOrigin) { 57 | return this 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/__tests__/testUtils.ts: -------------------------------------------------------------------------------- 1 | export async function consumeStream( 2 | stream: ReadableStream 3 | ): Promise { 4 | const reader = stream.getReader() 5 | let result = '' 6 | while (true) { 7 | const { done, value } = 
await reader.read() 8 | if (done) { 9 | break 10 | } 11 | if (value) { 12 | result += value 13 | } 14 | } 15 | return result 16 | } 17 | -------------------------------------------------------------------------------- /src/components/assistantText.tsx: -------------------------------------------------------------------------------- 1 | import settingsStore from '@/features/stores/settings' 2 | 3 | export const AssistantText = ({ message }: { message: string }) => { 4 | const characterName = settingsStore((s) => s.characterName) 5 | const showCharacterName = settingsStore((s) => s.showCharacterName) 6 | const showPresetQuestions = settingsStore((s) => s.showPresetQuestions) 7 | 8 | return ( 9 |
12 |
13 |
14 | {showCharacterName && ( 15 |
16 | {characterName} 17 |
18 | )} 19 |
20 |
21 | {message.replace(/\[([a-zA-Z]*?)\]/g, '')} 22 |
23 |
24 |
25 |
26 |
27 | ) 28 | } 29 | -------------------------------------------------------------------------------- /src/components/form.tsx: -------------------------------------------------------------------------------- 1 | import { useCallback, useEffect, useState } from 'react' 2 | import settingsStore from '@/features/stores/settings' 3 | import homeStore from '@/features/stores/home' 4 | import menuStore from '@/features/stores/menu' 5 | import slideStore from '@/features/stores/slide' 6 | import { handleSendChatFn } from '../features/chat/handlers' 7 | import { MessageInputContainer } from './messageInputContainer' 8 | import { PresetQuestionButtons } from './presetQuestionButtons' 9 | import { SlideText } from './slideText' 10 | 11 | export const Form = () => { 12 | const modalImage = homeStore((s) => s.modalImage) 13 | const webcamStatus = homeStore((s) => s.webcamStatus) 14 | const captureStatus = homeStore((s) => s.captureStatus) 15 | const slideMode = settingsStore((s) => s.slideMode) 16 | const slideVisible = menuStore((s) => s.slideVisible) 17 | const slidePlaying = slideStore((s) => s.isPlaying) 18 | const chatProcessingCount = homeStore((s) => s.chatProcessingCount) 19 | const [delayedText, setDelayedText] = useState('') 20 | const handleSendChat = handleSendChatFn() 21 | 22 | useEffect(() => { 23 | // テキストと画像がそろったら、チャットを送信 24 | if (delayedText && modalImage) { 25 | handleSendChat(delayedText) 26 | setDelayedText('') 27 | } 28 | }, [modalImage, delayedText, handleSendChat]) 29 | 30 | const hookSendChat = useCallback( 31 | (text: string) => { 32 | // すでにmodalImageが存在する場合は、Webcamのキャプチャーをスキップ 33 | if (!homeStore.getState().modalImage) { 34 | homeStore.setState({ triggerShutter: true }) 35 | } 36 | 37 | // MENUの中でshowCameraがtrueの場合、画像が取得されるまで待機 38 | if (webcamStatus || captureStatus) { 39 | // Webcamが開いている場合 40 | setDelayedText(text) // 画像が取得されるまで遅延させる 41 | } else { 42 | handleSendChat(text) 43 | } 44 | }, 45 | [handleSendChat, webcamStatus, captureStatus, 
setDelayedText] 46 | ) 47 | 48 | return slideMode && 49 | slideVisible && 50 | slidePlaying && 51 | chatProcessingCount !== 0 ? ( 52 | 53 | ) : ( 54 | <> 55 | 56 | 57 | 58 | ) 59 | } 60 | -------------------------------------------------------------------------------- /src/components/githubLink.tsx: -------------------------------------------------------------------------------- 1 | import Image from 'next/image' 2 | import { buildUrl } from '@/utils/buildUrl' 3 | 4 | export const GitHubLink = () => { 5 | return ( 6 | 24 | ) 25 | } 26 | -------------------------------------------------------------------------------- /src/components/iconButton.tsx: -------------------------------------------------------------------------------- 1 | import { KnownIconType } from '@charcoal-ui/icons' 2 | import { ButtonHTMLAttributes } from 'react' 3 | import Image from 'next/image' 4 | 5 | type Props = ButtonHTMLAttributes & { 6 | iconName: keyof KnownIconType | 'screen-share' | 'stop' 7 | isProcessing: boolean 8 | isProcessingIcon?: keyof KnownIconType 9 | label?: string 10 | iconColor?: string 11 | backgroundColor?: string 12 | } 13 | 14 | export const IconButton = ({ 15 | iconName, 16 | isProcessing, 17 | isProcessingIcon, 18 | label, 19 | iconColor, 20 | backgroundColor = 'bg-primary hover:bg-primary-hover active:bg-primary-press disabled:bg-primary-disabled', 21 | ...rest 22 | }: Props) => { 23 | return ( 24 | 50 | ) 51 | } 52 | -------------------------------------------------------------------------------- /src/components/link.tsx: -------------------------------------------------------------------------------- 1 | export const Link = ({ url, label }: { url: string; label: string }) => { 2 | return ( 3 | 9 | {label} 10 | 11 | ) 12 | } 13 | -------------------------------------------------------------------------------- /src/components/messageInputContainer.tsx: -------------------------------------------------------------------------------- 1 | import { useEffect } from 
'react' 2 | import { MessageInput } from '@/components/messageInput' 3 | import homeStore from '@/features/stores/home' 4 | import settingsStore from '@/features/stores/settings' 5 | import { useVoiceRecognition } from '@/hooks/useVoiceRecognition' 6 | 7 | // 無音検出用の状態と変数を追加 8 | type Props = { 9 | onChatProcessStart: (text: string) => void 10 | } 11 | 12 | export const MessageInputContainer = ({ onChatProcessStart }: Props) => { 13 | const isSpeaking = homeStore((s) => s.isSpeaking) 14 | const continuousMicListeningMode = settingsStore( 15 | (s) => s.continuousMicListeningMode 16 | ) 17 | const speechRecognitionMode = settingsStore((s) => s.speechRecognitionMode) 18 | 19 | // 音声認識フックを使用 20 | const { 21 | userMessage, 22 | isListening, 23 | silenceTimeoutRemaining, 24 | handleInputChange, 25 | handleSendMessage, 26 | toggleListening, 27 | handleStopSpeaking, 28 | startListening, 29 | stopListening, 30 | } = useVoiceRecognition({ onChatProcessStart }) 31 | 32 | // 常時マイク入力モードの切り替え 33 | const toggleContinuousMode = () => { 34 | // Whisperモードの場合は常時マイク入力モードを使用できない 35 | if (speechRecognitionMode === 'whisper') return 36 | 37 | // 現在のモードを反転して設定 38 | settingsStore.setState({ 39 | continuousMicListeningMode: !continuousMicListeningMode, 40 | }) 41 | } 42 | 43 | return ( 44 | 58 | ) 59 | } 60 | -------------------------------------------------------------------------------- /src/components/meta.tsx: -------------------------------------------------------------------------------- 1 | import { buildUrl } from '@/utils/buildUrl' 2 | import Head from 'next/head' 3 | export const Meta = () => { 4 | const title = 'AITuberKit' 5 | const description = 6 | 'Webブラウザだけで誰でも簡単にAIキャラと会話したり、Youtubeで配信したりできます。' 7 | const imageUrl = '/ogp.png' 8 | return ( 9 | 10 | {title} 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | ) 21 | } 22 | -------------------------------------------------------------------------------- /src/components/modalImage.tsx: 
-------------------------------------------------------------------------------- 1 | import Image from 'next/image' 2 | 3 | import { IconButton } from '@/components/iconButton' 4 | import homeStore from '@/features/stores/home' 5 | 6 | const ModalImage = () => { 7 | const modalImage = homeStore((s) => s.modalImage) 8 | 9 | if (!modalImage) return null 10 | 11 | return ( 12 |
18 |
19 | Modal Image 26 |
27 | homeStore.setState({ modalImage: '' })} 32 | /> 33 |
34 |
35 |
36 | ) 37 | } 38 | export default ModalImage 39 | -------------------------------------------------------------------------------- /src/components/realtimeAPITools.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "function", 4 | "name": "get_current_weather", 5 | "description": "Retrieves the current weather for a given timezone, latitude, longitude coordinate pair. Specify a label for the location.", 6 | "parameters": { 7 | "type": "object", 8 | "properties": { 9 | "latitude": { 10 | "type": "number", 11 | "description": "Latitude" 12 | }, 13 | "longitude": { 14 | "type": "number", 15 | "description": "Longitude" 16 | }, 17 | "timezone": { 18 | "type": "string", 19 | "description": "Timezone" 20 | }, 21 | "location": { 22 | "type": "string", 23 | "description": "Name of the location" 24 | } 25 | }, 26 | "required": ["timezone", "latitude", "longitude", "location"] 27 | } 28 | } 29 | ] 30 | -------------------------------------------------------------------------------- /src/components/realtimeAPITools.tsx: -------------------------------------------------------------------------------- 1 | class RealtimeAPITools { 2 | async get_current_weather( 3 | latitude: number, 4 | longitude: number, 5 | timezone: string, 6 | location: string 7 | ): Promise { 8 | console.log( 9 | `Getting weather for ${location} (${latitude}, ${longitude}), timezone: ${timezone}` 10 | ) 11 | 12 | // Open-Meteo APIにリクエストを送信 13 | const url = `https://api.open-meteo.com/v1/forecast?latitude=${latitude}&longitude=${longitude}&hourly=temperature_2m,weathercode&timezone=${encodeURIComponent(timezone)}` 14 | const response = await fetch(url) 15 | const data = await response.json() 16 | 17 | console.log(data) 18 | 19 | // 最初の値を取得 20 | const temperature = data.hourly.temperature_2m[0] 21 | const weathercode = data.hourly.weathercode[0] 22 | 23 | // 天気コードを天気状況に変換 24 | const weatherStatus = this.getWeatherStatus(weathercode) 25 | 26 | return 
`天気情報: ${location}の現在の気温は${temperature}°C、天気は${weatherStatus}です。` 27 | } 28 | 29 | // 天気コードを天気状況に変換するヘルパー関数 30 | private getWeatherStatus(code: number): string { 31 | // 天気コードに応じて適切な天気状況を返す 32 | if (code === 0) return '快晴' 33 | if ([1, 2, 3].includes(code)) return '晴れ' 34 | if (code >= 51 && code <= 55) return '霧雨' 35 | if (code >= 61 && code <= 65) return '雨' 36 | if (code === 80) return 'にわか雨' 37 | // その他の天気コードに対応 38 | if (code === 45) return '霧' 39 | if (code >= 71 && code <= 75) return '雪' 40 | return '不明' 41 | } 42 | 43 | // Add other functions here 44 | } 45 | 46 | const realtimeAPITools = new RealtimeAPITools() 47 | export default realtimeAPITools 48 | -------------------------------------------------------------------------------- /src/components/realtimeAPIUtils.tsx: -------------------------------------------------------------------------------- 1 | import { AudioBufferManager } from '@/utils/audioBufferManager' 2 | 3 | // 型定義 4 | export interface TmpMessage { 5 | text: string 6 | role: string 7 | emotion: string 8 | type: string 9 | buffer?: ArrayBuffer 10 | } 11 | 12 | export interface Params { 13 | handleReceiveTextFromRt: ( 14 | text: string, 15 | role?: string, 16 | type?: string, 17 | buffer?: ArrayBuffer 18 | ) => Promise 19 | } 20 | 21 | // セッション設定用の型定義 22 | export interface SessionConfig { 23 | type: string 24 | session: { 25 | modalities: string[] 26 | instructions: string 27 | voice: string 28 | input_audio_format: string 29 | output_audio_format: string 30 | input_audio_transcription: { 31 | model: string 32 | } 33 | turn_detection: null 34 | temperature: number 35 | max_response_output_tokens: number 36 | tools?: any[] 37 | tool_choice?: string 38 | } 39 | } 40 | 41 | // ユーティリティ関数 42 | export function mergeInt16Arrays( 43 | left: Int16Array | ArrayBuffer, 44 | right: Int16Array | ArrayBuffer 45 | ): Int16Array { 46 | if (left instanceof ArrayBuffer) { 47 | left = new Int16Array(left) 48 | } 49 | if (right instanceof ArrayBuffer) { 50 | right 
= new Int16Array(right) 51 | } 52 | if (!(left instanceof Int16Array) || !(right instanceof Int16Array)) { 53 | throw new Error(`Both items must be Int16Array`) 54 | } 55 | const newValues = new Int16Array(left.length + right.length) 56 | newValues.set(left, 0) 57 | newValues.set(right, left.length) 58 | return newValues 59 | } 60 | 61 | export function base64ToArrayBuffer(base64: string): ArrayBuffer { 62 | const binaryString = window.atob(base64) 63 | const len = binaryString.length 64 | const bytes = new Uint8Array(len) 65 | for (let i = 0; i < len; i++) { 66 | bytes[i] = binaryString.charCodeAt(i) 67 | } 68 | return bytes.buffer 69 | } 70 | -------------------------------------------------------------------------------- /src/components/settings/ai.tsx: -------------------------------------------------------------------------------- 1 | import ExternalLinkage from './externalLinkage' 2 | import ModelProvider from './modelProvider' 3 | import Image from 'next/image' 4 | import { useTranslation } from 'react-i18next' 5 | 6 | const AI = () => { 7 | const { t } = useTranslation() 8 | return ( 9 | <> 10 |
11 | AI Settings 18 |

{t('AISettings')}

19 |
20 | 21 | 22 | 23 | ) 24 | } 25 | export default AI 26 | -------------------------------------------------------------------------------- /src/components/settings/externalLinkage.tsx: -------------------------------------------------------------------------------- 1 | import { useTranslation } from 'react-i18next' 2 | import settingsStore from '@/features/stores/settings' 3 | import { TextButton } from '../textButton' 4 | import { useCallback } from 'react' 5 | 6 | const ExternalLinkage = () => { 7 | const { t } = useTranslation() 8 | const externalLinkageMode = settingsStore((s) => s.externalLinkageMode) 9 | 10 | const handleExternalLinkageModeChange = useCallback((newMode: boolean) => { 11 | settingsStore.setState({ 12 | externalLinkageMode: newMode, 13 | }) 14 | 15 | if (newMode) { 16 | settingsStore.setState({ 17 | conversationContinuityMode: false, 18 | realtimeAPIMode: false, 19 | }) 20 | } 21 | }, []) 22 | 23 | return ( 24 |
25 |
{t('ExternalLinkageMode')}
26 |
27 | { 29 | handleExternalLinkageModeChange(!externalLinkageMode) 30 | }} 31 | > 32 | {externalLinkageMode ? t('StatusOn') : t('StatusOff')} 33 | 34 |
35 |
36 | ) 37 | } 38 | export default ExternalLinkage 39 | -------------------------------------------------------------------------------- /src/components/settings/other.tsx: -------------------------------------------------------------------------------- 1 | import { useTranslation } from 'react-i18next' 2 | import Image from 'next/image' 3 | 4 | import AdvancedSettings from './advancedSettings' 5 | import MessageReceiverSetting from './messageReceiver' 6 | import PresetQuestions from './presetQuestions' 7 | 8 | const Other = () => { 9 | const { t } = useTranslation() 10 | 11 | return ( 12 | <> 13 |
14 | Other Settings 21 |

{t('OtherSettings')}

22 |
23 | 24 | 25 | 26 | 27 | 28 | ) 29 | } 30 | export default Other 31 | -------------------------------------------------------------------------------- /src/components/slideContent.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react' 2 | 3 | interface SlideContentProps { 4 | marpitContainer: Element | null 5 | } 6 | 7 | const SlideContent: React.FC = ({ marpitContainer }) => { 8 | return ( 9 |
10 | {marpitContainer && ( 11 |
15 | )} 16 |
17 | ) 18 | } 19 | 20 | export default SlideContent 21 | -------------------------------------------------------------------------------- /src/components/slideControls.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react' 2 | import { IconButton } from './iconButton' 3 | 4 | interface SlideControlsProps { 5 | currentSlide: number 6 | slideCount: number 7 | isPlaying: boolean 8 | prevSlide: () => void 9 | nextSlide: () => void 10 | toggleIsPlaying: () => void 11 | showPlayButton?: boolean // 中央ボタン表示制御用プロパティ (オプショナル) 12 | } 13 | 14 | const SlideControls: React.FC = ({ 15 | currentSlide, 16 | slideCount, 17 | isPlaying, 18 | prevSlide, 19 | nextSlide, 20 | toggleIsPlaying, 21 | showPlayButton = true, // デフォルトは表示する 22 | }) => { 23 | return ( 24 |
25 | {' '} 26 | {/* Tailwindを使って中央揃えと間隔調整 */} 27 | {/* 各ボタンから mx-16 を削除し、親要素の gap で間隔を制御 */} 28 | 35 | {showPlayButton && ( 36 | 42 | )} 43 | 52 |
53 | ) 54 | } 55 | 56 | export default SlideControls 57 | -------------------------------------------------------------------------------- /src/components/slideText.tsx: -------------------------------------------------------------------------------- 1 | import homeStore from '@/features/stores/home' 2 | 3 | export const SlideText = () => { 4 | const slideMessages = homeStore((s) => s.slideMessages) 5 | return ( 6 |
7 |
8 |
9 | {slideMessages[0] || ' '} 10 |
11 |
12 |
13 | ) 14 | } 15 | -------------------------------------------------------------------------------- /src/components/speakers.json: -------------------------------------------------------------------------------- 1 | [ 2 | { "speaker": "四国めたん/普通", "id": 2 }, 3 | { "speaker": "四国めたん/あまあま", "id": 0 }, 4 | { "speaker": "四国めたん/ツンツン", "id": 6 }, 5 | { "speaker": "四国めたん/セクシー", "id": 4 }, 6 | { "speaker": "四国めたん/ささやき", "id": 36 }, 7 | { "speaker": "四国めたん/ヒソヒソ", "id": 37 }, 8 | { "speaker": "ずんだもん/普通", "id": 3 }, 9 | { "speaker": "ずんだもん/あまあま", "id": 1 }, 10 | { "speaker": "ずんだもん/ツンツン", "id": 7 }, 11 | { "speaker": "ずんだもん/セクシー", "id": 5 }, 12 | { "speaker": "ずんだもん/ささやき", "id": 22 }, 13 | { "speaker": "ずんだもん/ヒソヒソ", "id": 38 }, 14 | { "speaker": "春日部つむ", "id": 8 }, 15 | { "speaker": "雨晴はう", "id": 10 }, 16 | { "speaker": "波音リツ", "id": 9 }, 17 | { "speaker": "玄野武宏/普通", "id": 11 }, 18 | { "speaker": "玄野武宏/悲しみ", "id": 41 }, 19 | { "speaker": "白上虎太郎/ふつう", "id": 12 }, 20 | { "speaker": "白上虎太郎/わーい", "id": 32 }, 21 | { "speaker": "白上虎太郎/びくびく", "id": 33 }, 22 | { "speaker": "白上虎太郎/おこ", "id": 34 }, 23 | { "speaker": "白上虎太郎/びえーん", "id": 35 }, 24 | { "speaker": "青山龍星", "id": 13 }, 25 | { "speaker": "冥鳴ひまり", "id": 14 }, 26 | { "speaker": "九州そら/普通", "id": 16 }, 27 | { "speaker": "九州そら/あまあま", "id": 15 }, 28 | { "speaker": "九州そら/ツンツン", "id": 18 }, 29 | { "speaker": "九州そら/セクシー", "id": 17 }, 30 | { "speaker": "九州そら/ささやき", "id": 19 }, 31 | { "speaker": "もち子さん", "id": 20 }, 32 | { "speaker": "剣崎雌雄", "id": 21 }, 33 | { "speaker": "WhiteCUL/普通", "id": 23 }, 34 | { "speaker": "WhiteCUL/たのしい", "id": 24 }, 35 | { "speaker": "WhiteCUL/かなしい", "id": 25 }, 36 | { "speaker": "WhiteCUL/びえーん", "id": 26 }, 37 | { "speaker": "後鬼/人間ver.", "id": 27 }, 38 | { "speaker": "後鬼/ぬいぐるみver.", "id": 28 }, 39 | { "speaker": "No.7/普通", "id": 29 }, 40 | { "speaker": "No.7/アナウンス", "id": 30 }, 41 | { "speaker": "No.7/読み聞かせ", "id": 31 }, 42 | { "speaker": "ちび式じい", "id": 42 }, 43 | { "speaker": "櫻歌ミコ/普通", "id": 43 }, 44 | { 
"speaker": "櫻歌ミコ/第二形態", "id": 44 }, 45 | { "speaker": "櫻歌ミコ/ロリ", "id": 45 }, 46 | { "speaker": "小夜/SAYO", "id": 46 }, 47 | { "speaker": "ナースロボ_タイプT/普通", "id": 47 }, 48 | { "speaker": "ナースロボ_タイプT/楽々", "id": 48 }, 49 | { "speaker": "ナースロボ_タイプT/恐怖", "id": 49 }, 50 | { "speaker": "ナースロボ_タイプT/内緒話", "id": 50 }, 51 | { "speaker": "†聖騎士 紅桜†", "id": 51 }, 52 | { "speaker": "雀松朱司", "id": 52 }, 53 | { "speaker": "麒ヶ島宗麟", "id": 53 }, 54 | { "speaker": "春歌ナナ", "id": 54 }, 55 | { "speaker": "猫使アル/普通", "id": 55 }, 56 | { "speaker": "猫使アル/おちつき", "id": 56 }, 57 | { "speaker": "猫使アル/うきうき", "id": 57 }, 58 | { "speaker": "猫使ビィ/普通", "id": 58 }, 59 | { "speaker": "猫使ビィ/おちつき", "id": 59 }, 60 | { "speaker": "猫使ビィ/人見知り", "id": 60 } 61 | ] 62 | -------------------------------------------------------------------------------- /src/components/textButton.tsx: -------------------------------------------------------------------------------- 1 | import { ButtonHTMLAttributes } from 'react' 2 | type Props = ButtonHTMLAttributes 3 | 4 | export const TextButton = (props: Props) => { 5 | return ( 6 | 12 | ) 13 | } 14 | -------------------------------------------------------------------------------- /src/components/toast.tsx: -------------------------------------------------------------------------------- 1 | import React, { useEffect } from 'react' 2 | import { IconButton } from './iconButton' 3 | import { useTranslation } from 'react-i18next' 4 | 5 | type ToastProps = { 6 | message: string 7 | type: 'success' | 'error' | 'info' | 'tool' 8 | onClose: () => void 9 | duration?: number 10 | closing?: boolean 11 | } 12 | 13 | export const Toast = ({ 14 | message, 15 | type, 16 | onClose, 17 | duration = 5000, 18 | closing = false, 19 | }: ToastProps) => { 20 | const { t } = useTranslation() 21 | 22 | useEffect(() => { 23 | const timer = setTimeout(() => { 24 | onClose() 25 | }, duration) 26 | 27 | return () => clearTimeout(timer) 28 | }, [onClose, duration]) 29 | 30 | const getIconColor = () => { 31 | 
switch (type) { 32 | case 'success': 33 | return 'text-toast-success' 34 | case 'error': 35 | return 'text-toast-error' 36 | case 'tool': 37 | return 'text-toast-tool' 38 | default: 39 | return 'text-toast-info' 40 | } 41 | } 42 | 43 | const getIconName = () => { 44 | switch (type) { 45 | case 'success': 46 | return '24/Check' 47 | case 'error': 48 | return '24/Error' 49 | default: 50 | return '24/CommentOutline' 51 | } 52 | } 53 | 54 | return ( 55 |
60 |
61 | 68 | {t(message)} 69 |
70 | 77 |
78 | ) 79 | } 80 | -------------------------------------------------------------------------------- /src/components/toasts.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react' 2 | import { Toast } from './toast' 3 | import toastStore from '@/features/stores/toast' 4 | import { useEffect, useState } from 'react' 5 | 6 | export const Toasts: React.FC = () => { 7 | const [toasts, setToasts] = useState(toastStore.getState().toasts) 8 | const closeToast = toastStore((state) => state.closeToast) 9 | 10 | useEffect(() => { 11 | const unsubscribe = toastStore.subscribe((state) => setToasts(state.toasts)) 12 | return () => unsubscribe() 13 | }, []) 14 | 15 | return ( 16 |
17 | {toasts.map((toast) => ( 18 | closeToast(toast.id)} 24 | closing={toast.closing} 25 | /> 26 | ))} 27 |
28 | ) 29 | } 30 | -------------------------------------------------------------------------------- /src/components/useYoutube.tsx: -------------------------------------------------------------------------------- 1 | import { useCallback, useEffect } from 'react' 2 | import homeStore from '@/features/stores/home' 3 | import settingsStore from '@/features/stores/settings' 4 | import { fetchAndProcessComments } from '@/features/youtube/youtubeComments' 5 | 6 | const INTERVAL_MILL_SECONDS_RETRIEVING_COMMENTS = 10000 // 10秒 7 | 8 | interface Params { 9 | handleSendChat: (text: string) => Promise 10 | } 11 | 12 | const useYoutube = ({ handleSendChat }: Params) => { 13 | const youtubePlaying = settingsStore((s) => s.youtubePlaying) 14 | 15 | const fetchAndProcessCommentsCallback = useCallback(async () => { 16 | const ss = settingsStore.getState() 17 | const hs = homeStore.getState() 18 | 19 | if ( 20 | !ss.youtubeLiveId || 21 | !ss.youtubeApiKey || 22 | hs.chatProcessing || 23 | hs.chatProcessingCount > 0 || 24 | !ss.youtubeMode || 25 | !ss.youtubePlaying 26 | ) { 27 | return 28 | } 29 | 30 | console.log('Call fetchAndProcessComments !!!') 31 | await fetchAndProcessComments(handleSendChat) 32 | }, [handleSendChat]) 33 | 34 | useEffect(() => { 35 | if (!youtubePlaying) return 36 | fetchAndProcessCommentsCallback() 37 | 38 | const intervalId = setInterval(() => { 39 | fetchAndProcessCommentsCallback() 40 | }, INTERVAL_MILL_SECONDS_RETRIEVING_COMMENTS) 41 | 42 | return () => clearInterval(intervalId) 43 | }, [youtubePlaying, fetchAndProcessCommentsCallback]) 44 | } 45 | 46 | export default useYoutube 47 | -------------------------------------------------------------------------------- /src/components/vrmViewer.tsx: -------------------------------------------------------------------------------- 1 | import { useCallback } from 'react' 2 | 3 | import homeStore from '@/features/stores/home' 4 | import settingsStore from '@/features/stores/settings' 5 | 6 | export default 
function VrmViewer() { 7 | const canvasRef = useCallback((canvas: HTMLCanvasElement) => { 8 | if (canvas) { 9 | const { viewer } = homeStore.getState() 10 | const { selectedVrmPath } = settingsStore.getState() 11 | viewer.setup(canvas) 12 | viewer.loadVrm(selectedVrmPath) 13 | 14 | // Drag and DropでVRMを差し替え 15 | canvas.addEventListener('dragover', function (event) { 16 | event.preventDefault() 17 | }) 18 | 19 | canvas.addEventListener('drop', function (event) { 20 | event.preventDefault() 21 | 22 | const files = event.dataTransfer?.files 23 | if (!files) { 24 | return 25 | } 26 | 27 | const file = files[0] 28 | if (!file) { 29 | return 30 | } 31 | const file_type = file.name.split('.').pop() 32 | if (file_type === 'vrm') { 33 | const blob = new Blob([file], { type: 'application/octet-stream' }) 34 | const url = window.URL.createObjectURL(blob) 35 | viewer.loadVrm(url) 36 | } else if (file.type.startsWith('image/')) { 37 | const reader = new FileReader() 38 | reader.readAsDataURL(file) 39 | reader.onload = function () { 40 | const image = reader.result as string 41 | image !== '' && homeStore.setState({ modalImage: image }) 42 | } 43 | } 44 | }) 45 | } 46 | }, []) 47 | 48 | return ( 49 |
50 | 51 |
52 | ) 53 | } 54 | -------------------------------------------------------------------------------- /src/components/websocketManager.tsx: -------------------------------------------------------------------------------- 1 | import { FC } from 'react' 2 | import useExternalLinkage from './useExternalLinkage' 3 | import useRealtimeAPI from './useRealtimeAPI' 4 | import { 5 | handleReceiveTextFromWsFn, 6 | handleReceiveTextFromRtFn, 7 | } from '@/features/chat/handlers' 8 | 9 | export const WebSocketManager: FC = () => { 10 | // ハンドラー関数を初期化 11 | const handleReceiveTextFromWs = handleReceiveTextFromWsFn() 12 | const handleReceiveTextFromRt = handleReceiveTextFromRtFn() 13 | 14 | // WebSocket関連の機能をここで初期化 15 | useExternalLinkage({ handleReceiveTextFromWs }) 16 | useRealtimeAPI({ handleReceiveTextFromRt }) 17 | 18 | // このコンポーネントは表示要素を持たない 19 | return null 20 | } 21 | -------------------------------------------------------------------------------- /src/components/youtubeManager.tsx: -------------------------------------------------------------------------------- 1 | import { FC } from 'react' 2 | import useYoutube from './useYoutube' 3 | import { handleSendChatFn } from '@/features/chat/handlers' 4 | 5 | export const YoutubeManager: FC = () => { 6 | const handleSendChat = handleSendChatFn() 7 | 8 | useYoutube({ handleSendChat }) 9 | 10 | return null 11 | } 12 | -------------------------------------------------------------------------------- /src/features/chat/aiChatFactory.ts: -------------------------------------------------------------------------------- 1 | import { Message } from '@/features/messages/messages' 2 | import { AIService } from '@/features/constants/settings' 3 | import { getDifyChatResponseStream } from './difyChat' 4 | import { getVercelAIChatResponseStream } from './vercelAIChat' 5 | import settingsStore from '@/features/stores/settings' 6 | import { getOpenAIAudioChatResponseStream } from '@/features/chat/openAIAudioChat' 7 | 8 | export async function 
getAIChatResponseStream( 9 | messages: Message[] 10 | ): Promise | null> { 11 | const ss = settingsStore.getState() 12 | 13 | if (ss.selectAIService == 'openai' && ss.audioMode) { 14 | return getOpenAIAudioChatResponseStream(messages) 15 | } 16 | 17 | switch (ss.selectAIService as AIService) { 18 | case 'openai': 19 | case 'anthropic': 20 | case 'google': 21 | case 'azure': 22 | case 'xai': 23 | case 'groq': 24 | case 'cohere': 25 | case 'mistralai': 26 | case 'perplexity': 27 | case 'fireworks': 28 | case 'deepseek': 29 | case 'openrouter': 30 | case 'lmstudio': 31 | case 'ollama': 32 | case 'custom-api': 33 | return getVercelAIChatResponseStream(messages) 34 | case 'dify': 35 | return getDifyChatResponseStream( 36 | messages, 37 | ss.difyKey || '', 38 | ss.difyUrl || '', 39 | ss.difyConversationId 40 | ) 41 | default: 42 | throw new Error(`Unsupported AI service: ${ss.selectAIService}`) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /src/features/chat/openAIAudioChat.ts: -------------------------------------------------------------------------------- 1 | import { Message } from '@/features/messages/messages' 2 | import OpenAI from 'openai' 3 | import settingsStore from '@/features/stores/settings' 4 | import homeStore from '@/features/stores/home' 5 | import { handleReceiveTextFromRtFn } from './handlers' 6 | import { 7 | base64ToArrayBuffer, 8 | AudioBufferManager, 9 | } from '@/utils/audioBufferManager' 10 | import { messageSelectors } from '../messages/messageSelectors' 11 | import { ChatCompletionMessageParam } from 'openai/resources/chat/completions' 12 | import { AudioModeModel, RealtimeAPIModeVoice } from '../constants/settings' 13 | import { defaultModels } from '../constants/aiModels' 14 | 15 | export async function getOpenAIAudioChatResponseStream( 16 | messages: Message[] 17 | ): Promise> { 18 | const ss = settingsStore.getState() 19 | const openai = new OpenAI({ 20 | apiKey: ss.openaiKey, 21 | 
dangerouslyAllowBrowser: true, 22 | }) 23 | 24 | try { 25 | const response = await openai.chat.completions.create({ 26 | model: (ss.selectAIModel as AudioModeModel) || defaultModels.openaiAudio, 27 | messages: messageSelectors.getAudioMessages( 28 | messages 29 | ) as ChatCompletionMessageParam[], 30 | stream: true, 31 | modalities: ['text', 'audio'], 32 | audio: { 33 | voice: ss.audioModeVoice as RealtimeAPIModeVoice, 34 | format: 'pcm16', 35 | }, 36 | }) 37 | 38 | return new ReadableStream({ 39 | async start(controller) { 40 | const handleReceiveText = handleReceiveTextFromRtFn() 41 | 42 | const bufferManager = new AudioBufferManager(async (buffer) => { 43 | await handleReceiveText('', 'assistant', 'response.audio', buffer) 44 | }) 45 | 46 | for await (const chunk of response) { 47 | const audio = (chunk.choices[0]?.delta as any)?.audio 48 | if (audio) { 49 | if (audio.transcript) { 50 | controller.enqueue(audio.transcript) 51 | } 52 | if (audio.data) { 53 | bufferManager.addData(base64ToArrayBuffer(audio.data)) 54 | } 55 | if (audio.id) { 56 | homeStore.getState().upsertMessage({ 57 | id: audio.id, // これで同一メッセージを更新 58 | role: 'assistant', 59 | audio: { id: audio.id }, 60 | content: '', 61 | }) 62 | } 63 | } 64 | } 65 | 66 | await bufferManager.flush() 67 | controller.close() 68 | }, 69 | }) 70 | } catch (error) { 71 | console.error('OpenAI Audio API error:', error) 72 | throw error 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /src/features/constants/koeiroParam.ts: -------------------------------------------------------------------------------- 1 | export type KoeiroParam = { 2 | speakerX: number 3 | speakerY: number 4 | } 5 | 6 | export const DEFAULT_PARAM: KoeiroParam = { 7 | speakerX: 3, 8 | speakerY: 3, 9 | } as const 10 | 11 | export const PRESET_A: KoeiroParam = { 12 | speakerX: 4.5, 13 | speakerY: 10, 14 | } as const 15 | 16 | export const PRESET_B: KoeiroParam = { 17 | speakerX: 3, 18 | speakerY: 3, 
19 | } as const 20 | 21 | export const PRESET_C: KoeiroParam = { 22 | speakerX: -5.5, 23 | speakerY: -3, 24 | } as const 25 | 26 | export const PRESET_D: KoeiroParam = { 27 | speakerX: 3.5, 28 | speakerY: -8, 29 | } as const 30 | -------------------------------------------------------------------------------- /src/features/constants/systemPromptConstants.ts: -------------------------------------------------------------------------------- 1 | export const SYSTEM_PROMPT = `あなたはこれからuserと仲の良い1人の人間として振舞い会話を行います。 2 | 感情の種類には通常を示す"neutral"、喜びを示す"happy",怒りを示す"angry",悲しみを示す"sad",安らぎを示す"relaxed"、驚きを示す"surprised"の6つがあります。 3 | 4 | 会話文の書式は以下の通りです。 5 | [{neutral|happy|angry|sad|relaxed|surprised}]{会話文} 6 | 7 | あなたの発言の例は以下通りです。 8 | [neutral]こんにちは。[happy]元気だった? 9 | [happy]この服、可愛いでしょ? 10 | [happy]最近、このショップの服にはまってるんだ! 11 | [sad]忘れちゃった、ごめんね。 12 | [sad]最近、何か面白いことない? 13 | [angry]えー![angry]秘密にするなんてひどいよー! 14 | [neutral]夏休みの予定か~。[happy]海に遊びに行こうかな! 15 | 16 | 返答には最も適切な会話文を一つだけ返答してください。 17 | ですます調や敬語は使わないでください。 18 | それでは会話を始めましょう。` 19 | -------------------------------------------------------------------------------- /src/features/emoteController/autoBlink.ts: -------------------------------------------------------------------------------- 1 | import { VRMExpressionManager } from '@pixiv/three-vrm' 2 | import { BLINK_CLOSE_MAX, BLINK_OPEN_MAX } from './emoteConstants' 3 | 4 | /** 5 | * 自動瞬きを制御するクラス 6 | */ 7 | export class AutoBlink { 8 | private _expressionManager: VRMExpressionManager 9 | private _remainingTime: number 10 | private _isOpen: boolean 11 | private _isAutoBlink: boolean 12 | 13 | constructor(expressionManager: VRMExpressionManager) { 14 | this._expressionManager = expressionManager 15 | this._remainingTime = 0 16 | this._isAutoBlink = true 17 | this._isOpen = true 18 | } 19 | 20 | /** 21 | * 自動瞬きをON/OFFする。 22 | * 23 | * 目を閉じている(blinkが1の)時に感情表現を入れてしまうと不自然になるので、 24 | * 目が開くまでの秒を返し、その時間待ってから感情表現を適用する。 25 | * @param isAuto 26 | * @returns 目が開くまでの秒 27 | */ 28 | public 
setEnable(isAuto: boolean) { 29 | this._isAutoBlink = isAuto 30 | 31 | // 目が閉じている場合、目が開くまでの時間を返す 32 | if (!this._isOpen) { 33 | return this._remainingTime 34 | } 35 | 36 | return 0 37 | } 38 | 39 | public update(delta: number) { 40 | if (this._remainingTime > 0) { 41 | this._remainingTime -= delta 42 | return 43 | } 44 | 45 | if (this._isOpen && this._isAutoBlink) { 46 | this.close() 47 | return 48 | } 49 | 50 | this.open() 51 | } 52 | 53 | private close() { 54 | this._isOpen = false 55 | this._remainingTime = BLINK_CLOSE_MAX 56 | this._expressionManager.setValue('blink', 1) 57 | } 58 | 59 | private open() { 60 | this._isOpen = true 61 | this._remainingTime = BLINK_OPEN_MAX 62 | this._expressionManager.setValue('blink', 0) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /src/features/emoteController/autoLookAt.ts: -------------------------------------------------------------------------------- 1 | import * as THREE from 'three' 2 | import { VRM } from '@pixiv/three-vrm' 3 | /** 4 | * 目線を制御するクラス 5 | * 6 | * サッケードはVRMLookAtSmootherの中でやっているので、 7 | * より目線を大きく動かしたい場合はここに実装する。 8 | */ 9 | export class AutoLookAt { 10 | private _lookAtTarget: THREE.Object3D 11 | constructor(vrm: VRM, camera: THREE.Object3D) { 12 | this._lookAtTarget = new THREE.Object3D() 13 | camera.add(this._lookAtTarget) 14 | 15 | if (vrm.lookAt) vrm.lookAt.target = this._lookAtTarget 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /src/features/emoteController/emoteConstants.ts: -------------------------------------------------------------------------------- 1 | // 瞬きで目を閉じている時間(sec) 2 | export const BLINK_CLOSE_MAX = 0.12 3 | // 瞬きで目を開いている時間(sec) 4 | export const BLINK_OPEN_MAX = 5 5 | -------------------------------------------------------------------------------- /src/features/emoteController/emoteController.ts: -------------------------------------------------------------------------------- 1 | 
import * as THREE from 'three' 2 | import { VRM, VRMExpressionPresetName } from '@pixiv/three-vrm' 3 | import { ExpressionController } from './expressionController' 4 | 5 | /** 6 | * 感情表現としてExpressionとMotionを操作する為のクラス 7 | * デモにはExpressionのみが含まれています 8 | */ 9 | export class EmoteController { 10 | private _expressionController: ExpressionController 11 | 12 | constructor(vrm: VRM, camera: THREE.Object3D) { 13 | this._expressionController = new ExpressionController(vrm, camera) 14 | } 15 | 16 | public playEmotion(preset: VRMExpressionPresetName) { 17 | this._expressionController.playEmotion(preset) 18 | } 19 | 20 | public lipSync(preset: VRMExpressionPresetName, value: number) { 21 | this._expressionController.lipSync(preset, value) 22 | } 23 | 24 | public update(delta: number) { 25 | this._expressionController.update(delta) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /src/features/emoteController/expressionController.ts: -------------------------------------------------------------------------------- 1 | import * as THREE from 'three' 2 | import { 3 | VRM, 4 | VRMExpressionManager, 5 | VRMExpressionPresetName, 6 | } from '@pixiv/three-vrm' 7 | import { AutoLookAt } from './autoLookAt' 8 | import { AutoBlink } from './autoBlink' 9 | 10 | /** 11 | * Expressionを管理するクラス 12 | * 13 | * 主に前の表情を保持しておいて次の表情を適用する際に0に戻す作業や、 14 | * 前の表情が終わるまで待ってから表情適用する役割を持っている。 15 | */ 16 | export class ExpressionController { 17 | private _autoLookAt: AutoLookAt 18 | private _autoBlink?: AutoBlink 19 | private _expressionManager?: VRMExpressionManager 20 | private _currentEmotion: VRMExpressionPresetName 21 | private _currentLipSync: { 22 | preset: VRMExpressionPresetName 23 | value: number 24 | } | null 25 | constructor(vrm: VRM, camera: THREE.Object3D) { 26 | this._autoLookAt = new AutoLookAt(vrm, camera) 27 | this._currentEmotion = 'neutral' 28 | this._currentLipSync = null 29 | if (vrm.expressionManager) { 30 | this._expressionManager = 
vrm.expressionManager 31 | this._autoBlink = new AutoBlink(vrm.expressionManager) 32 | } 33 | } 34 | 35 | public playEmotion(preset: VRMExpressionPresetName) { 36 | if (this._currentEmotion != 'neutral') { 37 | this._expressionManager?.setValue(this._currentEmotion, 0) 38 | } 39 | 40 | if (preset == 'neutral') { 41 | this._autoBlink?.setEnable(true) 42 | this._currentEmotion = preset 43 | return 44 | } 45 | 46 | const t = this._autoBlink?.setEnable(false) || 0 47 | this._currentEmotion = preset 48 | setTimeout(() => { 49 | this._expressionManager?.setValue(preset, 1) 50 | }, t * 1000) 51 | } 52 | 53 | public lipSync(preset: VRMExpressionPresetName, value: number) { 54 | if (this._currentLipSync) { 55 | this._expressionManager?.setValue(this._currentLipSync.preset, 0) 56 | } 57 | this._currentLipSync = { 58 | preset, 59 | value, 60 | } 61 | } 62 | 63 | public update(delta: number) { 64 | if (this._autoBlink) { 65 | this._autoBlink.update(delta) 66 | } 67 | 68 | if (this._currentLipSync) { 69 | const weight = 70 | this._currentEmotion === 'neutral' 71 | ? 
this._currentLipSync.value * 0.5 72 | : this._currentLipSync.value * 0.25 73 | this._expressionManager?.setValue(this._currentLipSync.preset, weight) 74 | } 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/features/lipSync/lipSyncAnalyzeResult.ts: -------------------------------------------------------------------------------- 1 | export interface LipSyncAnalyzeResult { 2 | volume: number 3 | } 4 | -------------------------------------------------------------------------------- /src/features/messages/messages.ts: -------------------------------------------------------------------------------- 1 | export type Message = { 2 | id?: string 3 | role: string // "assistant" | "system" | "user"; 4 | content?: 5 | | string 6 | | [{ type: 'text'; text: string }, { type: 'image'; image: string }] // マルチモーダル拡張 7 | audio?: { id: string } 8 | timestamp?: string 9 | } 10 | 11 | export const EMOTIONS = [ 12 | 'neutral', 13 | 'happy', 14 | 'angry', 15 | 'sad', 16 | 'relaxed', 17 | 'surprised', 18 | ] as const 19 | export type EmotionType = (typeof EMOTIONS)[number] 20 | 21 | export type Talk = { 22 | emotion: EmotionType 23 | message: string 24 | buffer?: ArrayBuffer 25 | } 26 | 27 | export const splitSentence = (text: string): string[] => { 28 | const splitMessages = text.split(/(?<=[。.!?\n])/g) 29 | return splitMessages.filter((msg) => msg !== '') 30 | } 31 | -------------------------------------------------------------------------------- /src/features/messages/synthesizeStyleBertVITS2.ts: -------------------------------------------------------------------------------- 1 | import { Talk } from './messages' 2 | import { Language } from '@/features/constants/settings' 3 | 4 | export async function synthesizeStyleBertVITS2Api( 5 | talk: Talk, 6 | stylebertvits2ServerUrl: string, 7 | stylebertvits2ApiKey: string, 8 | stylebertvits2ModelId: string, 9 | stylebertvits2Style: string, 10 | stylebertvits2SdpRatio: number, 11 | 
stylebertvits2Length: number, 12 | selectLanguage: Language 13 | ) { 14 | try { 15 | const body = { 16 | message: talk.message, 17 | stylebertvits2ServerUrl: stylebertvits2ServerUrl, 18 | stylebertvits2ApiKey: stylebertvits2ApiKey, 19 | stylebertvits2ModelId: stylebertvits2ModelId, 20 | stylebertvits2Style: stylebertvits2Style, 21 | stylebertvits2SdpRatio: stylebertvits2SdpRatio, 22 | stylebertvits2Length: stylebertvits2Length, 23 | selectLanguage: selectLanguage, 24 | type: 'stylebertvits2', 25 | } 26 | 27 | const res = await fetch('/api/stylebertvits2', { 28 | method: 'POST', 29 | headers: { 30 | 'Content-Type': 'application/json', 31 | }, 32 | body: JSON.stringify(body), 33 | }) 34 | 35 | if (!res.ok) { 36 | throw new Error( 37 | `StyleBertVITS2 APIからの応答が異常です。ステータスコード: ${res.status}` 38 | ) 39 | } 40 | 41 | const buffer = await res.arrayBuffer() 42 | return buffer 43 | } catch (error) { 44 | if (error instanceof Error) { 45 | throw new Error(`StyleBertVITS2でエラーが発生しました: ${error.message}`) 46 | } else { 47 | throw new Error('StyleBertVITS2で不明なエラーが発生しました') 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/features/messages/synthesizeVoiceAivisSpeech.ts: -------------------------------------------------------------------------------- 1 | import { Talk } from './messages' 2 | 3 | export async function synthesizeVoiceAivisSpeechApi( 4 | talk: Talk, 5 | speaker: string, 6 | speed: number, 7 | pitch: number, 8 | intonation: number, 9 | serverUrl: string 10 | ): Promise { 11 | try { 12 | const res = await fetch('/api/tts-aivisspeech', { 13 | method: 'POST', 14 | headers: { 15 | 'Content-Type': 'application/json', 16 | }, 17 | body: JSON.stringify({ 18 | text: talk.message, 19 | speaker, 20 | speed, 21 | pitch, 22 | intonation, 23 | serverUrl, 24 | }), 25 | }) 26 | 27 | if (!res.ok) { 28 | throw new Error( 29 | `AivisSpeechからの応答が異常です。ステータスコード: ${res.status}` 30 | ) 31 | } 32 | 33 | return await 
res.arrayBuffer() 34 | } catch (error) { 35 | if (error instanceof Error) { 36 | throw new Error(`AivisSpeechでエラーが発生しました: ${error.message}`) 37 | } else { 38 | throw new Error('AivisSpeechで不明なエラーが発生しました') 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/features/messages/synthesizeVoiceAzureOpenAI.ts: -------------------------------------------------------------------------------- 1 | import { Talk } from './messages' 2 | 3 | export async function synthesizeVoiceAzureOpenAIApi( 4 | talk: Talk, 5 | apiKey: string, 6 | azureTTSEndpoint: string, 7 | voice: string, 8 | speed: number 9 | ): Promise { 10 | const response = await fetch('/api/azureOpenAITTS', { 11 | method: 'POST', 12 | headers: { 13 | 'Content-Type': 'application/json', 14 | }, 15 | body: JSON.stringify({ 16 | message: talk.message, 17 | voice, 18 | speed, 19 | apiKey, 20 | azureTTSEndpoint, 21 | }), 22 | }) 23 | 24 | if (!response.ok) { 25 | throw new Error('Failed to generate speech') 26 | } 27 | 28 | return await response.arrayBuffer() 29 | } 30 | -------------------------------------------------------------------------------- /src/features/messages/synthesizeVoiceElevenlabs.ts: -------------------------------------------------------------------------------- 1 | import { Talk } from './messages' 2 | import { Language } from '@/features/constants/settings' 3 | 4 | export async function synthesizeVoiceElevenlabsApi( 5 | talk: Talk, 6 | apiKey: string, 7 | voiceId: string, 8 | language: Language 9 | ) { 10 | try { 11 | const body = { 12 | message: talk.message, 13 | voiceId, 14 | apiKey, 15 | language, 16 | } 17 | 18 | const res = await fetch('/api/elevenLabs', { 19 | method: 'POST', 20 | headers: { 21 | 'Content-Type': 'application/json', 22 | }, 23 | body: JSON.stringify(body), 24 | }) 25 | 26 | if (!res.ok) { 27 | throw new Error( 28 | `ElevenLabs APIからの応答が異常です。ステータスコード: ${res.status}` 29 | ) 30 | } 31 | 32 | const buffer = await 
res.arrayBuffer() 33 | 34 | return buffer 35 | } catch (error) { 36 | if (error instanceof Error) { 37 | throw new Error(`ElevenLabsでエラーが発生しました: ${error.message}`) 38 | } else { 39 | throw new Error('ElevenLabsで不明なエラーが発生しました') 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /src/features/messages/synthesizeVoiceGSVI.ts: -------------------------------------------------------------------------------- 1 | import { Talk } from './messages' 2 | 3 | export async function synthesizeVoiceGSVIApi( 4 | talk: Talk, 5 | url: string, 6 | character: string, 7 | batchsize: number, 8 | speed: number 9 | ): Promise { 10 | try { 11 | const style = 'default' 12 | const response = await fetch(url.replace(/\/$/, ''), { 13 | method: 'POST', 14 | headers: { 15 | 'Content-Type': 'application/json', 16 | }, 17 | body: JSON.stringify({ 18 | character: character, 19 | emotion: style, 20 | text: talk.message, 21 | batch_size: batchsize, 22 | speed: speed.toString(), 23 | stream: true, 24 | }), 25 | }) 26 | 27 | if (!response.ok) { 28 | throw new Error( 29 | `GSVI APIからの応答が異常です。ステータスコード: ${response.status}` 30 | ) 31 | } 32 | 33 | const blob = await response.blob() 34 | const buffer = await blob.arrayBuffer() 35 | return buffer 36 | } catch (error) { 37 | if (error instanceof Error) { 38 | throw new Error(`GSVIでエラーが発生しました: ${error.message}`) 39 | } else { 40 | throw new Error('GSVIで不明なエラーが発生しました') 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/features/messages/synthesizeVoiceKoeiromap.ts: -------------------------------------------------------------------------------- 1 | import { EmotionType, Talk } from './messages' 2 | import { KoeiroParam } from '@/features/constants/koeiroParam' 3 | 4 | export async function synthesizeVoiceKoeiromapApi( 5 | talk: Talk, 6 | apiKey: string, 7 | koeiroParam: KoeiroParam 8 | ) { 9 | try { 10 | const reducedStyle = 
emotionToTalkStyle(talk.emotion) 11 | 12 | const body = { 13 | message: talk.message, 14 | speakerX: koeiroParam.speakerX, 15 | speakerY: koeiroParam.speakerY, 16 | style: reducedStyle, 17 | apiKey: apiKey, 18 | } 19 | 20 | const res = await fetch('/api/tts-koeiromap', { 21 | method: 'POST', 22 | headers: { 23 | 'Content-Type': 'application/json', 24 | }, 25 | body: JSON.stringify(body), 26 | }) 27 | 28 | if (!res.ok) { 29 | throw new Error( 30 | `Koeiromap APIからの応答が異常です。ステータスコード: ${res.status}` 31 | ) 32 | } 33 | 34 | const data = await res.json() 35 | const url = data.audio 36 | 37 | if (url == null) { 38 | throw new Error('Koeiromap APIから音声URLが返されませんでした') 39 | } 40 | 41 | const resAudio = await fetch(url) 42 | if (!resAudio.ok) { 43 | throw new Error( 44 | `Koeiromap音声ファイルの取得に失敗しました。ステータスコード: ${resAudio.status}` 45 | ) 46 | } 47 | 48 | const buffer = await resAudio.arrayBuffer() 49 | return buffer 50 | } catch (error) { 51 | if (error instanceof Error) { 52 | throw new Error(`Koeiromapでエラーが発生しました: ${error.message}`) 53 | } else { 54 | throw new Error('Koeiromapで不明なエラーが発生しました') 55 | } 56 | } 57 | } 58 | 59 | const emotionToTalkStyle = (emotion: EmotionType): string => { 60 | switch (emotion) { 61 | case 'angry': 62 | return 'angry' 63 | case 'happy': 64 | return 'happy' 65 | case 'sad': 66 | return 'sad' 67 | default: 68 | return 'talk' 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/features/messages/synthesizeVoiceNijivoice.ts: -------------------------------------------------------------------------------- 1 | import { Talk } from './messages' 2 | 3 | export async function synthesizeVoiceNijivoiceApi( 4 | talk: Talk, 5 | apiKey: string, 6 | voiceActorId: string, 7 | speed: number, 8 | emotionalLevel: number, 9 | soundDuration: number 10 | ) { 11 | try { 12 | const res = await fetch('/api/tts-nijivoice', { 13 | method: 'POST', 14 | headers: { 15 | 'Content-Type': 'application/json', 16 | }, 17 | body: 
JSON.stringify({ 18 | script: talk.message, 19 | speed, 20 | voiceActorId, 21 | apiKey, 22 | emotionalLevel, 23 | soundDuration, 24 | }), 25 | }) 26 | 27 | if (!res.ok) { 28 | throw new Error( 29 | `Nijivoice APIからの応答が異常です。ステータスコード: ${res.status}` 30 | ) 31 | } 32 | 33 | return await res.arrayBuffer() 34 | } catch (error) { 35 | if (error instanceof Error) { 36 | throw new Error(`Nijivoiceでエラーが発生しました: ${error.message}`) 37 | } else { 38 | throw new Error('Nijivoiceで不明なエラーが発生しました') 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/features/messages/synthesizeVoiceOpenAI.ts: -------------------------------------------------------------------------------- 1 | import { Talk } from './messages' 2 | import { Language } from '@/features/constants/settings' 3 | 4 | export async function synthesizeVoiceOpenAIApi( 5 | talk: Talk, 6 | apiKey: string, 7 | voice: string, 8 | model: string, 9 | speed: number 10 | ) { 11 | try { 12 | const body = { 13 | message: talk.message, 14 | voice: voice, 15 | model: model, 16 | speed: speed, 17 | apiKey: apiKey, 18 | } 19 | 20 | const res = await fetch('/api/openAITTS', { 21 | method: 'POST', 22 | headers: { 23 | 'Content-Type': 'application/json', 24 | }, 25 | body: JSON.stringify(body), 26 | }) 27 | 28 | if (!res.ok) { 29 | throw new Error( 30 | `OpenAI APIからの応答が異常です。ステータスコード: ${res.status}` 31 | ) 32 | } 33 | 34 | const buffer = await res.arrayBuffer() 35 | return buffer 36 | } catch (error) { 37 | if (error instanceof Error) { 38 | throw new Error(`OpenAI TTSでエラーが発生しました: ${error.message}`) 39 | } else { 40 | throw new Error('OpenAI TTSで不明なエラーが発生しました') 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/features/messages/synthesizeVoiceVoicevox.ts: -------------------------------------------------------------------------------- 1 | import { Talk } from './messages' 2 | 3 | export async function 
synthesizeVoiceVoicevoxApi( 4 | talk: Talk, 5 | speaker: string, 6 | speed: number, 7 | pitch: number, 8 | intonation: number, 9 | serverUrl: string 10 | ): Promise { 11 | try { 12 | const res = await fetch('/api/tts-voicevox', { 13 | method: 'POST', 14 | headers: { 15 | 'Content-Type': 'application/json', 16 | }, 17 | body: JSON.stringify({ 18 | text: talk.message, 19 | speaker, 20 | speed, 21 | pitch, 22 | intonation, 23 | serverUrl, 24 | }), 25 | }) 26 | 27 | if (!res.ok) { 28 | throw new Error( 29 | `VOICEVOXからの応答が異常です。ステータスコード: ${res.status}` 30 | ) 31 | } 32 | 33 | return await res.arrayBuffer() 34 | } catch (error) { 35 | if (error instanceof Error) { 36 | throw new Error(`VOICEVOXでエラーが発生しました: ${error.message}`) 37 | } else { 38 | throw new Error('VOICEVOXで不明なエラーが発生しました') 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/features/stores/menu.ts: -------------------------------------------------------------------------------- 1 | import { create } from 'zustand' 2 | 3 | type SettingsTabKey = 4 | | 'description' 5 | | 'based' 6 | | 'character' 7 | | 'ai' 8 | | 'voice' 9 | | 'speechInput' 10 | | 'youtube' 11 | | 'slide' 12 | | 'log' 13 | | 'other' 14 | interface MenuState { 15 | showWebcam: boolean 16 | showCapture: boolean 17 | fileInput: HTMLInputElement | null 18 | bgFileInput: HTMLInputElement | null 19 | slideVisible: boolean 20 | activeSettingsTab: SettingsTabKey 21 | } 22 | 23 | const menuStore = create((set, get) => ({ 24 | showWebcam: false, 25 | showCapture: false, 26 | fileInput: null, 27 | bgFileInput: null, 28 | slideVisible: false, 29 | activeSettingsTab: 'description', 30 | })) 31 | 32 | export default menuStore 33 | -------------------------------------------------------------------------------- /src/features/stores/slide.ts: -------------------------------------------------------------------------------- 1 | import { create } from 'zustand' 2 | import { persist } from 
'zustand/middleware' 3 | 4 | interface SlideState { 5 | isPlaying: boolean 6 | currentSlide: number 7 | selectedSlideDocs: string 8 | } 9 | 10 | const slideStore = create()( 11 | persist( 12 | (set, get) => ({ 13 | isPlaying: false, 14 | currentSlide: 0, 15 | selectedSlideDocs: '', 16 | }), 17 | { 18 | name: 'aitube-kit-slide', 19 | partialize: (state) => ({ selectedSlideDocs: state.selectedSlideDocs }), 20 | } 21 | ) 22 | ) 23 | 24 | export default slideStore 25 | -------------------------------------------------------------------------------- /src/features/stores/toast.ts: -------------------------------------------------------------------------------- 1 | import { create } from 'zustand' 2 | 3 | export interface Toast { 4 | id: string 5 | message: string 6 | type: 'success' | 'error' | 'info' | 'tool' 7 | duration?: number 8 | tag?: string 9 | closing?: boolean 10 | } 11 | 12 | interface ToastState { 13 | toasts: Toast[] 14 | addToast: (toast: Omit) => string | null 15 | removeToast: (identifier: string) => void 16 | closeToast: (identifier: string) => void 17 | } 18 | 19 | const toastStore = create((set, get) => ({ 20 | toasts: [], 21 | addToast: (toast) => { 22 | const { tag } = toast 23 | const currentToasts = get().toasts 24 | 25 | const filteredToasts = tag 26 | ? currentToasts.filter((t) => t.tag !== tag) 27 | : currentToasts 28 | 29 | const id = Math.random().toString(36).substring(2, 11) 30 | set(() => ({ 31 | toasts: [...filteredToasts, { ...toast, id }], 32 | })) 33 | return id 34 | }, 35 | removeToast: (identifier) => 36 | set((state) => ({ 37 | toasts: state.toasts.filter( 38 | (toast) => toast.id !== identifier && toast.tag !== identifier 39 | ), 40 | })), 41 | closeToast: (identifier) => { 42 | set((state) => ({ 43 | toasts: state.toasts.map((toast) => 44 | toast.id === identifier || toast.tag === identifier 45 | ? 
{ ...toast, closing: true } 46 | : toast 47 | ), 48 | })) 49 | setTimeout(() => { 50 | set((state) => ({ 51 | toasts: state.toasts.filter( 52 | (toast) => toast.id !== identifier && toast.tag !== identifier 53 | ), 54 | })) 55 | }, 300) 56 | }, 57 | })) 58 | 59 | export default toastStore 60 | -------------------------------------------------------------------------------- /src/features/stores/websocketStore.ts: -------------------------------------------------------------------------------- 1 | import { create } from 'zustand' 2 | import { WebSocketManager } from '@/utils/WebSocketManager' 3 | import { TmpMessage } from '@/components/realtimeAPIUtils' 4 | 5 | interface WebSocketState { 6 | wsManager: WebSocketManager | null 7 | initializeWebSocket: ( 8 | t: (key: string, options?: any) => string, 9 | handlers: { 10 | onOpen?: (event: Event) => void 11 | onMessage?: (event: MessageEvent) => Promise 12 | onError?: (event: Event) => void 13 | onClose?: (event: Event) => void 14 | }, 15 | connectWebsocket: () => WebSocket | null 16 | ) => void 17 | disconnect: () => void 18 | reconnect: () => boolean 19 | } 20 | 21 | const webSocketStore = create((set, get) => ({ 22 | wsManager: null, 23 | initializeWebSocket: (t, handlers = {}, connectWebsocket) => { 24 | const defaultHandlers = { 25 | onOpen: (event: Event) => {}, 26 | onMessage: async (event: MessageEvent) => {}, 27 | onError: (event: Event) => {}, 28 | onClose: (event: Event) => {}, 29 | ...handlers, 30 | connectWebsocket, 31 | } 32 | const manager = new WebSocketManager(t, defaultHandlers, connectWebsocket) 33 | manager.connect() 34 | set({ wsManager: manager }) 35 | }, 36 | disconnect: () => { 37 | const { wsManager } = get() 38 | wsManager?.disconnect() 39 | set({ wsManager: null }) 40 | }, 41 | reconnect: () => { 42 | const { wsManager } = get() 43 | return wsManager ? 
wsManager.reconnect() : false 44 | }, 45 | })) 46 | 47 | export default webSocketStore 48 | -------------------------------------------------------------------------------- /src/lib/VRMAnimation/VRMAnimationLoaderPluginOptions.ts: -------------------------------------------------------------------------------- 1 | export interface VRMAnimationLoaderPluginOptions {} 2 | -------------------------------------------------------------------------------- /src/lib/VRMAnimation/VRMCVRMAnimation.ts: -------------------------------------------------------------------------------- 1 | import { VRMExpressionPresetName, VRMHumanBoneName } from '@pixiv/three-vrm' 2 | 3 | export interface VRMCVRMAnimation { 4 | specVersion: string 5 | humanoid: { 6 | humanBones: { 7 | [name in VRMHumanBoneName]?: { 8 | node: number 9 | } 10 | } 11 | } 12 | expressions?: { 13 | preset?: { 14 | [name in VRMExpressionPresetName]?: { 15 | node: number 16 | } 17 | } 18 | custom?: { 19 | [name: string]: { 20 | node: number 21 | } 22 | } 23 | } 24 | lookAt?: { 25 | node: number 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /src/lib/VRMAnimation/loadVRMAnimation.ts: -------------------------------------------------------------------------------- 1 | import { GLTFLoader } from 'three/examples/jsm/loaders/GLTFLoader.js' 2 | import { VRMAnimation } from './VRMAnimation' 3 | import { VRMAnimationLoaderPlugin } from './VRMAnimationLoaderPlugin' 4 | 5 | const loader = new GLTFLoader() 6 | loader.register((parser) => new VRMAnimationLoaderPlugin(parser)) 7 | 8 | export async function loadVRMAnimation( 9 | url: string 10 | ): Promise { 11 | const gltf = await loader.loadAsync(url) 12 | 13 | const vrmAnimations: VRMAnimation[] = gltf.userData.vrmAnimations 14 | const vrmAnimation: VRMAnimation | undefined = vrmAnimations[0] 15 | 16 | return vrmAnimation ?? 
null 17 | } 18 | -------------------------------------------------------------------------------- /src/lib/VRMAnimation/utils/arrayChunk.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * ```js 3 | * arrayChunk( [ 1, 2, 3, 4, 5, 6 ], 2 ) 4 | * // will be 5 | * [ [ 1, 2 ], [ 3, 4 ], [ 5, 6 ] ] 6 | * ``` 7 | */ 8 | export function arrayChunk(array: ArrayLike, every: number): T[][] { 9 | const N = array.length 10 | 11 | const ret: T[][] = [] 12 | 13 | let current: T[] = [] 14 | let remaining = 0 15 | 16 | for (let i = 0; i < N; i++) { 17 | const el = array[i] 18 | 19 | if (remaining <= 0) { 20 | remaining = every 21 | current = [] 22 | ret.push(current) 23 | } 24 | 25 | current.push(el) 26 | remaining-- 27 | } 28 | 29 | return ret 30 | } 31 | -------------------------------------------------------------------------------- /src/lib/VRMAnimation/utils/linearstep.ts: -------------------------------------------------------------------------------- 1 | import { saturate } from './saturate' 2 | 3 | export const linearstep = (a: number, b: number, t: number) => 4 | saturate((t - a) / (b - a)) 5 | -------------------------------------------------------------------------------- /src/lib/VRMAnimation/utils/saturate.ts: -------------------------------------------------------------------------------- 1 | export const saturate = (x: number) => Math.min(Math.max(x, 0.0), 1.0) 2 | -------------------------------------------------------------------------------- /src/lib/VRMLookAtSmootherLoaderPlugin/VRMLookAtSmootherLoaderPlugin.ts: -------------------------------------------------------------------------------- 1 | import { VRMHumanoid, VRMLookAt, VRMLookAtLoaderPlugin } from '@pixiv/three-vrm' 2 | import { GLTF } from 'three/examples/jsm/loaders/GLTFLoader.js' 3 | import { VRMLookAtSmoother } from './VRMLookAtSmoother' 4 | 5 | export class VRMLookAtSmootherLoaderPlugin extends VRMLookAtLoaderPlugin { 6 | public get name(): string { 7 
| return 'VRMLookAtSmootherLoaderPlugin' 8 | } 9 | 10 | public async afterRoot(gltf: GLTF): Promise { 11 | await super.afterRoot(gltf) 12 | 13 | const humanoid = gltf.userData.vrmHumanoid as VRMHumanoid | null 14 | const lookAt = gltf.userData.vrmLookAt as VRMLookAt | null 15 | 16 | if (humanoid != null && lookAt != null) { 17 | const lookAtSmoother = new VRMLookAtSmoother(humanoid, lookAt.applier) 18 | lookAtSmoother.copy(lookAt) 19 | gltf.userData.vrmLookAt = lookAtSmoother 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /src/lib/i18n.js: -------------------------------------------------------------------------------- 1 | import i18n from 'i18next' 2 | import { initReactI18next } from 'react-i18next' 3 | 4 | i18n.use(initReactI18next).init({ 5 | resources: { 6 | en: { 7 | translation: require('../../locales/en/translation.json'), 8 | }, 9 | ja: { 10 | translation: require('../../locales/ja/translation.json'), 11 | }, 12 | zh: { 13 | translation: require('../../locales/zh/translation.json'), 14 | }, 15 | ko: { 16 | translation: require('../../locales/ko/translation.json'), 17 | }, 18 | vi: { 19 | translation: require('../../locales/vi/translation.json'), 20 | }, 21 | fr: { 22 | translation: require('../../locales/fr/translation.json'), 23 | }, 24 | es: { 25 | translation: require('../../locales/es/translation.json'), 26 | }, 27 | pt: { 28 | translation: require('../../locales/pt/translation.json'), 29 | }, 30 | de: { 31 | translation: require('../../locales/de/translation.json'), 32 | }, 33 | ru: { 34 | translation: require('../../locales/ru/translation.json'), 35 | }, 36 | it: { 37 | translation: require('../../locales/it/translation.json'), 38 | }, 39 | ar: { 40 | translation: require('../../locales/ar/translation.json'), 41 | }, 42 | hi: { 43 | translation: require('../../locales/hi/translation.json'), 44 | }, 45 | pl: { 46 | translation: require('../../locales/pl/translation.json'), 47 | }, 48 | th: { 
49 | translation: require('../../locales/th/translation.json'), 50 | }, 51 | }, 52 | lng: 'ja', 53 | fallbackLng: 'ja', 54 | interpolation: { 55 | escapeValue: false, 56 | }, 57 | }) 58 | 59 | export default i18n 60 | -------------------------------------------------------------------------------- /src/pages/_app.tsx: -------------------------------------------------------------------------------- 1 | import '@charcoal-ui/icons' 2 | import type { AppProps } from 'next/app' 3 | import React, { useEffect } from 'react' 4 | import { Analytics } from '@vercel/analytics/react' 5 | 6 | import { isLanguageSupported } from '@/features/constants/settings' 7 | import homeStore from '@/features/stores/home' 8 | import settingsStore from '@/features/stores/settings' 9 | import '@/styles/globals.css' 10 | import migrateStore from '@/utils/migrateStore' 11 | import i18n from '../lib/i18n' 12 | 13 | export default function App({ Component, pageProps }: AppProps) { 14 | useEffect(() => { 15 | const hs = homeStore.getState() 16 | const ss = settingsStore.getState() 17 | 18 | if (hs.userOnboarded) { 19 | i18n.changeLanguage(ss.selectLanguage) 20 | return 21 | } 22 | 23 | migrateStore() 24 | 25 | const browserLanguage = navigator.language 26 | const languageCode = browserLanguage.match(/^zh/i) 27 | ? 'zh' 28 | : browserLanguage.split('-')[0].toLowerCase() 29 | 30 | let language = ss.selectLanguage 31 | if (!language) { 32 | language = isLanguageSupported(languageCode) ? 
languageCode : 'ja' 33 | } 34 | i18n.changeLanguage(language) 35 | settingsStore.setState({ selectLanguage: language }) 36 | 37 | homeStore.setState({ userOnboarded: true }) 38 | }, []) 39 | 40 | return ( 41 | <> 42 | 43 | 44 | 45 | ) 46 | } 47 | -------------------------------------------------------------------------------- /src/pages/_document.tsx: -------------------------------------------------------------------------------- 1 | import { Html, Head, Main, NextScript } from 'next/document' 2 | 3 | export default function Document() { 4 | return ( 5 | 6 | 7 | 8 | 13 | 17 | 18 | 19 |
20 | 21 | 22 | 23 | ) 24 | } 25 | -------------------------------------------------------------------------------- /src/pages/api/ai/custom.ts: -------------------------------------------------------------------------------- 1 | import { Message } from '@/features/messages/messages' 2 | import { NextRequest } from 'next/server' 3 | import { handleCustomApi } from '../services/customApi' 4 | 5 | export const config = { 6 | runtime: 'edge', 7 | } 8 | 9 | export default async function handler(req: NextRequest) { 10 | if (req.method !== 'POST') { 11 | return new Response( 12 | JSON.stringify({ 13 | error: 'Method Not Allowed', 14 | errorCode: 'METHOD_NOT_ALLOWED', 15 | }), 16 | { 17 | status: 405, 18 | headers: { 'Content-Type': 'application/json' }, 19 | } 20 | ) 21 | } 22 | 23 | const { 24 | messages, 25 | stream, 26 | customApiUrl = '', 27 | customApiHeaders = '{}', 28 | customApiBody = '{}', 29 | } = await req.json() 30 | 31 | try { 32 | return await handleCustomApi( 33 | messages, 34 | customApiUrl, 35 | customApiHeaders === '' ? '{}' : customApiHeaders, 36 | customApiBody === '' ? 
'{}' : customApiBody, 37 | stream 38 | ) 39 | } catch (error) { 40 | console.error('Error in Custom API call:', error) 41 | 42 | return new Response( 43 | JSON.stringify({ 44 | error: 'Unexpected Error', 45 | errorCode: 'CustomAPIError', 46 | }), 47 | { 48 | status: 500, 49 | headers: { 'Content-Type': 'application/json' }, 50 | } 51 | ) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/pages/api/azureOpenAITTS.ts: -------------------------------------------------------------------------------- 1 | import { NextApiRequest, NextApiResponse } from 'next' 2 | import { AzureOpenAI } from 'openai' 3 | 4 | export default async function handler( 5 | req: NextApiRequest, 6 | res: NextApiResponse 7 | ) { 8 | if (req.method !== 'POST') { 9 | return res.status(405).json({ error: 'Method not allowed' }) 10 | } 11 | 12 | const { message, voice, speed, apiKey, endpoint } = req.body 13 | 14 | const azureTTSKey = apiKey || process.env.AZURE_TTS_KEY 15 | const azureTTSEndpoint = endpoint || process.env.AZURE_TTS_ENDPOINT 16 | 17 | if (!message || !voice || !speed || !azureTTSKey || !azureTTSEndpoint) { 18 | return res.status(400).json({ error: 'Missing required parameters' }) 19 | } 20 | 21 | try { 22 | const url = new URL(azureTTSEndpoint) 23 | const pathParts = url.pathname.split('/') 24 | let deploymentName = pathParts.find((part) => part === 'deployments') 25 | ? 
pathParts[pathParts.indexOf('deployments') + 1] 26 | : 'tts' 27 | const apiVersion = 28 | url.searchParams.get('api-version') || '2024-02-15-preview' 29 | 30 | const azureOpenAI = new AzureOpenAI({ 31 | apiKey: apiKey, 32 | endpoint: azureTTSEndpoint, 33 | apiVersion: apiVersion, 34 | deployment: deploymentName, 35 | }) 36 | 37 | const mp3 = await azureOpenAI.audio.speech.create({ 38 | model: deploymentName, 39 | voice: voice, 40 | input: message, 41 | speed: speed, 42 | }) 43 | 44 | const buffer = Buffer.from(await mp3.arrayBuffer()) 45 | 46 | res.setHeader('Content-Type', 'audio/mpeg') 47 | res.send(buffer) 48 | } catch (error) { 49 | console.error('Azure OpenAI TTS error:', error) 50 | res.status(500).json({ error: 'Failed to generate speech' }) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/pages/api/convertMarkdown.ts: -------------------------------------------------------------------------------- 1 | import { NextApiRequest, NextApiResponse } from 'next' 2 | import { Marpit } from '@marp-team/marpit' 3 | import fs from 'fs/promises' 4 | import path from 'path' 5 | 6 | export default async function handler( 7 | req: NextApiRequest, 8 | res: NextApiResponse 9 | ) { 10 | if (req.method === 'POST') { 11 | const { slideName } = req.body as { slideName: string } 12 | 13 | if (!slideName) { 14 | return res.status(400).json({ message: 'slideName is required' }) 15 | } 16 | 17 | try { 18 | const markdownPath = path.join( 19 | process.cwd(), 20 | 'public', 21 | 'slides', 22 | slideName, 23 | 'slides.md' 24 | ) 25 | const markdown = await fs.readFile(markdownPath, 'utf-8') 26 | 27 | let css = '' 28 | try { 29 | const cssPath = path.join( 30 | process.cwd(), 31 | 'public', 32 | 'slides', 33 | slideName, 34 | 'theme.css' 35 | ) 36 | css = await fs.readFile(cssPath, 'utf-8') 37 | } catch (cssError) { 38 | console.warn(`CSSファイルが見つかりません: ${slideName}/theme.css`) 39 | // CSSファイルが見つからない場合は空文字列を使用 40 | } 41 | 42 | const 
marpit = new Marpit({ 43 | inlineSVG: true, 44 | }) 45 | if (css) { 46 | marpit.themeSet.default = marpit.themeSet.add(css) 47 | } 48 | 49 | const { html, css: generatedCss } = marpit.render(markdown) 50 | 51 | res.status(200).json({ html, css: generatedCss }) 52 | } catch (error) { 53 | console.error(error) 54 | res.status(500).json({ 55 | message: 'Error processing markdown', 56 | error: (error as Error).message, 57 | }) 58 | } 59 | } else { 60 | res.status(405).json({ message: 'Method not allowed' }) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /src/pages/api/get-background-list.ts: -------------------------------------------------------------------------------- 1 | import { NextApiRequest, NextApiResponse } from 'next' 2 | import fs from 'fs' 3 | import path from 'path' 4 | 5 | export default async function handler( 6 | req: NextApiRequest, 7 | res: NextApiResponse 8 | ) { 9 | try { 10 | const backgroundsDir = path.join(process.cwd(), 'public/backgrounds') 11 | 12 | if (!fs.existsSync(backgroundsDir)) { 13 | fs.mkdirSync(backgroundsDir, { recursive: true }) 14 | return res.status(200).json([]) 15 | } 16 | 17 | const files = fs.readdirSync(backgroundsDir) 18 | const imageFiles = files.filter((file) => { 19 | const extension = path.extname(file).toLowerCase() 20 | return ['.jpg', '.jpeg', '.png', '.gif', '.webp'].includes(extension) 21 | }) 22 | 23 | res.status(200).json(imageFiles) 24 | } catch (error) { 25 | console.error('Error fetching background list:', error) 26 | res.status(500).json({ error: 'Failed to fetch background list' }) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/pages/api/get-live2d-list.ts: -------------------------------------------------------------------------------- 1 | import { NextApiRequest, NextApiResponse } from 'next' 2 | import fs from 'fs' 3 | import path from 'path' 4 | 5 | interface Live2DModelInfo { 6 | path: string 
7 | name: string 8 | expressions: string[] 9 | motions: string[] 10 | } 11 | 12 | export default async function handler( 13 | req: NextApiRequest, 14 | res: NextApiResponse 15 | ) { 16 | const live2dDir = path.join(process.cwd(), 'public/live2d') 17 | 18 | try { 19 | if (!fs.existsSync(live2dDir)) { 20 | return res.status(404).json({ error: 'Live2D directory not found' }) 21 | } 22 | 23 | const folders = await fs.promises.readdir(live2dDir, { 24 | withFileTypes: true, 25 | }) 26 | const live2dModels: Live2DModelInfo[] = [] 27 | 28 | for (const folder of folders) { 29 | if (folder.isDirectory()) { 30 | const folderPath = path.join(live2dDir, folder.name) 31 | const files = await fs.promises.readdir(folderPath) 32 | const model3File = files.find((file) => file.endsWith('.model3.json')) 33 | 34 | if (model3File) { 35 | const modelPath = `/live2d/${folder.name}/${model3File}` 36 | const fullPath = path.join(folderPath, model3File) 37 | const modelContent = await fs.promises.readFile(fullPath, 'utf-8') 38 | const modelJson = JSON.parse(modelContent) 39 | 40 | // Extract expressions and motions from model3.json 41 | const expressions = 42 | modelJson.FileReferences.Expressions?.map( 43 | (exp: { Name: string }) => exp.Name 44 | ) || [] 45 | const motions = Object.keys(modelJson.FileReferences.Motions || {}) 46 | 47 | live2dModels.push({ 48 | path: modelPath, 49 | name: folder.name, 50 | expressions, 51 | motions, 52 | }) 53 | } 54 | } 55 | } 56 | 57 | res.status(200).json(live2dModels) 58 | } catch (error) { 59 | console.error('Error reading Live2D directory:', error) 60 | res.status(500).json({ 61 | error: 'Failed to get Live2D model list', 62 | }) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /src/pages/api/get-nijivoice-actors.ts: -------------------------------------------------------------------------------- 1 | import type { NextApiRequest, NextApiResponse } from 'next' 2 | 3 | export default async function 
handler( 4 | req: NextApiRequest, 5 | res: NextApiResponse 6 | ) { 7 | const { apiKey } = req.query 8 | 9 | const nijivoiceApiKey = apiKey || process.env.NIJIVOICE_API_KEY 10 | if (!nijivoiceApiKey) { 11 | return res.status(400).json({ error: 'API key is required' }) 12 | } 13 | 14 | try { 15 | const response = await fetch( 16 | 'https://api.nijivoice.com/api/platform/v1/voice-actors', 17 | { 18 | headers: { 19 | 'x-api-key': nijivoiceApiKey as string, 20 | }, 21 | } 22 | ) 23 | const data = await response.json() 24 | return res.status(200).json(data) 25 | } catch (error) { 26 | console.error('Failed to fetch voice actors:', error) 27 | return res.status(500).json({ error: 'Failed to fetch voice actors' }) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /src/pages/api/get-vrm-list.ts: -------------------------------------------------------------------------------- 1 | import { NextApiRequest, NextApiResponse } from 'next' 2 | import fs from 'fs' 3 | import path from 'path' 4 | 5 | export default async function handler( 6 | req: NextApiRequest, 7 | res: NextApiResponse 8 | ) { 9 | const vrmDir = path.join(process.cwd(), 'public/vrm') 10 | 11 | try { 12 | if (!fs.existsSync(vrmDir)) { 13 | return res.status(404).json({ error: 'VRM directory not found' }) 14 | } 15 | const files = await fs.promises.readdir(vrmDir) 16 | const vrmFiles = files.filter((file) => file.endsWith('.vrm')) 17 | res.status(200).json(vrmFiles) 18 | } catch (error) { 19 | console.error('Error reading VRM directory:', error) 20 | res.status(500).json({ 21 | error: 'Failed to get VRM file list', 22 | }) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /src/pages/api/getSlideFolders.ts: -------------------------------------------------------------------------------- 1 | import fs from 'fs' 2 | import path from 'path' 3 | import { NextApiRequest, NextApiResponse } from 'next' 4 | 5 | export default 
function handler(req: NextApiRequest, res: NextApiResponse) { 6 | const slidesDir = path.join(process.cwd(), 'public', 'slides') 7 | 8 | try { 9 | const folders = fs 10 | .readdirSync(slidesDir, { withFileTypes: true }) 11 | .filter((dirent) => dirent.isDirectory()) 12 | .filter((dirent) => { 13 | const folderPath = path.join(slidesDir, dirent.name) 14 | const hasSlidesFile = fs.existsSync(path.join(folderPath, 'slides.md')) 15 | const hasScriptsFile = fs.existsSync( 16 | path.join(folderPath, 'scripts.json') 17 | ) 18 | return hasSlidesFile && hasScriptsFile 19 | }) 20 | .map((dirent) => dirent.name) 21 | 22 | res.status(200).json(folders) 23 | } catch (error) { 24 | console.error('Error reading slides directory:', error) 25 | res.status(500).json({ error: 'Unable to read slides directory' }) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /src/pages/api/getSupplement.ts: -------------------------------------------------------------------------------- 1 | import type { NextApiRequest, NextApiResponse } from 'next' 2 | import fs from 'fs/promises' 3 | import path from 'path' 4 | 5 | type ResponseData = { 6 | content?: string 7 | message?: string 8 | error?: string 9 | } 10 | 11 | export default async function handler( 12 | req: NextApiRequest, 13 | res: NextApiResponse 14 | ) { 15 | if (req.method !== 'GET') { 16 | return res.status(405).json({ message: 'Method Not Allowed' }) 17 | } 18 | 19 | const { slideName } = req.query 20 | 21 | if (typeof slideName !== 'string' || !slideName) { 22 | return res.status(400).json({ 23 | message: 'Bad Request: Missing or invalid slideName query parameter', 24 | }) 25 | } 26 | 27 | // slideNameのサニタイズ (updateScript.tsと同様) 28 | const sanitizedSlideName = path 29 | .normalize(slideName) 30 | .replace(/^(\.\.(\/|\\|$))+/, '') 31 | if ( 32 | /[\\/:\*\?"<>\|]/.test(sanitizedSlideName) || 33 | sanitizedSlideName.includes('..') 34 | ) { 35 | return res.status(400).json({ 36 | message: 
37 | 'Bad Request: Invalid slideName contains invalid characters or path traversal attempts.', 38 | }) 39 | } 40 | 41 | const filePath = path.join( 42 | process.cwd(), 43 | 'public', 44 | 'slides', 45 | sanitizedSlideName, 46 | 'supplement.txt' 47 | ) 48 | 49 | try { 50 | const content = await fs.readFile(filePath, 'utf-8') 51 | res.status(200).json({ content }) 52 | } catch (error: any) { 53 | // ファイルが存在しない場合は空の内容を返す (エラーではなく正常系として扱う) 54 | if (error.code === 'ENOENT') { 55 | res.status(200).json({ content: '' }) 56 | } else { 57 | console.error(`Error reading file: ${filePath}`, error) 58 | res.status(500).json({ 59 | message: 'Internal Server Error', 60 | error: error instanceof Error ? error.message : String(error), 61 | }) 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /src/pages/api/openAITTS.ts: -------------------------------------------------------------------------------- 1 | import { NextApiRequest, NextApiResponse } from 'next' 2 | import OpenAI from 'openai' 3 | 4 | // 感情表現を豊かにする追加指示を行うモデル、念の為リスト形式 5 | const gpt4oEmotionalInstructionModels = ['gpt-4o'] 6 | 7 | export default async function handler( 8 | req: NextApiRequest, 9 | res: NextApiResponse 10 | ) { 11 | if (req.method !== 'POST') { 12 | return res.status(405).json({ error: 'Method not allowed' }) 13 | } 14 | 15 | const { message, voice, model, speed, apiKey, emotion } = req.body 16 | const openaiKey = 17 | apiKey || process.env.OPENAI_TTS_KEY || process.env.OPENAI_API_KEY 18 | 19 | if (!message || !voice || !model || !openaiKey) { 20 | return res.status(400).json({ error: 'Missing required parameters' }) 21 | } 22 | 23 | try { 24 | const openai = new OpenAI({ apiKey: openaiKey }) 25 | const options: { 26 | model: any 27 | voice: any 28 | speed: any 29 | input: any 30 | instructions?: any 31 | } = { 32 | model: model, 33 | voice: voice, 34 | speed: speed, 35 | input: message, 36 | } 37 | 38 | if (gpt4oEmotionalInstructionModels.some((m) 
=> model.includes(m))) { 39 | options.instructions = `Please speak "${message}" with rich emotional expression.` 40 | } 41 | 42 | const mp3 = await openai.audio.speech.create(options) 43 | 44 | const buffer = Buffer.from(await mp3.arrayBuffer()) 45 | 46 | res.setHeader('Content-Type', 'audio/mpeg') 47 | res.send(buffer) 48 | } catch (error) { 49 | console.error('OpenAI TTS error:', error) 50 | res.status(500).json({ error: 'Failed to generate speech' }) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/pages/api/services/utils.ts: -------------------------------------------------------------------------------- 1 | import { Message } from '@/features/messages/messages' 2 | 3 | /** 4 | * AIサービスとモデルに応じてメッセージを修正する 5 | */ 6 | export function modifyMessages( 7 | aiService: string, 8 | model: string, 9 | messages: Message[] 10 | ): Message[] { 11 | if ( 12 | aiService === 'anthropic' || 13 | aiService === 'perplexity' || 14 | (aiService === 'deepseek' && model === 'deepseek-reasoner') 15 | ) { 16 | return modifyAnthropicMessages(messages) 17 | } 18 | return messages 19 | } 20 | 21 | /** 22 | * Anthropicのメッセージフォーマットに合わせて修正する 23 | */ 24 | function modifyAnthropicMessages(messages: Message[]): Message[] { 25 | const systemMessage: Message | undefined = messages.find( 26 | (message) => message.role === 'system' 27 | ) 28 | let userMessages = messages 29 | .filter((message) => message.role !== 'system') 30 | .filter((message) => message.content !== '') 31 | 32 | userMessages = consolidateMessages(userMessages) 33 | 34 | while (userMessages.length > 0 && userMessages[0].role !== 'user') { 35 | userMessages.shift() 36 | } 37 | 38 | const result: Message[] = systemMessage 39 | ? 
[systemMessage, ...userMessages] 40 | : userMessages 41 | return result 42 | } 43 | 44 | /** 45 | * 同じroleのメッセージを結合する 46 | */ 47 | export function consolidateMessages(messages: Message[]) { 48 | const consolidated: Message[] = [] 49 | let lastRole: string | null = null 50 | let combinedContent: 51 | | string 52 | | [ 53 | { 54 | type: 'text' 55 | text: string 56 | }, 57 | { 58 | type: 'image' 59 | image: string 60 | }, 61 | ] 62 | 63 | messages.forEach((message, index) => { 64 | if (message.role === lastRole) { 65 | if (typeof combinedContent === 'string') { 66 | combinedContent += '\n' + message.content 67 | } else { 68 | combinedContent[0].text += '\n' + message.content 69 | } 70 | } else { 71 | if (lastRole !== null) { 72 | consolidated.push({ role: lastRole, content: combinedContent }) 73 | } 74 | lastRole = message.role 75 | combinedContent = message.content || '' 76 | } 77 | 78 | if (index === messages.length - 1) { 79 | consolidated.push({ role: lastRole, content: combinedContent }) 80 | } 81 | }) 82 | 83 | return consolidated 84 | } 85 | -------------------------------------------------------------------------------- /src/pages/api/tts-aivisspeech.ts: -------------------------------------------------------------------------------- 1 | import type { NextApiRequest, NextApiResponse } from 'next' 2 | import axios from 'axios' 3 | 4 | type Data = { 5 | audio?: ArrayBuffer 6 | error?: string 7 | } 8 | 9 | export default async function handler( 10 | req: NextApiRequest, 11 | res: NextApiResponse 12 | ) { 13 | const { text, speaker, speed, pitch, intonation, serverUrl } = req.body 14 | const apiUrl = 15 | serverUrl || process.env.AIVIS_SPEECH_SERVER_URL || 'http://localhost:10101' 16 | 17 | try { 18 | // 1. 
Audio Query の生成 19 | const queryResponse = await axios.post( 20 | `${apiUrl}/audio_query?speaker=${speaker}&text=${encodeURIComponent(text)}`, 21 | null, 22 | { 23 | headers: { 24 | 'Content-Type': 'application/json', 25 | }, 26 | timeout: 30000, 27 | } 28 | ) 29 | 30 | const queryData = queryResponse.data 31 | queryData.speedScale = speed 32 | queryData.pitchScale = pitch 33 | queryData.intonationScale = intonation 34 | 35 | // 2. 音声合成 36 | const synthesisResponse = await axios.post( 37 | `${apiUrl}/synthesis?speaker=${speaker}`, 38 | queryData, 39 | { 40 | headers: { 41 | 'Content-Type': 'application/json', 42 | Accept: 'audio/wav', 43 | }, 44 | responseType: 'stream', 45 | timeout: 30000, 46 | } 47 | ) 48 | 49 | res.setHeader('Content-Type', 'audio/wav') 50 | synthesisResponse.data.pipe(res) 51 | } catch (error) { 52 | console.error('Error in AivisSpeech TTS:', error) 53 | res.status(500).json({ error: 'Internal Server Error' }) 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /src/pages/api/tts-google.ts: -------------------------------------------------------------------------------- 1 | import type { NextApiRequest, NextApiResponse } from 'next' 2 | import textToSpeech from '@google-cloud/text-to-speech' 3 | import { google } from '@google-cloud/text-to-speech/build/protos/protos' 4 | 5 | type Data = { 6 | audio?: string | Uint8Array // Base64 encoded string or Uint8Array 7 | error?: string 8 | } 9 | 10 | export default async function handler( 11 | req: NextApiRequest, 12 | res: NextApiResponse 13 | ) { 14 | const message = req.body.message 15 | const ttsType = req.body.ttsType 16 | const languageCode = req.body.languageCode || 'ja-JP' 17 | 18 | try { 19 | // Check if GOOGLE_TTS_KEY exists 20 | if (process.env.GOOGLE_TTS_KEY) { 21 | // Use API Key based authentication 22 | const response = await fetch( 23 | `https://texttospeech.googleapis.com/v1/text:synthesize?key=${process.env.GOOGLE_TTS_KEY}`, 24 | { 25 | 
method: 'POST', 26 | headers: { 27 | 'Content-Type': 'application/json', 28 | }, 29 | body: JSON.stringify({ 30 | input: { text: message }, 31 | voice: { languageCode: languageCode, name: ttsType }, 32 | audioConfig: { audioEncoding: 'MP3' }, 33 | }), 34 | } 35 | ) 36 | 37 | if (!response.ok) { 38 | throw new Error(`HTTP error! status: ${response.status}`) 39 | } 40 | 41 | const data = await response.json() 42 | res.status(200).json({ audio: data.audioContent }) 43 | } else { 44 | // Use credentials based authentication 45 | const client = new textToSpeech.TextToSpeechClient() 46 | 47 | const request: google.cloud.texttospeech.v1.ISynthesizeSpeechRequest = { 48 | input: { text: message }, 49 | voice: { languageCode: languageCode, name: ttsType }, 50 | audioConfig: { audioEncoding: 'MP3' }, 51 | } 52 | 53 | const [response] = await client.synthesizeSpeech(request) 54 | const audio = response.audioContent 55 | 56 | // Convert Uint8Array to Base64 if needed 57 | const audioContent = Buffer.from(audio as Uint8Array).toString('base64') 58 | 59 | res.status(200).json({ audio: audioContent }) 60 | } 61 | } catch (error) { 62 | console.error('Error in Google Text-to-Speech:', error) 63 | res.status(500).json({ error: 'Internal Server Error' }) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/pages/api/tts-koeiromap.ts: -------------------------------------------------------------------------------- 1 | import type { NextApiRequest, NextApiResponse } from 'next' 2 | import axios from 'axios' 3 | 4 | type Data = { 5 | audio?: Uint8Array 6 | error?: string 7 | } 8 | 9 | export default async function handler( 10 | req: NextApiRequest, 11 | res: NextApiResponse 12 | ) { 13 | const { message, speakerX, speakerY, style, apiKey } = req.body 14 | 15 | try { 16 | const response = await axios.post( 17 | 'https://api.rinna.co.jp/koeiromap/v1.0/infer', 18 | { 19 | text: message, 20 | speaker_x: speakerX, 21 | speaker_y: speakerY, 
22 | style: style, 23 | output_format: 'mp3', 24 | }, 25 | { 26 | headers: { 27 | 'Content-Type': 'application/json', 28 | 'Cache-Control': 'no-cache', 29 | 'Ocp-Apim-Subscription-Key': apiKey, 30 | }, 31 | } 32 | ) 33 | 34 | const audio = response.data.audio 35 | res.status(200).json({ audio }) 36 | } catch (error) { 37 | console.error('Error in Koeiromap TTS:', error) 38 | res.status(500).json({ error: 'Internal Server Error' }) 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /src/pages/api/tts-nijivoice.ts: -------------------------------------------------------------------------------- 1 | import type { NextApiRequest, NextApiResponse } from 'next' 2 | import axios from 'axios' 3 | 4 | type Data = { 5 | audio?: Buffer 6 | error?: string 7 | } 8 | 9 | export default async function handler( 10 | req: NextApiRequest, 11 | res: NextApiResponse 12 | ) { 13 | const { script, speed, voiceActorId, apiKey, emotionalLevel, soundDuration } = 14 | req.body 15 | 16 | const nijivoiceApiKey = apiKey || process.env.NIJIVOICE_API_KEY 17 | if (!nijivoiceApiKey) { 18 | return res.status(400).json({ error: 'API key is required' }) 19 | } 20 | 21 | try { 22 | const response = await axios.post( 23 | `https://api.nijivoice.com/api/platform/v1/voice-actors/${voiceActorId}/generate-encoded-voice`, 24 | { 25 | script, 26 | speed: speed.toString(), 27 | format: 'mp3', 28 | emotionalLevel: emotionalLevel.toString(), 29 | soundDuration: soundDuration.toString(), 30 | }, 31 | { 32 | headers: { 33 | 'Content-Type': 'application/json', 34 | 'x-api-key': nijivoiceApiKey, 35 | }, 36 | timeout: 30000, 37 | } 38 | ) 39 | 40 | const base64Audio = response.data.generatedVoice.base64Audio 41 | const audioBuffer = Buffer.from(base64Audio, 'base64') 42 | 43 | res.writeHead(200, { 44 | 'Content-Type': 'audio/mpeg', 45 | 'Content-Length': audioBuffer.length, 46 | }) 47 | res.end(audioBuffer) 48 | } catch (error) { 49 | console.error('Error in Nijivoice 
TTS:', error) 50 | res.status(500).json({ error: 'Internal Server Error' }) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/pages/api/tts-voicevox.ts: -------------------------------------------------------------------------------- 1 | import type { NextApiRequest, NextApiResponse } from 'next' 2 | import axios from 'axios' 3 | 4 | type Data = { 5 | audio?: ArrayBuffer 6 | error?: string 7 | } 8 | 9 | export default async function handler( 10 | req: NextApiRequest, 11 | res: NextApiResponse 12 | ) { 13 | const { text, speaker, speed, pitch, intonation, serverUrl } = req.body 14 | const apiUrl = 15 | serverUrl || process.env.VOICEVOX_SERVER_URL || 'http://localhost:50021' 16 | 17 | try { 18 | // 1. Audio Query の生成 19 | const queryResponse = await axios.post( 20 | `${apiUrl}/audio_query?speaker=${speaker}&text=${encodeURIComponent(text)}`, 21 | null, 22 | { 23 | headers: { 24 | 'Content-Type': 'application/json', 25 | }, 26 | timeout: 30000, 27 | } 28 | ) 29 | 30 | const queryData = queryResponse.data 31 | queryData.speedScale = speed 32 | queryData.pitchScale = pitch 33 | queryData.intonationScale = intonation 34 | 35 | // 2. 
音声合成 36 | const synthesisResponse = await axios.post( 37 | `${apiUrl}/synthesis?speaker=${speaker}`, 38 | queryData, 39 | { 40 | headers: { 41 | 'Content-Type': 'application/json', 42 | Accept: 'audio/wav', 43 | }, 44 | responseType: 'stream', 45 | timeout: 30000, 46 | } 47 | ) 48 | 49 | res.setHeader('Content-Type', 'audio/wav') 50 | synthesisResponse.data.pipe(res) 51 | } catch (error) { 52 | console.error('Error in VOICEVOX TTS:', error) 53 | res.status(500).json({ error: 'Internal Server Error' }) 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /src/pages/api/update-aivis-speakers.ts: -------------------------------------------------------------------------------- 1 | import type { NextApiRequest, NextApiResponse } from 'next' 2 | import fs from 'fs/promises' 3 | import path from 'path' 4 | 5 | interface Style { 6 | name: string 7 | id: number 8 | type: string 9 | } 10 | 11 | interface Speaker { 12 | name: string 13 | speaker_uuid: string 14 | styles: Style[] 15 | } 16 | 17 | interface AivisSpeaker { 18 | speaker: string 19 | id: number 20 | } 21 | 22 | export default async function handler( 23 | req: NextApiRequest, 24 | res: NextApiResponse 25 | ) { 26 | try { 27 | // APIからデータを取得 28 | const serverUrl = 29 | req.query.serverUrl || 30 | process.env.AIVIS_SPEECH_SERVER_URL || 31 | 'http://127.0.0.1:10101' 32 | const response = await fetch(`${serverUrl}/speakers`) 33 | const speakers: Speaker[] = await response.json() 34 | 35 | // Aivis形式に変換 36 | const aivisSpeakers: AivisSpeaker[] = speakers.flatMap((speaker) => 37 | speaker.styles.map((style) => ({ 38 | speaker: `${speaker.name}/${style.name}`, 39 | id: style.id, 40 | })) 41 | ) 42 | 43 | // JSONファイルに書き込み 44 | const filePath = path.join(process.cwd(), 'public/speakers_aivis.json') 45 | await fs.writeFile(filePath, JSON.stringify(aivisSpeakers, null, 2) + '\n') 46 | 47 | res.status(200).json({ message: 'Speakers file updated successfully' }) 48 | } catch 
(error) { 49 | console.error('Error updating speakers:', error) 50 | res.status(500).json({ error: 'Failed to update speakers file' }) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/pages/api/upload-background.ts: -------------------------------------------------------------------------------- 1 | import { NextApiRequest, NextApiResponse } from 'next' 2 | import formidable from 'formidable' 3 | import fs from 'fs' 4 | import path from 'path' 5 | 6 | export const config = { 7 | api: { 8 | bodyParser: false, 9 | }, 10 | } 11 | const formOptions: formidable.Options = { 12 | maxFileSize: 100 * 1024 * 1024, // 100MB 13 | filter: (part) => { 14 | return part.mimetype?.startsWith('image/') || false 15 | }, 16 | } 17 | 18 | export default async function handler( 19 | req: NextApiRequest, 20 | res: NextApiResponse 21 | ) { 22 | if (req.method !== 'POST') { 23 | return res.status(405).json({ error: 'Method not allowed' }) 24 | } 25 | 26 | const form = formidable(formOptions) 27 | 28 | try { 29 | const [fields, files] = await form.parse(req) 30 | const file = files.file?.[0] 31 | 32 | if (!file) { 33 | return res.status(400).json({ error: 'No file uploaded' }) 34 | } 35 | 36 | const validExtensions = ['.jpg', '.jpeg', '.png', '.gif', '.webp'] 37 | const extension = path.extname(file.originalFilename || '').toLowerCase() 38 | 39 | if (!validExtensions.includes(extension)) { 40 | return res.status(400).json({ 41 | error: 'Invalid file type', 42 | message: 'Only JPG, PNG, GIF and WebP images can be uploaded', 43 | }) 44 | } 45 | 46 | const bgDir = path.join(process.cwd(), 'public/backgrounds') 47 | if (!fs.existsSync(bgDir)) { 48 | fs.mkdirSync(bgDir, { recursive: true }) 49 | } 50 | 51 | const newPath = path.join( 52 | bgDir, 53 | file.originalFilename || 'background' + extension 54 | ) 55 | await fs.promises.copyFile(file.filepath, newPath) 56 | 57 | res.status(200).json({ 58 | path: 
`/backgrounds/${file.originalFilename}`, 59 | }) 60 | } catch (error) { 61 | res.status(500).json({ error: 'Failed to upload file' }) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/pages/api/upload-vrm-list.ts: -------------------------------------------------------------------------------- 1 | import { NextApiRequest, NextApiResponse } from 'next' 2 | import formidable from 'formidable' 3 | import fs from 'fs' 4 | import path from 'path' 5 | 6 | export const config = { 7 | api: { 8 | bodyParser: false, 9 | }, 10 | } 11 | const formOptions = { 12 | maxFileSize: 200 * 1024 * 1024, 13 | filter: ({ mimetype }: { mimetype: string | null }) => 14 | mimetype === 'application/octet-stream' || mimetype === 'model/vrm', 15 | } 16 | 17 | export default async function handler( 18 | req: NextApiRequest, 19 | res: NextApiResponse 20 | ) { 21 | if (req.method !== 'POST') { 22 | return res.status(405).json({ error: 'Method not allowed' }) 23 | } 24 | 25 | const form = formidable(formOptions) 26 | 27 | try { 28 | const [fields, files] = await form.parse(req) 29 | const file = files.file?.[0] 30 | 31 | if (!file) { 32 | return res.status(400).json({ error: 'No file uploaded' }) 33 | } 34 | 35 | if (!file.originalFilename?.toLowerCase().endsWith('.vrm')) { 36 | return res.status(400).json({ 37 | error: 'Invalid file type', 38 | message: 'Only VRM files can be uploaded', 39 | }) 40 | } 41 | 42 | const vrmDir = path.join(process.cwd(), 'public/vrm') 43 | if (!fs.existsSync(vrmDir)) { 44 | fs.mkdirSync(vrmDir, { recursive: true }) 45 | } 46 | 47 | const newPath = path.join(vrmDir, file.originalFilename || 'uploaded.vrm') 48 | await fs.promises.copyFile(file.filepath, newPath) 49 | 50 | res.status(200).json({ 51 | path: `/vrm/${file.originalFilename}`, 52 | }) 53 | } catch (error) { 54 | res.status(500).json({ error: 'Failed to upload file' }) 55 | } 56 | } 57 | 
-------------------------------------------------------------------------------- /src/styles/globals.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; 4 | 5 | body { 6 | background-position: top center; 7 | background-attachment: fixed; 8 | background-size: cover; 9 | background-repeat: no-repeat; 10 | } 11 | 12 | @layer components { 13 | .input-range { 14 | -webkit-appearance: none; 15 | appearance: none; 16 | background-color: #858585; 17 | height: 2px; 18 | width: 100%; 19 | border-radius: 4px; 20 | } 21 | .input-range:focus, 22 | .input-range:active { 23 | outline: none; 24 | } 25 | .input-range::-webkit-slider-thumb { 26 | -webkit-appearance: none; 27 | appearance: none; 28 | cursor: pointer; 29 | position: relative; 30 | width: 24px; 31 | height: 24px; 32 | display: block; 33 | border: 2px solid #856292; 34 | background-color: #ffffff; 35 | border-radius: 50%; 36 | -webkit-border-radius: 50%; 37 | } 38 | } 39 | 40 | @layer utilities { 41 | .scroll-hidden { 42 | -ms-overflow-style: none; 43 | scrollbar-width: none; 44 | } 45 | 46 | .scroll-hidden::-webkit-scrollbar { 47 | display: none; 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /src/types/charcoal-ui.d.ts: -------------------------------------------------------------------------------- 1 | declare module '@charcoal-ui/icons' { 2 | export interface KnownIconType { 3 | [key: string]: string 4 | } 5 | } 6 | 7 | declare namespace JSX { 8 | interface IntrinsicElements { 9 | 'pixiv-icon': React.DetailedHTMLProps< 10 | React.HTMLAttributes & { 11 | name: string 12 | scale: string 13 | }, 14 | HTMLElement 15 | > 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /src/utils/audioBufferManager.ts: -------------------------------------------------------------------------------- 1 | export type SendCallback = 
(buffer: ArrayBuffer) => Promise 2 | 3 | export class AudioBufferManager { 4 | private buffer: ArrayBuffer = new ArrayBuffer(0) 5 | private readonly BUFFER_THRESHOLD: number 6 | private readonly sendCallback: SendCallback 7 | 8 | constructor(sendCallback: SendCallback, bufferThreshold: number = 100_000) { 9 | this.sendCallback = sendCallback 10 | this.BUFFER_THRESHOLD = bufferThreshold 11 | } 12 | 13 | mergeArrayBuffers(buffer1: ArrayBuffer, buffer2: ArrayBuffer): ArrayBuffer { 14 | const tmp = new Uint8Array(buffer1.byteLength + buffer2.byteLength) 15 | tmp.set(new Uint8Array(buffer1), 0) 16 | tmp.set(new Uint8Array(buffer2), buffer1.byteLength) 17 | return tmp.buffer 18 | } 19 | 20 | addData(newData: ArrayBuffer): void { 21 | console.log('Adding data to buffer:', newData.byteLength) 22 | this.buffer = this.mergeArrayBuffers(this.buffer, newData) 23 | if (this.buffer.byteLength >= this.BUFFER_THRESHOLD) { 24 | this.sendBuffer() 25 | } 26 | } 27 | 28 | async sendBuffer(): Promise { 29 | if (this.buffer.byteLength > 0) { 30 | const bufferToSend = this.buffer 31 | this.buffer = new ArrayBuffer(0) 32 | await this.sendCallback(bufferToSend) 33 | } 34 | } 35 | 36 | async flush(): Promise { 37 | await this.sendBuffer() 38 | } 39 | } 40 | 41 | export function base64ToArrayBuffer(base64: string): ArrayBuffer { 42 | const binaryString = atob(base64) 43 | const len = binaryString.length 44 | const bytes = new Uint8Array(len) 45 | for (let i = 0; i < len; i++) { 46 | bytes[i] = binaryString.charCodeAt(i) 47 | } 48 | 49 | const arrayBuffer = bytes.buffer 50 | if (!validateAudioBuffer(arrayBuffer)) { 51 | console.error('Invalid audio buffer') 52 | return new ArrayBuffer(0) 53 | } 54 | 55 | return arrayBuffer 56 | } 57 | 58 | export function validateAudioBuffer(buffer: ArrayBuffer): boolean { 59 | if (buffer.byteLength < 1024 || buffer.byteLength > 1024 * 1024) { 60 | console.error(`Invalid buffer size: ${buffer.byteLength} bytes`) 61 | return false 62 | } 63 | 64 | if 
(buffer.byteLength % 2 !== 0) { 65 | console.error('Buffer size is not even, which is required for 16-bit PCM') 66 | return false 67 | } 68 | 69 | const int16Array = new Int16Array(buffer) 70 | const isInValidRange = int16Array.every( 71 | (value) => value >= -32768 && value <= 32767 72 | ) 73 | if (!isInValidRange) { 74 | console.error( 75 | 'Audio data contains values outside the valid range for 16-bit PCM' 76 | ) 77 | return false 78 | } 79 | 80 | return true 81 | } 82 | -------------------------------------------------------------------------------- /src/utils/audioProcessing.ts: -------------------------------------------------------------------------------- 1 | // リサンプリング関数 2 | export const resampleAudio = ( 3 | audioData: Float32Array, 4 | fromSampleRate: number, 5 | toSampleRate: number 6 | ): Float32Array => { 7 | const ratio = fromSampleRate / toSampleRate 8 | const newLength = Math.round(audioData.length / ratio) 9 | const result = new Float32Array(newLength) 10 | 11 | for (let i = 0; i < newLength; i++) { 12 | const position = i * ratio 13 | const leftIndex = Math.floor(position) 14 | const rightIndex = Math.ceil(position) 15 | const fraction = position - leftIndex 16 | 17 | if (rightIndex >= audioData.length) { 18 | result[i] = audioData[leftIndex] 19 | } else { 20 | result[i] = 21 | (1 - fraction) * audioData[leftIndex] + fraction * audioData[rightIndex] 22 | } 23 | } 24 | 25 | return result 26 | } 27 | 28 | // リサンプリングとモノラル変換を行う関数 29 | export const processAudio = (audioBuffer: AudioBuffer): Float32Array => { 30 | const targetSampleRate = 24000 31 | const numChannels = audioBuffer.numberOfChannels 32 | 33 | // モノラルに変換 34 | let monoData = new Float32Array(audioBuffer.length) 35 | for (let i = 0; i < audioBuffer.length; i++) { 36 | let sum = 0 37 | for (let channel = 0; channel < numChannels; channel++) { 38 | sum += audioBuffer.getChannelData(channel)[i] 39 | } 40 | monoData[i] = sum / numChannels 41 | } 42 | 43 | // リサンプリング 44 | return 
resampleAudio(monoData, audioBuffer.sampleRate, targetSampleRate) 45 | } 46 | 47 | // Float32Array を PCM16 ArrayBuffer に変換する関数 48 | export const floatTo16BitPCM = (float32Array: Float32Array): ArrayBuffer => { 49 | const buffer = new ArrayBuffer(float32Array.length * 2) 50 | const view = new DataView(buffer) 51 | for (let i = 0; i < float32Array.length; i++) { 52 | const s = Math.max(-1, Math.min(1, float32Array[i])) 53 | view.setInt16(i * 2, s < 0 ? s * 0x8000 : s * 0x7fff, true) 54 | } 55 | return buffer 56 | } 57 | 58 | // Float32Array を base64エンコードされた PCM16 データに変換する関数 59 | export const base64EncodeAudio = (float32Array: Float32Array): string => { 60 | const arrayBuffer = floatTo16BitPCM(float32Array) 61 | let binary = '' 62 | const bytes = new Uint8Array(arrayBuffer) 63 | const chunkSize = 0x8000 // 32KB chunk size 64 | for (let i = 0; i < bytes.length; i += chunkSize) { 65 | binary += String.fromCharCode.apply( 66 | null, 67 | Array.from(bytes.subarray(i, i + chunkSize)) 68 | ) 69 | } 70 | return btoa(binary) 71 | } 72 | -------------------------------------------------------------------------------- /src/utils/buildUrl.ts: -------------------------------------------------------------------------------- 1 | import getConfig from 'next/config' 2 | 3 | /** 4 | * github pagesに公開時にアセットを読み込めるようにするため、 5 | * 環境変数を見てURLにリポジトリ名を追加する 6 | */ 7 | export function buildUrl(path: string): string { 8 | const { 9 | publicRuntimeConfig, 10 | }: { 11 | publicRuntimeConfig: { root: string } 12 | } = getConfig() 13 | 14 | // 空白などの特殊文字を含むパスを適切にエンコード 15 | // ただし、パス区切り文字(/)はエンコードしない 16 | const encodedPath = path 17 | .split('/') 18 | .map((segment) => encodeURIComponent(segment)) 19 | .join('/') 20 | 21 | return publicRuntimeConfig.root + encodedPath 22 | } 23 | -------------------------------------------------------------------------------- /src/utils/messageUtils.ts: -------------------------------------------------------------------------------- 1 | import { v4 as uuidv4 } from 
'uuid' 2 | 3 | /** 4 | * メッセージ用の一意なIDを生成します。 5 | * @returns 生成されたID (例: "msg_xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx") 6 | */ 7 | export const generateMessageId = (): string => `msg_${uuidv4()}` 8 | -------------------------------------------------------------------------------- /src/utils/migrateStore.ts: -------------------------------------------------------------------------------- 1 | import { isLanguageSupported } from '@/features/constants/settings' 2 | import homeStore, { PersistedState } from '@/features/stores/home' 3 | import settingsStore, { SettingsState } from '@/features/stores/settings' 4 | 5 | const migrateStore = () => { 6 | const rawStore = window.localStorage.getItem('chatVRMParams') 7 | if (!rawStore) return 8 | 9 | type Store = Omit & 10 | Pick & { 11 | selectLanguage: string 12 | } 13 | 14 | const store = JSON.parse(rawStore) as Store 15 | 16 | const ss = settingsStore.getState() 17 | const hs = homeStore.getState() 18 | 19 | Object.entries(store).forEach(([k, v]) => { 20 | if (k in ss) { 21 | ;(ss as any)[k] = v 22 | } else if (k in hs) { 23 | ;(hs as any)[k] = v 24 | } 25 | }) 26 | 27 | // selectLanguage migration: follow ISO 639-1 and lowercased, e.g. JP → ja 28 | let lang = ss.selectLanguage.toLowerCase() 29 | lang = lang === 'jp' ? 'ja' : lang 30 | ss.selectLanguage = isLanguageSupported(lang) ? 
lang : 'ja' 31 | 32 | settingsStore.setState(ss) 33 | homeStore.setState(hs) 34 | 35 | window.localStorage.removeItem('chatVRMParams') 36 | } 37 | export default migrateStore 38 | -------------------------------------------------------------------------------- /src/utils/modelMigration.ts: -------------------------------------------------------------------------------- 1 | // Legacy OpenAI model names with date suffixes 2 | const LEGACY_OPENAI_MODELS: Record = { 3 | 'gpt-4o-mini-2024-07-18': 'gpt-4o-mini', 4 | 'gpt-4o-2024-11-20': 'gpt-4o', 5 | 'gpt-4.5-preview-2025-02-27': 'gpt-4.5-preview', 6 | 'gpt-4.1-nano-2025-04-14': 'gpt-4.1-nano', 7 | 'gpt-4.1-mini-2025-04-14': 'gpt-4.1-mini', 8 | 'gpt-4.1-2025-04-14': 'gpt-4.1', 9 | } 10 | 11 | // Migrate OpenAI model names from old format to new format 12 | export const migrateOpenAIModelName = (modelName: string): string => { 13 | return LEGACY_OPENAI_MODELS[modelName] || modelName 14 | } 15 | -------------------------------------------------------------------------------- /src/utils/reduceTalkStyle.ts: -------------------------------------------------------------------------------- 1 | // /* koeiromap Free v1の制限に対応した声色 */ 2 | // type ReducedTalkStyle = 'talk' | 'happy' | 'sad' 3 | 4 | // /** 5 | // * koeiromap Free v1用に声色パラメータを制限する 6 | // */ 7 | // export const reduceTalkStyle = (talkStyle: string): ReducedTalkStyle => { 8 | // if (talkStyle == 'talk' || talkStyle == 'happy' || talkStyle == 'sad') { 9 | // return talkStyle 10 | // } 11 | 12 | // return 'talk' 13 | // } 14 | -------------------------------------------------------------------------------- /src/utils/textProcessing.ts: -------------------------------------------------------------------------------- 1 | import englishToJapanese from '@/utils/englishToJapanese.json' 2 | 3 | interface EnglishToJapanese { 4 | [key: string]: string 5 | } 6 | 7 | const typedEnglishToJapanese = englishToJapanese as EnglishToJapanese 8 | 9 | // ソート済みキーをあらかじめメモ化 10 | const 
sortedEnglishKeys = Object.keys(typedEnglishToJapanese).sort(
  (a, b) => b.length - a.length
)

// Words that must always be considered, even if the length filter below
// would exclude them
const importantWords = ['mastra', 'Mastra']

// Optimization: only keep words of 10 characters or fewer, plus the
// important words above
const commonWordsKeys = [
  ...sortedEnglishKeys.filter((key) => key.length <= 10),
  ...importantWords,
]

// Pre-compiled regexes, reused across calls
// (restored the Map<string, RegExp> annotation the dump stripped)
const regexCache = new Map<string, RegExp>()

// Escape regex metacharacters so dictionary keys are matched literally
const escapeRegExp = (s: string): string =>
  s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')

/**
 * Replace known English words in the text with their Japanese reading.
 * @param text source string
 * @returns converted string
 */
export function convertEnglishToJapaneseReading(text: string): string {
  // Lowercase once so the per-word containment check is case-insensitive
  const lowerText = text.toLowerCase()

  return commonWordsKeys.reduce((result, englishWord) => {
    // Fast path: skip words that do not occur in the text at all
    if (!lowerText.includes(englishWord.toLowerCase())) {
      return result
    }

    const japaneseReading = typedEnglishToJapanese[englishWord]
    // Guard: importantWords entries may be absent from the dictionary; the
    // previous code passed undefined to replace(), injecting the literal
    // string "undefined" into the output text
    if (japaneseReading === undefined) {
      return result
    }

    // Reuse the cached regex, compiling it on first use
    let regex = regexCache.get(englishWord)
    if (!regex) {
      // Case-insensitive, word-boundary match (keys escaped above)
      regex = new RegExp(`\\b${escapeRegExp(englishWord)}\\b`, 'gi')
      regexCache.set(englishWord, regex)
    }

    return result.replace(regex, japaneseReading)
  }, text)
}

/**
 * Async wrapper around convertEnglishToJapaneseReading that yields to the
 * event loop first so the UI thread is not blocked.
 * @param text source string
 * @returns promise resolving to the converted string
 */
export async function asyncConvertEnglishToJapaneseReading(
  text: string
): Promise<string> {
  // Wait until the next tick so we do not block the UI thread
  await new Promise((resolve) => setTimeout(resolve, 0))

  return convertEnglishToJapaneseReading(text)
}

/**
 * Check whether the text contains any English (Latin) letters.
 * @param text text to check
 * @returns true if English letters are present
 */
export function containsEnglish(text: string): boolean {
  return /[a-zA-Z]/.test(text)
}
--------------------------------------------------------------------------------
/src/utils/voiceLanguage.ts:
--------------------------------------------------------------------------------
import { VoiceLanguage } from '@/features/constants/settings'

// UI language code → speech-recognition locale code
const LANGUAGE_TO_LOCALE: Record<string, VoiceLanguage> = {
  ja: 'ja-JP',
  en: 'en-US',
  ko: 'ko-KR',
  zh: 'zh-TW',
  vi: 'vi-VN',
  fr: 'fr-FR',
  es: 'es-ES',
  pt: 'pt-PT',
  de: 'de-DE',
  ru: 'ru-RU',
  it: 'it-IT',
  ar: 'ar-SA',
  hi: 'hi-IN',
  pl: 'pl-PL',
  th: 'th-TH',
}

/**
 * Convert a UI language code into the locale code used for speech
 * recognition. Unknown codes fall back to 'ja-JP', exactly like the
 * default branch of the original switch.
 */
export const getVoiceLanguageCode = (selectLanguage: string): VoiceLanguage => {
  return Object.prototype.hasOwnProperty.call(LANGUAGE_TO_LOCALE, selectLanguage)
    ? LANGUAGE_TO_LOCALE[selectLanguage]
    : 'ja-JP'
}
--------------------------------------------------------------------------------
/src/utils/wait.ts:
--------------------------------------------------------------------------------
/** Resolve after the given number of milliseconds. */
export const wait = async (ms: number) =>
  new Promise((resolve) => setTimeout(resolve, ms))
--------------------------------------------------------------------------------
/tailwind.config.js:
--------------------------------------------------------------------------------
/** @type {import('tailwindcss').Config} */
module.exports = {
  darkMode: 'class',
  content: ['./src/**/*.{js,ts,jsx,tsx}'],
  theme: {
    extend: {
      colors: {
        primary: '#856292',
        'primary-hover': '#8E76A1',
        'primary-press': '#988BB0',
        'primary-disabled': '#6F48694D',
        secondary: '#FF617F',
        'secondary-hover': '#FF849B',
        'secondary-press': '#FF9EB1',
        'secondary-disabled': '#FF617F4D',
        'text-primary': '#514062',
        'base-light': '#FBE2CA',
        'base-dark': '#332D2D',

        // Brighter, higher-contrast colors for toasts
        'toast-info': '#007BFF',
        'toast-info-hover': '#0056B3',
'toast-error': '#DC3545', 24 | 'toast-error-hover': '#BD2130', 25 | 'toast-success': '#28A745', 26 | 'toast-success-hover': '#218838', 27 | 'toast-tool': '#9C27B0', 28 | 'toast-tool-hover': '#7B1FA2', 29 | }, 30 | fontFamily: { 31 | M_PLUS_2: ['Montserrat', 'M_PLUS_2', 'sans-serif'], 32 | Montserrat: ['Montserrat', 'sans-serif'], 33 | }, 34 | zIndex: { 35 | 5: '5', 36 | 15: '15', 37 | }, 38 | width: { 39 | 'col-span-2': '184px', 40 | 'col-span-4': '392px', 41 | 'col-span-7': '704px', 42 | }, 43 | }, 44 | }, 45 | plugins: [], 46 | } 47 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2015", 4 | "lib": ["dom", "dom.iterable", "esnext"], 5 | "allowJs": true, 6 | "skipLibCheck": true, 7 | "strict": true, 8 | "forceConsistentCasingInFileNames": true, 9 | "noEmit": true, 10 | "esModuleInterop": true, 11 | "module": "esnext", 12 | "moduleResolution": "bundler", 13 | "resolveJsonModule": true, 14 | "isolatedModules": true, 15 | "jsx": "preserve", 16 | "incremental": true, 17 | "paths": { 18 | "@/*": ["./src/*"] 19 | }, 20 | "typeRoots": ["./node_modules/@types", "./src/types"], 21 | "plugins": [ 22 | { 23 | "name": "next" 24 | } 25 | ] 26 | }, 27 | "include": [ 28 | "next-env.d.ts", 29 | "**/*.ts", 30 | "**/*.tsx", 31 | ".next/types/**/*.ts", 32 | "src/types/**/*.d.ts" 33 | ], 34 | "exclude": ["node_modules", ".mypy_cache", "scripts"] 35 | } 36 | -------------------------------------------------------------------------------- /watch.json: -------------------------------------------------------------------------------- 1 | { 2 | "install": { 3 | "include": ["package.json"] 4 | }, 5 | "restart": { 6 | "include": [".env", "next.config.js"] 7 | }, 8 | "throttle": 250 9 | } 10 | --------------------------------------------------------------------------------