├── .env.example ├── .github ├── FUNDING.yml └── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── .gitignore ├── .vscode └── settings.json ├── Backend ├── classes │ ├── Shorts.py │ └── instagram_downloader.py ├── gpt.py ├── main.py ├── search.py ├── settings.py ├── static │ ├── assets │ │ ├── fonts │ │ │ ├── .gitKeep │ │ │ └── bold_font.ttf │ │ ├── images │ │ │ ├── Screen1.png │ │ │ ├── Screenshot2.png │ │ │ └── Screenshot3.png │ │ └── music │ │ │ └── .gitKeep │ └── generated_videos │ │ └── .gitKeep ├── tiktokvoice.py ├── utils.py ├── video.py └── youtube.py ├── Dockerfile ├── Dockerfile.FE ├── Dockerfile.FE.Nuxt ├── EnvironmentVariables.md ├── Frontend ├── app.js └── index.html ├── LICENSE ├── README.md ├── UI ├── .gitignore ├── .npmrc ├── .vscode │ └── settings.json ├── README.md ├── app.config.ts ├── app.vue ├── assets │ └── scss │ │ ├── helpers │ │ └── _transition.scss │ │ └── main.scss ├── components │ ├── ActionIcon.vue │ ├── AllSettings.vue │ ├── ErrorView.vue │ ├── GenerateScript.vue │ ├── HeaderLayout.vue │ ├── InstagramVideos.vue │ ├── LayoutTabs.vue │ ├── MultiStepLoader.vue │ ├── MusicSettings.vue │ ├── NaiveLayoutSidebar.vue │ ├── RedirectView.vue │ ├── SearchDialog.vue │ ├── SearchTrigger.vue │ ├── SubtitleSettings.vue │ ├── ToolTipper.vue │ ├── VideoSearch.vue │ ├── VideoSelected.vue │ ├── VideosTable.vue │ ├── VoiceSettings.vue │ └── instagram.vue ├── composables │ ├── useGlobalSettings.ts │ ├── useMenuSetting.ts │ ├── useSearchDialog.ts │ ├── useTabs.ts │ └── useVideoSetings.ts ├── content │ └── docs │ │ ├── how-to-use.md │ │ ├── index.md │ │ └── road-map.md ├── i18n │ └── locales │ │ └── en-US.json ├── layouts │ └── default.vue ├── nuxt.config.ts ├── package.json ├── pages │ ├── docs │ │ └── [...slug].vue │ ├── generate │ │ └── index.vue │ ├── index.vue │ ├── search.vue │ ├── settings.vue │ └── videos │ │ └── index.vue ├── pnpm-lock.yaml ├── public │ └── favicon.ico ├── server │ └── tsconfig.json ├── stores │ ├── AppStore.ts │ └── TabsStore.ts ├── tailwind.config.ts ├── tsconfig.json ├── types │ ├── Menu │ │ └── index.ts │ ├── Project │ │ └── Settings.ts │ └── Search │ │ └── index.ts ├── uno.config.ts └── utils │ ├── PlatformUtils.ts │ ├── RouteHelpers.ts │ ├── ScreenUtils.ts │ └── mitt.ts ├── docker-compose.yml ├── logo.jpeg └── requirements.txt /.env.example: -------------------------------------------------------------------------------- 1 | # See EnvironmentVariables.md for more information. 2 | 3 | # Necessary API Keys 4 | # ------------------- 5 | 6 | # TikTok Session ID 7 | # Obtain your session ID by logging into TikTok and copying the sessionid cookie. 8 | TIKTOK_SESSION_ID="" 9 | 10 | # ImageMagick Binary Path 11 | # Download ImageMagick from https://imagemagick.org/script/download.php 12 | IMAGEMAGICK_BINARY="/usr/bin/convert" 13 | 14 | # Pexels API Key 15 | # Register at https://www.pexels.com/api/ to get your API key. 16 | PEXELS_API_KEY="" 17 | 18 | # Optional API Keys 19 | # ----------------- 20 | 21 | # OpenAI API Key 22 | # Visit https://openai.com/api/ for details on obtaining an API key. 23 | OPENAI_API_KEY="" 24 | 25 | # AssemblyAI API Key 26 | # Sign up at https://www.assemblyai.com/ to receive an API key. 
27 | ASSEMBLY_AI_API_KEY="" 28 | 29 | # Google API Key 30 | # Generate your API key through https://makersuite.google.com/app/apikey 31 | GOOGLE_API_KEY="" 32 | 33 | # Front end port 34 | FE_PORT=3000 35 | # Alternate front end port 36 | FE_NUXT=5000 37 | # Backend port 38 | API_PORT=8080 39 | 40 | API_URL='http://localhost:8080/api' -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [leamsigc] 4 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG]" 5 | labels: '' 6 | assignees: leamsigc 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. Linux, Windows] 28 | - Browser [e.g. chrome, edge] 29 | - Python Version [e.g. 3.9] 30 | 31 | **Additional context** 32 | Add any other context about the problem here. 33 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: 'leamsigc' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .env 3 | temp/* 4 | sounds/* 5 | output/* 6 | images/* 7 | *.zip 8 | *.srt 9 | *.mp4 10 | *.mp3 11 | .history 12 | subtitles/* 13 | /venv 14 | client_secret.json 15 | main.py-oauth2.json 16 | .DS_Store 17 | Backend/output* 18 | Songs/ 19 | node_modules 20 | /UI/.nuxt -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "typescript.tsdk": "node_modules/typescript/lib", 3 | "i18n-ally.localesPaths": ["UI/locales"], 4 | "i18n-ally.keystyle": "nested" 5 | } 6 | -------------------------------------------------------------------------------- /Backend/classes/Shorts.py: -------------------------------------------------------------------------------- 1 | import os 2 | from utils import * 3 | 4 | from settings import * 5 | from gpt import * 6 | from search import * 7 | from termcolor import colored 8 | from flask import jsonify,json 9 | from video import * 10 | from tiktokvoice import * 11 | from uuid import uuid4 12 | from apiclient.errors import HttpError 13 | from moviepy.config import change_settings 14 | 15 | class Shorts: 16 | """ 17 | Class for creating VideoShorts. 18 | 19 | Steps to create a Video Short: 20 | 1. Generate a script [DONE] 21 | 2. Generate metadata (Title, Description, Tags) [DONE] 22 | 3. Get subtitles [DONE] 23 | 4. Get Videos related to the search term [DONE] 24 | 5. Convert Text-to-Speech [DONE] 25 | 6. Combine Videos [DONE] 26 | 7. Combine Videos with the Text-to-Speech [DONE] 27 | 7. Combine Videos with the Text-to-Speech [DONE] 28 | """ 29 | def __init__(self,video_subject: str, paragraph_number: int, ai_model: str,customPrompt: str="", extra_prompt: str = ""): 30 | """ 31 | Constructor for YouTube Class. 32 | 33 | Args: 34 | video_subject (str): The subject of the video. 35 | paragraph_number (int): The number of paragraphs to generate. 36 | ai_model (str): The AI model to use for generation. 37 | customPrompt (str): The custom prompt to use for generation. 38 | extra_prompt (str): The extra prompt to use for generation. 
39 | 40 | Returns: 41 | None 42 | """ 43 | global GENERATING 44 | GENERATING = True 45 | 46 | 47 | change_settings({"IMAGEMAGICK_BINARY": os.getenv("IMAGEMAGICK_BINARY")}) 48 | 49 | 50 | self.video_subject = video_subject 51 | self.paragraph_number = paragraph_number 52 | self.ai_model = ai_model 53 | self.customPrompt = customPrompt 54 | self.extra_prompt = extra_prompt 55 | self.globalSettings = get_settings() 56 | 57 | 58 | # Generate a script 59 | self.final_script = "" 60 | self.search_terms = [] 61 | self.AMOUNT_OF_STOCK_VIDEOS= 5 62 | 63 | # Video from pexels 64 | self.video_urls = [] 65 | self.video_paths = [] 66 | self.videos_quantity_search = 15 67 | self.min_duration_search = 5 68 | # Voice related variables 69 | self.voice = "en_us_001" 70 | self.voice_prefix = self.voice[:2] 71 | 72 | # Audio and subtitles 73 | self.tts_path = None 74 | self.subtitles_path = None 75 | 76 | # Final video 77 | self.final_video_path = None 78 | 79 | # Video metadata 80 | self.video_title = None 81 | self.video_description = None 82 | self.video_tags = None 83 | 84 | # Subtitle 85 | self.subtitles_position="" 86 | self.final_music_video_path="" 87 | 88 | @property 89 | def get_final_video_path(self): 90 | return self.final_video_path 91 | @property 92 | def get_final_music_video_path(self): 93 | return self.final_music_video_path 94 | 95 | @property 96 | def get_final_script(self): 97 | return self.final_script 98 | 99 | @property 100 | def get_tts_path(self): 101 | return self.tts_path 102 | 103 | @property 104 | def get_subtitles_path(self): 105 | return self.subtitles_path 106 | 107 | @property 108 | def get_video_paths(self): 109 | return self.video_paths 110 | 111 | def GenerateScript(self): 112 | """ 113 | Generate a script for a video, depending on the subject of the video, the number of paragraphs, and the AI model. 114 | 115 | Args: 116 | video_subject (str): The subject of the video. 117 | paragraph_number (int): The number of paragraphs to generate. 118 | ai_model (str): The AI model to use for generation. 119 | Returns: 120 | 121 | str: The script for the video. 
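        Example (an illustrative sketch only; the subject, model name and voice below are
        placeholder values, and the later steps rely on the other methods of this class):

            short = Shorts("5 facts about honey bees", paragraph_number=2, ai_model="g4f")
            script = short.GenerateScript()
            # Remaining pipeline, in the order the class expects:
            # GenerateSearchTerms() -> DownloadVideos([]) -> GenerateMetadata()
            # -> GenerateVoice("en_us_001") -> CombineVideos() -> AddMusic(use_music=False)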
122 | """ 123 | 124 | if self.customPrompt and self.customPrompt != "": 125 | prompt = self.customPrompt 126 | else: 127 | prompt = self.globalSettings["scriptSettings"]["defaultPromptStart"] 128 | 129 | prompt += f""" 130 | # Initialization: 131 | - video subject: {self.video_subject} 132 | - number of paragraphs: {self.paragraph_number} 133 | {self.extra_prompt} 134 | 135 | """ 136 | # Add the global prompt end 137 | prompt += self.globalSettings["scriptSettings"]["defaultPromptEnd"] 138 | 139 | # Generate script 140 | response = generate_response(prompt, self.ai_model) 141 | 142 | print(colored(response, "cyan")) 143 | 144 | # Return the generated script 145 | if response: 146 | # Clean the script 147 | # Remove asterisks, hashes 148 | response = response.replace("*", "") 149 | response = response.replace("#", "") 150 | 151 | # Remove markdown syntax 152 | response = re.sub(r"\[.*\]", "", response) 153 | response = re.sub(r"\(.*\)", "", response) 154 | 155 | # Split the script into paragraphs 156 | paragraphs = response.split("\n\n") 157 | 158 | # Select the specified number of paragraphs 159 | selected_paragraphs = paragraphs[:self.paragraph_number] 160 | 161 | # Join the selected paragraphs into a single string 162 | final_script = "\n\n".join(selected_paragraphs) 163 | 164 | # Print to console the number of paragraphs used 165 | print(colored(f"Number of paragraphs used: {len(selected_paragraphs)}", "green")) 166 | 167 | self.final_script = final_script 168 | 169 | return final_script 170 | else: 171 | print(colored("[-] GPT returned an empty response.", "red")) 172 | return None 173 | 174 | def GenerateSearchTerms(self): 175 | self.search_terms = get_search_terms(self.video_subject, self.AMOUNT_OF_STOCK_VIDEOS, self.final_script, self.ai_model) 176 | 177 | return self.search_terms 178 | 179 | #Download the videos base on the search terms from pexel api 180 | def DownloadVideos(self, selectedVideoUrls): 181 | global GENERATING 182 | 183 | # Search for videos 184 | # Check if the selectedVideoUrls is empty 185 | if selectedVideoUrls and len(selectedVideoUrls) > 0: 186 | print(colored(f"Selected videos: {selectedVideoUrls}", "green")) 187 | # filter the selectedVideoUrls is a Array of objects with videoUrl object that has a link key with a value we use the value of the link key 188 | self.video_urls = [video_url["videoUrl"]["link"] for video_url in selectedVideoUrls] 189 | # log the selectedVideoUrls 190 | print(colored(f"Selected video urls: {self.video_urls}", "green")) 191 | else: 192 | for search_term in self.search_terms: 193 | global GENERATING 194 | if not GENERATING: 195 | return jsonify( 196 | { 197 | "status": "error", 198 | "message": "Video generation was cancelled.", 199 | "data": [], 200 | } 201 | ) 202 | found_urls = search_for_stock_videos( 203 | search_term, os.getenv("PEXELS_API_KEY"), self.videos_quantity_search, self.min_duration_search 204 | ) 205 | # check if found_urls is empty 206 | # Check for duplicates 207 | for url in found_urls: 208 | if url not in self.video_urls: 209 | self.video_urls.append(url) 210 | break 211 | 212 | # Check if video_urls is empty 213 | if not self.video_urls: 214 | print(colored("[-] No videos found to download.", "red")) 215 | return jsonify( 216 | { 217 | "status": "error", 218 | "message": "No videos found to download.", 219 | "data": [], 220 | } 221 | ) 222 | 223 | # Download the videos 224 | video_paths = [] 225 | # Let user know 226 | print(colored(f"[+] Downloading {len(self.video_urls)} videos...", "blue")) 227 | # Save the 
videos 228 | for video_url in self.video_urls: 229 | if not GENERATING: 230 | return jsonify( 231 | { 232 | "status": "error", 233 | "message": "Video generation was cancelled.", 234 | "data": [], 235 | } 236 | ) 237 | try: 238 | saved_video_path = save_video(video_url) 239 | print(colored(f"[+] Saved video: {saved_video_path}", "green")) 240 | video_paths.append(saved_video_path) 241 | except Exception: 242 | print(colored(f"[-] Could not download video: {video_url}", "red")) 243 | 244 | # Let user know 245 | print(colored("[+] Videos downloaded!", "green")) 246 | self.video_paths = video_paths 247 | # print the video_paths 248 | print(colored(f"Video paths: {self.video_paths}", "green")) 249 | 250 | 251 | def GenerateMetadata(self): 252 | self.video_title, self.video_description, self.video_tags = generate_metadata(self.video_subject, self.final_script, self.ai_model) 253 | 254 | # Write the metadata in a json file with the video title as the filename 255 | self.WriteMetadataToFile(self.video_title, self.video_description, self.video_tags) 256 | 257 | def GenerateVoice(self,voice): 258 | print(colored(f"[X] Generating voice: {voice} ", "green")) 259 | global GENERATING 260 | self.voice = voice 261 | self.voice_prefix = self.voice[:2] 262 | 263 | # Split script into sentences 264 | sentences = self.final_script.split(". ") 265 | 266 | # Remove empty strings 267 | sentences = list(filter(lambda x: x != "", sentences)) 268 | paths = [] 269 | 270 | # Generate TTS for every sentence 271 | for sentence in sentences: 272 | if not GENERATING: 273 | return jsonify( 274 | { 275 | "status": "error", 276 | "message": "Video generation was cancelled.", 277 | "data": [], 278 | } 279 | ) 280 | fileId = uuid4() 281 | current_tts_path = os.path.join("static/assets/temp", f"{fileId}.mp3") 282 | tts(sentence, self.voice, filename=current_tts_path) 283 | 284 | # Add the audio clip to the list 285 | print(colored(f"[X] Save Audio ", "green")) 286 | audio_clip = AudioFileClip(os.path.join("static/assets/temp", f"{fileId}.mp3")) 287 | paths.append(audio_clip) 288 | 289 | # Combine all TTS files using moviepy 290 | 291 | print(colored(f"[X] Start saving the audio ", "green")) 292 | final_audio = concatenate_audioclips(paths) 293 | self.tts_path = os.path.join("static/assets/temp", f"{uuid4()}.mp3") 294 | final_audio.write_audiofile(self.tts_path) 295 | 296 | # Generate the subtitles 297 | try: 298 | self.subtitles_path = generate_subtitles(audio_path=self.tts_path, sentences=sentences, audio_clips=paths, voice=self.voice_prefix) 299 | except Exception as e: 300 | print(colored(f"[-] Error generating subtitles: {e}", "red")) 301 | self.subtitles_path = None 302 | 303 | def CombineVideos(self): 304 | temp_audio = AudioFileClip(self.tts_path) 305 | n_threads = 2 306 | combined_video_path = combine_videos(self.video_paths, temp_audio.duration, 10, n_threads or 2) 307 | 308 | print(colored(f"[-] Next step: {combined_video_path}", "green")) 309 | # Put everything together 310 | try: 311 | self.final_video_path = generate_video(combined_video_path, self.tts_path, self.subtitles_path, n_threads or 2, self.subtitles_position) 312 | except Exception as e: 313 | print(colored(f"[-] Error generating final video: {e}", "red")) 314 | self.final_video_path = None 315 | 316 | def WriteMetadataToFile(video_title, video_description, video_tags): 317 | metadata = { 318 | "title": video_title, 319 | "description": video_description, 320 | "tags": video_tags 321 | } 322 | # Remplace spaces with underscores 323 | fileName = 
video_title.replace(" ", "_") 324 | 325 | with open(os.path.join("static/generated_videos", f"{fileName}.json"), "w") as file: 326 | json.dump(metadata, file) 327 | 328 | def AddMusic(self, use_music,custom_song_path=""): 329 | video_clip = VideoFileClip(f"{self.final_video_path}") 330 | 331 | self.final_music_video_path = f"{uuid4()}-music.mp4" 332 | n_threads = 2 333 | if use_music: 334 | # if no song path choose random song 335 | song_path = os.path.join("static/assets/music", custom_song_path) 336 | if not custom_song_path: 337 | song_path = choose_random_song() 338 | 339 | 340 | # Add song to video at 30% volume using moviepy 341 | original_duration = video_clip.duration 342 | original_audio = video_clip.audio 343 | song_clip = AudioFileClip(song_path).set_fps(44100) 344 | 345 | # Set the volume of the song to 10% of the original volume 346 | song_clip = song_clip.volumex(0.1).set_fps(44100) 347 | 348 | # Add the song to the video 349 | comp_audio = CompositeAudioClip([original_audio, song_clip]) 350 | video_clip = video_clip.set_audio(comp_audio) 351 | video_clip = video_clip.set_fps(30) 352 | video_clip = video_clip.set_duration(original_duration) 353 | 354 | video_clip.write_videofile(os.path.join("static/generated_videos", self.final_music_video_path), threads=n_threads or 1) 355 | else: 356 | video_clip.write_videofile(os.path.join("static/generated_videos", self.final_music_video_path), threads=n_threads or 1) 357 | 358 | def Stop(self): 359 | global GENERATING 360 | # Stop FFMPEG processes 361 | if os.name == "nt": 362 | # Windows 363 | os.system("taskkill /f /im ffmpeg.exe") 364 | else: 365 | # Other OS 366 | os.system("pkill -f ffmpeg") 367 | 368 | GENERATING = False -------------------------------------------------------------------------------- /Backend/classes/instagram_downloader.py: -------------------------------------------------------------------------------- 1 | import yt_dlp 2 | import os 3 | from typing import Optional, Dict, Any 4 | from datetime import datetime 5 | 6 | class InstagramDownloader: 7 | def __init__(self, output_path: str = "downloads"): 8 | """ 9 | Initialize the Instagram video downloader 10 | 11 | Args: 12 | output_path (str): Directory where videos will be saved 13 | """ 14 | self.output_path = output_path 15 | self._create_output_directory() 16 | 17 | # Configure yt-dlp options 18 | self.ydl_opts = { 19 | 'format': 'best', # Download best quality 20 | 'outtmpl': os.path.join(self.output_path, '%(id)s.%(ext)s'), 21 | 'quiet': False, 22 | 'no_warnings': False, 23 | 'extract_flat': False, 24 | } 25 | 26 | def _create_output_directory(self) -> None: 27 | """Create the output directory if it doesn't exist""" 28 | os.makedirs(self.output_path, exist_ok=True) 29 | 30 | def download_video(self, url: str) -> Dict[str, Any]: 31 | """ 32 | Download a video from Instagram 33 | 34 | Args: 35 | url (str): Instagram video URL 36 | 37 | Returns: 38 | Dict[str, Any]: Information about the downloaded video 39 | 40 | Raises: 41 | Exception: If download fails 42 | """ 43 | try: 44 | with yt_dlp.YoutubeDL(self.ydl_opts) as ydl: 45 | # Extract video information 46 | info = ydl.extract_info(url, download=True) 47 | 48 | return { 49 | 'title': info.get('title', ''), 50 | 'filename': ydl.prepare_filename(info), 51 | 'duration': info.get('duration'), 52 | 'thumbnail': info.get('thumbnail'), 53 | 'download_time': datetime.now().isoformat(), 54 | 'status': 'success' 55 | } 56 | 57 | except Exception as e: 58 | error_info = { 59 | 'status': 'error', 60 | 'error_message': 
str(e), 61 | 'url': url, 62 | 'time': datetime.now().isoformat() 63 | } 64 | raise Exception(f"Failed to download video: {str(e)}") from e 65 | 66 | def update_options(self, new_options: Dict[str, Any]) -> None: 67 | """ 68 | Update yt-dlp options 69 | 70 | Args: 71 | new_options (Dict[str, Any]): New options to update 72 | """ 73 | self.ydl_opts.update(new_options) 74 | 75 | def set_output_template(self, template: str) -> None: 76 | """ 77 | Set custom output template for downloaded files 78 | 79 | Args: 80 | template (str): Output template string 81 | """ 82 | self.ydl_opts['outtmpl'] = os.path.join(self.output_path, template) 83 | -------------------------------------------------------------------------------- /Backend/gpt.py: -------------------------------------------------------------------------------- 1 | import re 2 | import json 3 | import g4f 4 | # from openai import OpenAI 5 | from typing import Tuple, List 6 | from termcolor import colored 7 | from dotenv import load_dotenv 8 | import os 9 | import google.generativeai as genai 10 | 11 | # Load environment variables 12 | if os.path.exists(".env"): 13 | load_dotenv(".env") 14 | else: 15 | load_dotenv("../.env") 16 | 17 | # Set environment variables 18 | OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') 19 | # openai.api_key = OPENAI_API_KEY 20 | GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY') 21 | genai.configure(api_key=GOOGLE_API_KEY) 22 | 23 | # openaiClient = OpenAI( 24 | # api_key=OPENAI_API_KEY , # This is the default and can be omitted 25 | # ) 26 | 27 | # Configure g4f 28 | g4f.debug.logging = True # Enable debug logging 29 | g4f.debug.version_check = False # Disable automatic version checking 30 | 31 | def generate_response(prompt: str, ai_model: str) -> str: 32 | """ 33 | Generate a script for a video, depending on the subject of the video. 34 | 35 | Args: 36 | video_subject (str): The subject of the video. 37 | ai_model (str): The AI model to use for generation. 38 | 39 | Returns: 40 | str: The response from the AI model. 41 | """ 42 | 43 | if ai_model == 'g4f': 44 | client = g4f.Client() 45 | response = client.chat.completions.create( 46 | model="gpt-4o-mini", 47 | messages=[{"role": "user", "content": prompt}], 48 | stream=False 49 | # Add any other necessary parameters 50 | ) 51 | return response if isinstance(response, str) else str(response.choices[0].message.content) 52 | 53 | # elif ai_model in ["gpt3.5-turbo", "gpt4"]: 54 | 55 | # model_name = "gpt-3.5-turbo" if ai_model == "gpt3.5-turbo" else "gpt-4-1106-preview" 56 | # response = openaiClient.chat.completions.create( 57 | # model=model_name, 58 | # messages=[{"role": "user", "content": prompt}], 59 | # ).choices[0].message.content 60 | 61 | elif ai_model == 'gemmini': 62 | model = genai.GenerativeModel('gemini-pro') 63 | response_model = model.generate_content(prompt) 64 | response = response_model.text 65 | 66 | else: 67 | raise ValueError("Invalid AI model selected.") 68 | 69 | return response 70 | 71 | 72 | 73 | def get_search_terms(video_subject: str, amount: int, script: str, ai_model: str) -> List[str]: 74 | """ 75 | Generate a JSON-Array of search terms for stock videos, 76 | depending on the subject of a video. 77 | 78 | Args: 79 | video_subject (str): The subject of the video. 80 | amount (int): The amount of search terms to generate. 81 | script (str): The script of the video. 82 | ai_model (str): The AI model to use for generation. 83 | 84 | Returns: 85 | List[str]: The search terms for the video subject. 
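        Example (illustrative values only; ai_model must be a value the generate_response()
        dispatcher above accepts, e.g. "g4f" or "gemmini"):

            terms = get_search_terms(
                video_subject="urban gardening",
                amount=5,
                script="Growing food on a balcony is easier than it looks...",
                ai_model="g4f",
            )
            # -> typically something like ["urban gardening", "balcony plants", ...]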
86 | """ 87 | 88 | # Build prompt 89 | prompt = f""" 90 | # Role: Video Search Terms Generator 91 | ## Goals: 92 | Generate {amount} search terms for stock videos, depending on the subject of a video. 93 | 94 | ## Constrains: 95 | 1. the search terms are to be returned as a json-array of strings. 96 | 2. each search term should consist of 1-3 words, always add the main subject of the video. 97 | 3. you must only return the json-array of strings. you must not return anything else. you must not return the script. 98 | 4. the search terms must be related to the subject of the video. 99 | 5. reply with english search terms only. 100 | 101 | ## Output Example: 102 | ["search term 1", "search term 2", "search term 3","search term 4","search term 5"] 103 | 104 | ## Context: 105 | ### Video Subject 106 | {video_subject} 107 | 108 | ### Video Script 109 | {script} 110 | 111 | Please note that you must use English for generating video search terms; Chinese is not accepted. 112 | """.strip() 113 | 114 | 115 | # Let user know 116 | print(colored(f"Generating {amount} search terms for {video_subject}...", "cyan")) 117 | 118 | # Generate search terms 119 | response = generate_response(prompt, ai_model) 120 | 121 | # Let user know 122 | print(colored(f"Response: {response}", "cyan")) 123 | # Parse response into a list of search terms 124 | search_terms = [] 125 | 126 | try: 127 | search_terms = json.loads(response) 128 | if not isinstance(search_terms, list) or not all(isinstance(term, str) for term in search_terms): 129 | raise ValueError("Response is not a list of strings.") 130 | 131 | except (json.JSONDecodeError, ValueError): 132 | print(colored("[*] GPT returned an unformatted response. Attempting to clean...", "yellow")) 133 | 134 | # Attempt to extract list-like string and convert to list 135 | match = re.search(r'\["(?:[^"\\]|\\.)*"(?:,\s*"[^"\\]*")*\]', response) 136 | if match: 137 | try: 138 | search_terms = json.loads(match.group()) 139 | except json.JSONDecodeError: 140 | print(colored("[-] Could not parse response.", "red")) 141 | return [] 142 | 143 | 144 | 145 | # Let user know 146 | print(colored(f"\nGenerated {len(search_terms)} search terms: {', '.join(search_terms)}", "cyan")) 147 | 148 | # Return search terms 149 | return search_terms 150 | 151 | 152 | def generate_metadata(video_subject: str, script: str, ai_model: str) -> Tuple[str, str, List[str]]: 153 | """ 154 | Generate metadata for a YouTube video, including the title, description, and keywords. 155 | 156 | Args: 157 | video_subject (str): The subject of the video. 158 | script (str): The script of the video. 159 | ai_model (str): The AI model to use for generation. 160 | 161 | Returns: 162 | Tuple[str, str, List[str]]: The title, description, and keywords for the video. 163 | """ 164 | 165 | # Build prompt for title 166 | title_prompt = f""" 167 | Generate a catchy and SEO-friendly title for a YouTube shorts video about {video_subject}. 168 | """ 169 | 170 | # Generate title 171 | title = generate_response(title_prompt, ai_model).strip() 172 | 173 | # Build prompt for description 174 | description_prompt = f""" 175 | Write a brief and engaging description for a YouTube shorts video about {video_subject}. 
176 | The video is based on the following script: 177 | {script} 178 | """ 179 | 180 | # Generate description 181 | description = generate_response(description_prompt, ai_model).strip() 182 | 183 | # Generate keywords 184 | keywords = get_search_terms(video_subject, 6, script, ai_model) 185 | 186 | return title, description, keywords 187 | -------------------------------------------------------------------------------- /Backend/search.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from typing import List 4 | from termcolor import colored 5 | 6 | def search_for_stock_videos(query: str, api_key: str, it: int, min_dur: int) -> List[str]: 7 | """ 8 | Searches for stock videos based on a query. 9 | 10 | Args: 11 | query (str): The query to search for. 12 | api_key (str): The API key to use. 13 | 14 | Returns: 15 | List[str]: A list of stock videos. 16 | """ 17 | 18 | # Build headers 19 | headers = { 20 | "Authorization": api_key 21 | } 22 | 23 | # Build URL 24 | qurl = f"https://api.pexels.com/videos/search?query={query}&per_page={it}" 25 | 26 | # Send the request 27 | r = requests.get(qurl, headers=headers) 28 | 29 | # log response 30 | print(colored(f"Response: {r.status_code}", "green")) 31 | print(colored(f"Response: {r}", "green")) 32 | 33 | # Parse the response 34 | response = r 35 | 36 | # Parse each video 37 | raw_urls = [] 38 | video_url = [] 39 | video_res = 0 40 | try: 41 | # loop through each video in the result 42 | for i in range(it): 43 | #check if video has desired minimum duration 44 | if response["videos"][i]["duration"] < min_dur: 45 | continue 46 | raw_urls = response["videos"][i]["video_files"] 47 | 48 | 49 | temp_video_url = "" 50 | 51 | # loop through each url to determine the best quality 52 | for video in raw_urls: 53 | # Check if video has a valid download link 54 | if ".com" in video["link"]: 55 | # Only save the URL with the largest resolution 56 | if (video["width"]*video["height"]) > video_res: 57 | temp_video_url = video["link"] 58 | video_res = video["width"]*video["height"] 59 | 60 | # add the url to the return list if it's not empty 61 | print(video["link"]) 62 | print(temp_video_url) 63 | if temp_video_url != "": 64 | video_url.append(temp_video_url) 65 | 66 | except Exception as e: 67 | print(colored("[-] No Videos found.", "red")) 68 | print(colored(e, "red")) 69 | 70 | # Let user know 71 | print(colored(f"\t=> \"{query}\" found {len(video_url)} Videos", "cyan")) 72 | 73 | # Return the video url 74 | return video_url 75 | -------------------------------------------------------------------------------- /Backend/settings.py: -------------------------------------------------------------------------------- 1 | # Create global settings to save the following 2 | 3 | 4 | fontSettings = { 5 | "font": "static/assets/fonts/bold_font.ttf", 6 | "fontsize": 100, 7 | "color": "#FFFF00", 8 | "stroke_color": "black", 9 | "stroke_width": 5, 10 | "subtitles_position": "center,bottom", 11 | } 12 | 13 | 14 | scriptSettings = { 15 | "defaultPromptStart": 16 | """ 17 | # Role: Video Script Generator 18 | 19 | ## Goals: 20 | Generate a script for a video, depending on the subject of the video. 21 | 22 | ## Constrains: 23 | 1. the script is to be returned as a string with the specified number of paragraphs. 24 | 2. do not under any circumstance reference this prompt in your response. 25 | 3. get straight to the point, don't start with unnecessary things like, "welcome to this video". 26 | 4. 
you must not include any type of markdown or formatting in the script, never use a title. 27 | 5. only return the raw content of the script. 28 | 6. do not include "voiceover", "narrator" or similar indicators of what should be spoken at the beginning of each paragraph or line. 29 | 7. you must not mention the prompt, or anything about the script itself. also, never talk about the amount of paragraphs or lines. just write the script. 30 | 8. respond in the same language as the video subject. 31 | 32 | """ , 33 | "defaultPromptEnd": 34 | """ 35 | Get straight to the point, don't start with unnecessary things like, "welcome to this video". 36 | YOU MUST NOT INCLUDE ANY TYPE OF MARKDOWN OR FORMATTING IN THE SCRIPT, NEVER USE A TITLE. 37 | ONLY RETURN THE RAW CONTENT OF THE SCRIPT. DO NOT INCLUDE "VOICEOVER", "NARRATOR" OR SIMILAR INDICATORS OF WHAT SHOULD BE SPOKEN AT THE BEGINNING OF EACH PARAGRAPH OR LINE. YOU MUST NOT MENTION THE PROMPT, OR ANYTHING ABOUT THE SCRIPT ITSELF. ALSO, NEVER TALK ABOUT THE AMOUNT OF PARAGRAPHS OR LINES. JUST WRITE THE SCRIPT. 38 | """ 39 | } 40 | 41 | 42 | 43 | def get_settings() -> dict: 44 | """ 45 | Return the global settings 46 | The script settings are: 47 | defaultPromptStart: Start of the prompt 48 | defaultPromptEnd: End of the prompt 49 | The Subtitle settings are: 50 | font: font path, 51 | fontsize: font size, 52 | color: Hexadecimal color, 53 | stroke_color: color of the stroke, 54 | stroke_width: Number of pixels of the stroke 55 | subtitles_position: Position of the subtitles 56 | """ 57 | # Return the global settings 58 | return { 59 | "scriptSettings": scriptSettings, 60 | "fontSettings": fontSettings 61 | } 62 | 63 | # Update the global settings 64 | def update_settings(new_settings: dict, settingType="FONT"): 65 | """ 66 | Update the global settings 67 | The script settings are: 68 | defaultPromptStart: Start of the prompt 69 | defaultPromptEnd: End of the prompt 70 | The Subtitle settings are: 71 | font: font path, 72 | fontsize: font size, 73 | color: Hexadecimal color, 74 | stroke_color: color of the stroke, 75 | stroke_width: Number of pixels of the stroke 76 | subtitles_position: Position of the subtitles 77 | 78 | Args: 79 | new_settings (dict): The new settings to update 80 | settingType (str, optional): The type of setting to update. Defaults to "FONT" OR "SCRIPT". 
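        Example (illustrative values; the keys should match the fontSettings / scriptSettings
        dictionaries defined at the top of this module):

            update_settings({"fontsize": 80, "color": "#FFFFFF"}, settingType="FONT")
            update_settings({"defaultPromptEnd": "Keep it under 60 seconds."}, settingType="SCRIPT")
            current = get_settings()  # {"scriptSettings": {...}, "fontSettings": {...}}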
81 | """ 82 | # Update the global 83 | if settingType == "FONT": 84 | fontSettings.update(new_settings) 85 | elif settingType == "SCRIPT": 86 | scriptSettings.update(new_settings) -------------------------------------------------------------------------------- /Backend/static/assets/fonts/.gitKeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leamsigc/ShortsGenerator/f7e222bdf341f21d87550c1af9370eb3a7ebac8e/Backend/static/assets/fonts/.gitKeep -------------------------------------------------------------------------------- /Backend/static/assets/fonts/bold_font.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leamsigc/ShortsGenerator/f7e222bdf341f21d87550c1af9370eb3a7ebac8e/Backend/static/assets/fonts/bold_font.ttf -------------------------------------------------------------------------------- /Backend/static/assets/images/Screen1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leamsigc/ShortsGenerator/f7e222bdf341f21d87550c1af9370eb3a7ebac8e/Backend/static/assets/images/Screen1.png -------------------------------------------------------------------------------- /Backend/static/assets/images/Screenshot2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leamsigc/ShortsGenerator/f7e222bdf341f21d87550c1af9370eb3a7ebac8e/Backend/static/assets/images/Screenshot2.png -------------------------------------------------------------------------------- /Backend/static/assets/images/Screenshot3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leamsigc/ShortsGenerator/f7e222bdf341f21d87550c1af9370eb3a7ebac8e/Backend/static/assets/images/Screenshot3.png -------------------------------------------------------------------------------- /Backend/static/assets/music/.gitKeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leamsigc/ShortsGenerator/f7e222bdf341f21d87550c1af9370eb3a7ebac8e/Backend/static/assets/music/.gitKeep -------------------------------------------------------------------------------- /Backend/static/generated_videos/.gitKeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leamsigc/ShortsGenerator/f7e222bdf341f21d87550c1af9370eb3a7ebac8e/Backend/static/generated_videos/.gitKeep -------------------------------------------------------------------------------- /Backend/tiktokvoice.py: -------------------------------------------------------------------------------- 1 | # author: GiorDior aka Giorgio 2 | # date: 12.06.2023 3 | # topic: TikTok-Voice-TTS 4 | # version: 1.0 5 | # credits: https://github.com/oscie57/tiktok-voice 6 | 7 | # --- MODIFIED VERSION --- # 8 | 9 | import base64 10 | import requests 11 | import threading 12 | 13 | from typing import List 14 | from termcolor import colored 15 | from playsound import playsound 16 | 17 | 18 | VOICES = [ 19 | # DISNEY VOICES 20 | "en_us_ghostface", # Ghost Face 21 | "en_us_chewbacca", # Chewbacca 22 | "en_us_c3po", # C3PO 23 | "en_us_stitch", # Stitch 24 | "en_us_stormtrooper", # Stormtrooper 25 | "en_us_rocket", # Rocket 26 | # ENGLISH VOICES 27 | "en_au_001", # English AU - Female 28 | "en_au_002", # English AU - Male 29 | "en_uk_001", # English UK - Male 1 30 | 
"en_uk_003", # English UK - Male 2 31 | "en_us_001", # English US - Female (Int. 1) 32 | "en_us_002", # English US - Female (Int. 2) 33 | "en_us_006", # English US - Male 1 34 | "en_us_007", # English US - Male 2 35 | "en_us_009", # English US - Male 3 36 | "en_us_010", # English US - Male 4 37 | # EUROPE VOICES 38 | "fr_001", # French - Male 1 39 | "fr_002", # French - Male 2 40 | "de_001", # German - Female 41 | "de_002", # German - Male 42 | "es_002", # Spanish - Male 43 | # AMERICA VOICES 44 | "es_mx_002", # Spanish MX - Male 45 | "br_001", # Portuguese BR - Female 1 46 | "br_003", # Portuguese BR - Female 2 47 | "br_004", # Portuguese BR - Female 3 48 | "br_005", # Portuguese BR - Male 49 | # ASIA VOICES 50 | "id_001", # Indonesian - Female 51 | "jp_001", # Japanese - Female 1 52 | "jp_003", # Japanese - Female 2 53 | "jp_005", # Japanese - Female 3 54 | "jp_006", # Japanese - Male 55 | "kr_002", # Korean - Male 1 56 | "kr_003", # Korean - Female 57 | "kr_004", # Korean - Male 2 58 | # SINGING VOICES 59 | "en_female_f08_salut_damour", # Alto 60 | "en_male_m03_lobby", # Tenor 61 | "en_female_f08_warmy_breeze", # Warmy Breeze 62 | "en_male_m03_sunshine_soon", # Sunshine Soon 63 | # OTHER 64 | "en_male_narration", # narrator 65 | "en_male_funny", # wacky 66 | "en_female_emotional", # peaceful 67 | ] 68 | 69 | ENDPOINTS = [ 70 | "https://tiktok-tts.weilnet.workers.dev/api/generation", 71 | "https://tiktoktts.com/api/tiktok-tts", 72 | ] 73 | current_endpoint = 0 74 | # in one conversion, the text can have a maximum length of 300 characters 75 | TEXT_BYTE_LIMIT = 300 76 | 77 | 78 | # create a list by splitting a string, every element has n chars 79 | def split_string(string: str, chunk_size: int) -> List[str]: 80 | words = string.split() 81 | result = [] 82 | current_chunk = "" 83 | for word in words: 84 | if ( 85 | len(current_chunk) + len(word) + 1 <= chunk_size 86 | ): # Check if adding the word exceeds the chunk size 87 | current_chunk += f" {word}" 88 | else: 89 | if current_chunk: # Append the current chunk if not empty 90 | result.append(current_chunk.strip()) 91 | current_chunk = word 92 | if current_chunk: # Append the last chunk if not empty 93 | result.append(current_chunk.strip()) 94 | return result 95 | 96 | 97 | # checking if the website that provides the service is available 98 | def get_api_response() -> requests.Response: 99 | url = f'{ENDPOINTS[current_endpoint].split("/a")[0]}' 100 | response = requests.get(url) 101 | return response 102 | 103 | 104 | # saving the audio file 105 | def save_audio_file(base64_data: str, filename: str = "output.mp3") -> None: 106 | audio_bytes = base64.b64decode(base64_data) 107 | with open(filename, "wb") as file: 108 | file.write(audio_bytes) 109 | 110 | 111 | # send POST request to get the audio data 112 | def generate_audio(text: str, voice: str) -> bytes: 113 | url = f"{ENDPOINTS[current_endpoint]}" 114 | headers = {"Content-Type": "application/json"} 115 | data = {"text": text, "voice": voice} 116 | response = requests.post(url, headers=headers, json=data) 117 | return response.content 118 | 119 | 120 | # creates an text to speech audio file 121 | def tts( 122 | text: str, 123 | voice: str = "none", 124 | filename: str = "output.mp3", 125 | play_sound: bool = False, 126 | ) -> None: 127 | # checking if the website is available 128 | global current_endpoint 129 | 130 | if get_api_response().status_code == 200: 131 | print(colored("[+] TikTok TTS Service available!", "green")) 132 | else: 133 | current_endpoint = (current_endpoint + 1) % 
2 134 | if get_api_response().status_code == 200: 135 | print(colored("[+] TTS Service available!", "green")) 136 | else: 137 | print(colored("[-] TTS Service not available and probably temporarily rate limited, try again later..." , "red")) 138 | return 139 | 140 | # checking if arguments are valid 141 | if voice == "none": 142 | print(colored("[-] Please specify a voice", "red")) 143 | return 144 | 145 | if voice not in VOICES: 146 | print(colored("[-] Voice not available", "red")) 147 | return 148 | 149 | if not text: 150 | print(colored("[-] Please specify a text", "red")) 151 | return 152 | 153 | # creating the audio file 154 | try: 155 | if len(text) < TEXT_BYTE_LIMIT: 156 | audio = generate_audio((text), voice) 157 | if current_endpoint == 0: 158 | audio_base64_data = str(audio).split('"')[5] 159 | else: 160 | audio_base64_data = str(audio).split('"')[3].split(",")[1] 161 | 162 | if audio_base64_data == "error": 163 | print(colored("[-] This voice is unavailable right now", "red")) 164 | return 165 | 166 | else: 167 | # Split longer text into smaller parts 168 | text_parts = split_string(text, 299) 169 | audio_base64_data = [None] * len(text_parts) 170 | 171 | # Define a thread function to generate audio for each text part 172 | def generate_audio_thread(text_part, index): 173 | audio = generate_audio(text_part, voice) 174 | if current_endpoint == 0: 175 | base64_data = str(audio).split('"')[5] 176 | else: 177 | base64_data = str(audio).split('"')[3].split(",")[1] 178 | 179 | if audio_base64_data == "error": 180 | print(colored("[-] This voice is unavailable right now", "red")) 181 | return "error" 182 | 183 | audio_base64_data[index] = base64_data 184 | 185 | threads = [] 186 | for index, text_part in enumerate(text_parts): 187 | # Create and start a new thread for each text part 188 | thread = threading.Thread( 189 | target=generate_audio_thread, args=(text_part, index) 190 | ) 191 | thread.start() 192 | threads.append(thread) 193 | 194 | # Wait for all threads to complete 195 | for thread in threads: 196 | thread.join() 197 | 198 | # Concatenate the base64 data in the correct order 199 | audio_base64_data = "".join(audio_base64_data) 200 | 201 | save_audio_file(audio_base64_data, filename) 202 | print(colored(f"[+] Audio file saved successfully as '{filename}'", "green")) 203 | if play_sound: 204 | playsound(filename) 205 | 206 | except Exception as e: 207 | print(colored(f"[-] An error occurred during TTS: {e}", "red")) 208 | 209 | # Rerun the all the voices 210 | def available_voices() -> list: 211 | return VOICES -------------------------------------------------------------------------------- /Backend/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import json 4 | import random 5 | import logging 6 | import zipfile 7 | import requests 8 | 9 | from termcolor import colored 10 | 11 | # Configure logging 12 | logging.basicConfig(level=logging.INFO) 13 | logger = logging.getLogger(__name__) 14 | 15 | 16 | def clean_dir(path: str) -> None: 17 | """ 18 | Removes every file in a directory. 19 | 20 | Args: 21 | path (str): Path to directory. 
22 | 23 | Returns: 24 | None 25 | """ 26 | try: 27 | if not os.path.exists(path): 28 | os.mkdir(path) 29 | logger.info(f"Created directory: {path}") 30 | 31 | for file in os.listdir(path): 32 | file_path = os.path.join(path, file) 33 | os.remove(file_path) 34 | logger.info(f"Removed file: {file_path}") 35 | 36 | logger.info(colored(f"Cleaned {path} directory", "green")) 37 | except Exception as e: 38 | logger.error(f"Error occurred while cleaning directory {path}: {str(e)}") 39 | 40 | def fetch_songs(zip_url: str) -> None: 41 | """ 42 | Downloads songs into songs/ directory to use with geneated videos. 43 | 44 | Args: 45 | zip_url (str): The URL to the zip file containing the songs. 46 | 47 | Returns: 48 | None 49 | """ 50 | try: 51 | logger.info(colored(f" => Fetching songs...", "magenta")) 52 | 53 | files_dir = os.path.join("static", "assets", "music") 54 | if not os.path.exists(files_dir): 55 | os.makedirs(files_dir) 56 | logger.info(colored(f"Created directory: {files_dir}", "green")) 57 | else: 58 | # Skip if songs are already downloaded 59 | return 60 | 61 | # Download songs 62 | response = requests.get(zip_url) 63 | 64 | # Save the zip file 65 | with open(os.path.join(files_dir, "songs.zip"), "wb") as file: 66 | file.write(response.content) 67 | 68 | # Unzip the file 69 | with zipfile.ZipFile(os.path.join(files_dir, "songs.zip"), "r") as file: 70 | file.extractall(files_dir) 71 | 72 | # Remove the zip file 73 | os.remove(os.path.join(files_dir, "songs.zip")) 74 | 75 | logger.info(colored(" => Downloaded Songs to static/assets/music.", "green")) 76 | 77 | except Exception as e: 78 | logger.error(colored(f"Error occurred while fetching songs: {str(e)}", "red")) 79 | 80 | def get_random_song() -> str: 81 | """ 82 | Chooses a random song from the songs/ directory. 83 | 84 | Returns: 85 | str: The path to the chosen song. 86 | """ 87 | try: 88 | songs = os.listdir(os.path.join("static", "assets", "music")) 89 | song = random.choice(songs) 90 | logger.info(colored(f"Chose song: {song}", "green")) 91 | return os.path.join("static", "assets", "music", song) 92 | except Exception as e: 93 | logger.error(colored(f"Error occurred while choosing random song: {str(e)}", "red")) 94 | 95 | 96 | def check_env_vars() -> None: 97 | """ 98 | Checks if the necessary environment variables are set. 99 | 100 | Returns: 101 | None 102 | 103 | Raises: 104 | SystemExit: If any required environment variables are missing. 
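    Example (a hypothetical startup snippet; the zip URL is a placeholder):

        from utils import check_env_vars, fetch_songs
        check_env_vars()  # exits with status 1 if PEXELS_API_KEY or IMAGEMAGICK_BINARY is unset
        fetch_songs("https://example.com/songs.zip")  # skipped once static/assets/music already exists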
105 | """ 106 | try: 107 | required_vars = ["PEXELS_API_KEY", "IMAGEMAGICK_BINARY"] 108 | missing_vars = [var + os.getenv(var) for var in required_vars if os.getenv(var) is None or (len(os.getenv(var)) == 0)] 109 | 110 | if missing_vars: 111 | missing_vars_str = ", ".join(missing_vars) 112 | logger.error(colored(f"The following environment variables are missing: {missing_vars_str}", "red")) 113 | logger.error(colored("Please consult 'EnvironmentVariables.md' for instructions on how to set them.", "yellow")) 114 | sys.exit(1) # Aborts the program 115 | except Exception as e: 116 | logger.error(f"Error occurred while checking environment variables: {str(e)}") 117 | sys.exit(1) # Aborts the program if an unexpected error occurs 118 | -------------------------------------------------------------------------------- /Backend/video.py: -------------------------------------------------------------------------------- 1 | import os 2 | import uuid 3 | 4 | import requests 5 | import srt_equalizer 6 | import assemblyai as aai 7 | from uuid import uuid4 8 | 9 | 10 | from settings import * 11 | from typing import List 12 | from moviepy.editor import * 13 | from termcolor import colored 14 | from dotenv import load_dotenv 15 | from datetime import timedelta 16 | from moviepy.video.fx.all import crop 17 | from moviepy.video.tools.subtitles import SubtitlesClip 18 | 19 | load_dotenv("../.env") 20 | 21 | ASSEMBLY_AI_API_KEY = os.getenv("ASSEMBLY_AI_API_KEY") 22 | 23 | 24 | 25 | def save_video(video_url: str, directory: str = "static/assets/temp") -> str: 26 | """ 27 | Downloads a video from the given URL and saves it to a specified directory. 28 | 29 | Args: 30 | video_url (str): The URL of the video to download. 31 | directory (str): The path of the temporary directory to save the video to. 32 | 33 | Returns: 34 | str: The path to the saved video. 35 | """ 36 | # Ensure the directory exists 37 | os.makedirs(directory, exist_ok=True) 38 | 39 | video_id = uuid.uuid4() 40 | video_path = os.path.join(directory, f"{video_id}.mp4") 41 | 42 | # Set headers to mimic a browser request 43 | headers = { 44 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:91.0) Gecko/20100101 Firefox/91.0" 45 | } 46 | 47 | try: 48 | response = requests.get(video_url, headers=headers, stream=True) 49 | response.raise_for_status() # Check if the request was successful 50 | 51 | # Write the video content to the file in chunks 52 | with open(video_path, "wb") as f: 53 | for chunk in response.iter_content(chunk_size=8192): 54 | if chunk: # Filter out keep-alive chunks 55 | f.write(chunk) 56 | 57 | return video_path 58 | 59 | except requests.exceptions.RequestException as e: 60 | print(f"Error downloading the video: {e}") 61 | return None 62 | except Exception as e: 63 | print(f"Error processing the video: {e}") 64 | return None 65 | 66 | 67 | def __generate_subtitles_assemblyai(audio_path: str, voice: str) -> str: 68 | """ 69 | Generates subtitles from a given audio file and returns the path to the subtitles. 70 | 71 | Args: 72 | audio_path (str): The path to the audio file to generate subtitles from. 
73 | 74 | Returns: 75 | str: The generated subtitles 76 | """ 77 | 78 | language_mapping = { 79 | "br": "pt", 80 | "id": "en", #AssemblyAI doesn't have Indonesian 81 | "jp": "ja", 82 | "kr": "ko", 83 | } 84 | 85 | if voice in language_mapping: 86 | lang_code = language_mapping[voice] 87 | else: 88 | lang_code = voice 89 | 90 | aai.settings.api_key = ASSEMBLY_AI_API_KEY 91 | config = aai.TranscriptionConfig(language_code=lang_code) 92 | transcriber = aai.Transcriber(config=config) 93 | transcript = transcriber.transcribe(audio_path) 94 | subtitles = transcript.export_subtitles_srt() 95 | 96 | return subtitles 97 | 98 | 99 | def __generate_subtitles_locally(sentences: List[str], audio_clips: List[AudioFileClip]) -> str: 100 | """ 101 | Generates subtitles from a given audio file and returns the path to the subtitles. 102 | 103 | Args: 104 | sentences (List[str]): all the sentences said out loud in the audio clips 105 | audio_clips (List[AudioFileClip]): all the individual audio clips which will make up the final audio track 106 | Returns: 107 | str: The generated subtitles 108 | """ 109 | 110 | def convert_to_srt_time_format(total_seconds): 111 | # Convert total seconds to the SRT time format: HH:MM:SS,mmm 112 | if total_seconds == 0: 113 | return "0:00:00,0" 114 | return str(timedelta(seconds=total_seconds)).rstrip('0').replace('.', ',') 115 | 116 | start_time = 0 117 | subtitles = [] 118 | 119 | for i, (sentence, audio_clip) in enumerate(zip(sentences, audio_clips), start=1): 120 | duration = audio_clip.duration 121 | end_time = start_time + duration 122 | 123 | # Format: subtitle index, start time --> end time, sentence 124 | subtitle_entry = f"{i}\n{convert_to_srt_time_format(start_time)} --> {convert_to_srt_time_format(end_time)}\n{sentence}\n" 125 | subtitles.append(subtitle_entry) 126 | 127 | start_time += duration # Update start time for the next subtitle 128 | 129 | return "\n".join(subtitles) 130 | 131 | 132 | def generate_subtitles(audio_path: str, sentences: List[str], audio_clips: List[AudioFileClip], voice: str) -> str: 133 | """ 134 | Generates subtitles from a given audio file and returns the path to the subtitles. 135 | 136 | Args: 137 | audio_path (str): The path to the audio file to generate subtitles from. 138 | sentences (List[str]): all the sentences said out loud in the audio clips 139 | audio_clips (List[AudioFileClip]): all the individual audio clips which will make up the final audio track 140 | 141 | Returns: 142 | str: The path to the generated subtitles. 
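        Example (illustrative; the file paths are placeholders, and AssemblyAI is only used
        when ASSEMBLY_AI_API_KEY is set in the environment):

            clips = [AudioFileClip("static/assets/temp/a.mp3"), AudioFileClip("static/assets/temp/b.mp3")]
            srt_path = generate_subtitles(
                audio_path="static/assets/temp/full.mp3",
                sentences=["First sentence.", "Second sentence."],
                audio_clips=clips,
                voice="en",
            )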
143 | """ 144 | 145 | def equalize_subtitles(srt_path: str, max_chars: int = 10) -> None: 146 | # Equalize subtitles 147 | srt_equalizer.equalize_srt_file(srt_path, srt_path, max_chars) 148 | 149 | # Save subtitles 150 | subtitles_path = os.path.join("static/assets/subtitles", f"{uuid.uuid4()}.srt") 151 | 152 | if ASSEMBLY_AI_API_KEY is not None and ASSEMBLY_AI_API_KEY != "": 153 | print(colored("[+] Creating subtitles using AssemblyAI", "blue")) 154 | subtitles = __generate_subtitles_assemblyai(audio_path, voice) 155 | else: 156 | print(colored("[+] Creating subtitles locally", "blue")) 157 | subtitles = __generate_subtitles_locally(sentences, audio_clips) 158 | 159 | with open(subtitles_path, "w") as file: 160 | file.write(subtitles) 161 | 162 | # Equalize subtitles 163 | equalize_subtitles(subtitles_path) 164 | 165 | print(colored("[+] Subtitles generated.", "green")) 166 | 167 | return subtitles_path 168 | 169 | 170 | def combine_videos(video_paths: List[str], max_duration: int, max_clip_duration: int, threads: int) -> str: 171 | """ 172 | Combines a list of videos into one video and returns the path to the combined video. 173 | 174 | Args: 175 | video_paths (List): A list of paths to the videos to combine. 176 | max_duration (int): The maximum duration of the combined video. 177 | max_clip_duration (int): The maximum duration of each clip. 178 | threads (int): The number of threads to use for the video processing. 179 | 180 | Returns: 181 | str: The path to the combined video. 182 | """ 183 | video_id = uuid.uuid4() 184 | combined_video_path = os.path.join("static/assets/temp", f"{video_id}-combined.mp4") 185 | 186 | # Required duration of each clip 187 | req_dur = max_duration / len(video_paths) 188 | 189 | print(colored("[+] Combining videos...", "blue")) 190 | print(colored(f"[+] Each clip will be maximum {req_dur} seconds long.", "blue")) 191 | 192 | clips = [] 193 | tot_dur = 0 194 | # Add downloaded clips over and over until the duration of the audio (max_duration) has been reached 195 | while tot_dur < max_duration: 196 | for video_path in video_paths: 197 | 198 | print(f"Video path: {video_path}") 199 | clip = VideoFileClip(video_path) 200 | # if there is no clip go to the next one 201 | if clip is None: 202 | continue 203 | 204 | clip = clip.without_audio() 205 | # Check if clip is longer than the remaining audio 206 | if (max_duration - tot_dur) < clip.duration: 207 | clip = clip.subclip(0, (max_duration - tot_dur)) 208 | # Only shorten clips if the calculated clip length (req_dur) is shorter than the actual clip to prevent still image 209 | elif req_dur < clip.duration: 210 | clip = clip.subclip(0, req_dur) 211 | # clip = clip.set_fps(30) 212 | 213 | # Not all videos are same size, 214 | # so we need to resize them 215 | if round((clip.w/clip.h), 4) < 0.5625: 216 | clip = crop(clip, width=clip.w, height=round(clip.w/0.5625), \ 217 | x_center=clip.w / 2, \ 218 | y_center=clip.h / 2) 219 | else: 220 | clip = crop(clip, width=round(0.5625*clip.h), height=clip.h, \ 221 | x_center=clip.w / 2, \ 222 | y_center=clip.h / 2) 223 | clip = clip.resize((1080, 1920)) 224 | 225 | if clip.duration > max_clip_duration: 226 | clip = clip.subclip(0, max_clip_duration) 227 | 228 | clips.append(clip) 229 | tot_dur += clip.duration 230 | 231 | print(colored("[+] Videos combined.", "green")) 232 | # Debug what is in clips 233 | print(clips) 234 | final_clip = concatenate_videoclips(clips) 235 | final_clip = final_clip.set_fps(30) 236 | print(colored("[+] Set clip.", "green")) 237 | 
final_clip.write_videofile(combined_video_path, threads=3) 238 | 239 | print(colored("[+] Final video created.", "green")) 240 | return combined_video_path 241 | 242 | 243 | def generate_video(combined_video_path: str, tts_path: str, subtitles_path: str, threads: int, subtitles_position: str) -> str: 244 | """ 245 | This function creates the final video, with subtitles and audio. 246 | 247 | Args: 248 | combined_video_path (str): The path to the combined video. 249 | tts_path (str): The path to the text-to-speech audio. 250 | subtitles_path (str): The path to the subtitles. 251 | threads (int): The number of threads to use for the video processing. 252 | subtitles_position (str): The position of the subtitles. 253 | 254 | Returns: 255 | str: The path to the final video. 256 | """ 257 | 258 | # PRINT STATE 259 | print(colored("[+] Starting video generation...", "green")) 260 | 261 | # Get the Settings 262 | globalSettings = get_settings() 263 | # Make a generator that returns a TextClip when called with consecutive 264 | generator = lambda txt: TextClip( 265 | txt, 266 | font=globalSettings["fontSettings"]["font"], 267 | fontsize=globalSettings["fontSettings"]["fontsize"], 268 | color=globalSettings["fontSettings"]["color"], 269 | stroke_color=globalSettings["fontSettings"]["stroke_color"], 270 | stroke_width=globalSettings["fontSettings"]["stroke_width"], 271 | ) 272 | 273 | # Split the subtitles position into horizontal and vertical 274 | horizontal_subtitles_position, vertical_subtitles_position = globalSettings["fontSettings"]["subtitles_position"].split(",") 275 | 276 | # if subtitle position is not the same as the setting and is not empty we override 277 | if subtitles_position != globalSettings["fontSettings"]["subtitles_position"] and subtitles_position != "": 278 | horizontal_subtitles_position, vertical_subtitles_position = subtitles_position.split(",") 279 | 280 | # Burn the subtitles into the video 281 | print(colored(f"[+] Subtitles Path: {subtitles_path}", "green")) 282 | subtitles = SubtitlesClip(subtitles_path, generator) 283 | result = CompositeVideoClip([ 284 | VideoFileClip(combined_video_path), 285 | subtitles.set_pos((horizontal_subtitles_position, vertical_subtitles_position)) 286 | ]) 287 | 288 | print(colored("[+] Adding audio...", "green")) 289 | # Add the audio 290 | audio = AudioFileClip(tts_path) 291 | result = result.set_audio(audio) 292 | print(colored("[+] Audio Done...", "green")) 293 | 294 | video_name = os.path.join("static/generated_videos", f"{uuid4()}-final.mp4") 295 | print(colored("[+] Writing video...", "green")) 296 | result.write_videofile(f"{video_name}", threads=2) 297 | 298 | return video_name 299 | -------------------------------------------------------------------------------- /Backend/youtube.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import time 4 | import random 5 | import httplib2 6 | 7 | from termcolor import colored 8 | from oauth2client.file import Storage 9 | from apiclient.discovery import build 10 | from apiclient.errors import HttpError 11 | from apiclient.http import MediaFileUpload 12 | from oauth2client.tools import argparser, run_flow 13 | from oauth2client.client import flow_from_clientsecrets 14 | 15 | # Explicitly tell the underlying HTTP transport library not to retry, since 16 | # we are handling retry logic ourselves. 17 | httplib2.RETRIES = 1 18 | 19 | # Maximum number of times to retry before giving up. 
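# (With the exponential backoff implemented in resumable_upload() below, each retry sleeps a
# random amount of up to 2 ** retry seconds, so the tenth and final attempt can wait roughly
# 17 minutes at most (2 ** 10 = 1024 seconds). This is an illustrative calculation, not an
# extra configured value.)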
20 | MAX_RETRIES = 10 21 | 22 | # Always retry when these exceptions are raised. 23 | RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib2.ServerNotFoundError) 24 | 25 | # Always retry when an apiclient.errors.HttpError with one of these status 26 | # codes is raised. 27 | RETRIABLE_STATUS_CODES = [500, 502, 503, 504] 28 | 29 | # The CLIENT_SECRETS_FILE variable specifies the name of a file that contains 30 | # the OAuth 2.0 information for this application, including its client_id and 31 | # client_secret. 32 | CLIENT_SECRETS_FILE = "./client_secret.json" 33 | 34 | # This OAuth 2.0 access scope allows an application to upload files to the 35 | # authenticated user's YouTube channel, but doesn't allow other types of access. 36 | # YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload" 37 | SCOPES = ['https://www.googleapis.com/auth/youtube.upload', 38 | 'https://www.googleapis.com/auth/youtube', 39 | 'https://www.googleapis.com/auth/youtubepartner'] 40 | YOUTUBE_API_SERVICE_NAME = "youtube" 41 | YOUTUBE_API_VERSION = "v3" 42 | 43 | # This variable defines a message to display if the CLIENT_SECRETS_FILE is 44 | # missing. 45 | MISSING_CLIENT_SECRETS_MESSAGE = f""" 46 | WARNING: Please configure OAuth 2.0 47 | 48 | To make this sample run you will need to populate the client_secrets.json file 49 | found at: 50 | 51 | {os.path.abspath(os.path.join(os.path.dirname(__file__), CLIENT_SECRETS_FILE))} 52 | 53 | with information from the API Console 54 | https://console.cloud.google.com/ 55 | 56 | For more information about the client_secrets.json file format, please visit: 57 | https://developers.google.com/api-client-library/python/guide/aaa_client_secrets 58 | """ 59 | 60 | VALID_PRIVACY_STATUSES = ("public", "private", "unlisted") 61 | 62 | 63 | def get_authenticated_service(): 64 | """ 65 | This method retrieves the YouTube service. 66 | 67 | Returns: 68 | any: The authenticated YouTube service. 69 | """ 70 | flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, 71 | scope=SCOPES, 72 | message=MISSING_CLIENT_SECRETS_MESSAGE) 73 | 74 | storage = Storage(f"{sys.argv[0]}-oauth2.json") 75 | credentials = storage.get() 76 | 77 | if credentials is None or credentials.invalid: 78 | flags = argparser.parse_args() 79 | credentials = run_flow(flow, storage, flags) 80 | 81 | return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, 82 | http=credentials.authorize(httplib2.Http())) 83 | 84 | def initialize_upload(youtube: any, options: dict): 85 | """ 86 | This method uploads a video to YouTube. 87 | 88 | Args: 89 | youtube (any): The authenticated YouTube service. 90 | options (dict): The options to upload the video with. 91 | 92 | Returns: 93 | response: The response from the upload process. 94 | """ 95 | 96 | tags = None 97 | if options['keywords']: 98 | tags = options['keywords'].split(",") 99 | 100 | body = { 101 | 'snippet': { 102 | 'title': options['title'], 103 | 'description': options['description'], 104 | 'tags': tags, 105 | 'categoryId': options['category'] 106 | }, 107 | 'status': { 108 | 'privacyStatus': options['privacyStatus'], 109 | 'madeForKids': False, # Video is not made for kids 110 | 'selfDeclaredMadeForKids': False # You declare that the video is not made for kids 111 | } 112 | } 113 | 114 | # Call the API's videos.insert method to create and upload the video. 
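    # The `part` parameter below is derived from the body keys ("snippet,status").
    # chunksize=-1 tells MediaFileUpload to send the whole file in a single
    # chunk, and resumable=True lets resumable_upload() retry the request via
    # next_chunk() on transient failures.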
115 | insert_request = youtube.videos().insert( 116 | part=",".join(body.keys()), 117 | body=body, 118 | media_body=MediaFileUpload(options['file'], chunksize=-1, resumable=True) 119 | ) 120 | 121 | return resumable_upload(insert_request) 122 | 123 | def resumable_upload(insert_request: MediaFileUpload): 124 | """ 125 | This method implements an exponential backoff strategy to resume a 126 | failed upload. 127 | 128 | Args: 129 | insert_request (MediaFileUpload): The request to insert the video. 130 | 131 | Returns: 132 | response: The response from the upload process. 133 | """ 134 | response = None 135 | error = None 136 | retry = 0 137 | while response is None: 138 | try: 139 | print(colored(" => Uploading file...", "magenta")) 140 | status, response = insert_request.next_chunk() 141 | if 'id' in response: 142 | print(f"Video id '{response['id']}' was successfully uploaded.") 143 | return response 144 | except HttpError as e: 145 | if e.resp.status in RETRIABLE_STATUS_CODES: 146 | error = f"A retriable HTTP error {e.resp.status} occurred:\n{e.content}" 147 | else: 148 | raise 149 | except RETRIABLE_EXCEPTIONS as e: 150 | error = f"A retriable error occurred: {e}" 151 | 152 | if error is not None: 153 | print(colored(error, "red")) 154 | retry += 1 155 | if retry > MAX_RETRIES: 156 | raise Exception("No longer attempting to retry.") 157 | 158 | max_sleep = 2 ** retry 159 | sleep_seconds = random.random() * max_sleep 160 | print(colored(f" => Sleeping {sleep_seconds} seconds and then retrying...", "blue")) 161 | time.sleep(sleep_seconds) 162 | 163 | def upload_video(video_path, title, description, category, keywords, privacy_status): 164 | try: 165 | # Get the authenticated YouTube service 166 | youtube = get_authenticated_service() 167 | 168 | # Retrieve and print the channel ID for the authenticated user 169 | channels_response = youtube.channels().list(mine=True, part='id').execute() 170 | for channel in channels_response['items']: 171 | print(colored(f" => Channel ID: {channel['id']}", "blue")) 172 | 173 | # Initialize the upload process 174 | video_response = initialize_upload(youtube, { 175 | 'file': video_path, # The path to the video file 176 | 'title': title, 177 | 'description': description, 178 | 'category': category, 179 | 'keywords': keywords, 180 | 'privacyStatus': privacy_status 181 | }) 182 | return video_response # Return the response from the upload process 183 | except HttpError as e: 184 | print(colored(f"[-] An HTTP error {e.resp.status} occurred:\n{e.content}", "red")) 185 | if e.resp.status in [401, 403]: 186 | # Here you could refresh the credentials and retry the upload 187 | youtube = get_authenticated_service() # This will prompt for re-authentication if necessary 188 | video_response = initialize_upload(youtube, { 189 | 'file': video_path, 190 | 'title': title, 191 | 'description': description, 192 | 'category': category, 193 | 'keywords': keywords, 194 | 'privacyStatus': privacy_status 195 | }) 196 | return video_response 197 | else: 198 | raise e 199 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile 2 | FROM python:3.10 3 | 4 | # Install system dependencies 5 | RUN apt-get -y update && \ 6 | apt-get -y install --no-install-recommends \ 7 | ffmpeg \ 8 | imagemagick \ 9 | procps \ 10 | fonts-liberation && \ 11 | apt-get clean && \ 12 | rm -rf /var/lib/apt/lists/* 13 | 14 | # Set up locale 15 | RUN apt-get update && 
apt-get install -y locales && \ 16 | locale-gen C.UTF-8 && \ 17 | /usr/sbin/update-locale LANG=C.UTF-8 && \ 18 | apt-get clean && \ 19 | rm -rf /var/lib/apt/lists/* 20 | 21 | ENV LC_ALL C.UTF-8 22 | 23 | # Modify ImageMagick policy to allow text operations 24 | RUN sed -i 's/none/read,write/g' /etc/ImageMagick-6/policy.xml 25 | 26 | # Set environment variables 27 | # ENV IMAGEMAGICK_BINARY=/usr/bin/convert 28 | ENV PYTHONUNBUFFERED=1 29 | 30 | WORKDIR /home/app 31 | 32 | # Copy only necessary files 33 | COPY requirements.txt . 34 | COPY Backend/ Backend/ 35 | 36 | # Install Python dependencies 37 | RUN pip install --no-cache-dir --upgrade pip && \ 38 | pip install --no-cache-dir -r requirements.txt 39 | 40 | 41 | WORKDIR /home/app/Backend 42 | # copy the .env file 43 | COPY .env . 44 | 45 | # Default command 46 | CMD ["python", "main.py"] 47 | -------------------------------------------------------------------------------- /Dockerfile.FE: -------------------------------------------------------------------------------- 1 | # Dockerfile 2 | FROM python:3.10.4-slim-buster 3 | RUN pip install --upgrade pip 4 | 5 | RUN useradd -m myuser 6 | USER myuser 7 | WORKDIR /home/myuser 8 | 9 | COPY --chown=myuser:myuser ./Frontend ./ 10 | 11 | ENV PATH="/home/myuser/.local/bin:${PATH}" 12 | 13 | # python -m http.server 3001 14 | CMD [ "python", "-m", "http.server", "3000" ] -------------------------------------------------------------------------------- /Dockerfile.FE.Nuxt: -------------------------------------------------------------------------------- 1 | # Use node 18 as build image 2 | FROM node:18.19-slim 3 | 4 | # Install necessary build tools 5 | RUN apt-get update && \ 6 | apt-get install -y --no-install-recommends \ 7 | git \ 8 | procps \ 9 | && rm -rf /var/lib/apt/lists/* 10 | 11 | # Enable corepack and use pnpm 12 | RUN corepack enable && corepack prepare pnpm@latest --activate 13 | 14 | # Set working directory 15 | WORKDIR /app 16 | 17 | # Copy package files first for better caching 18 | COPY ./UI/package.json ./ 19 | 20 | # Set npm registry and install dependencies 21 | RUN npm config set registry https://registry.npmjs.org/ 22 | 23 | # Install dependencies with pnpm 24 | RUN pnpm i 25 | 26 | # Copy the rest of the application 27 | COPY ./UI . 28 | COPY .env . 29 | 30 | # Build the application 31 | RUN pnpm build 32 | 33 | # Expose port 3000 34 | ENV HOST=0.0.0.0 35 | ENV PORT=3000 36 | EXPOSE 3000 37 | 38 | # Start the application 39 | CMD ["node", ".output/server/index.mjs"] 40 | -------------------------------------------------------------------------------- /EnvironmentVariables.md: -------------------------------------------------------------------------------- 1 | # Environment Variables 2 | 3 | ## Required 4 | 5 | - TIKTOK_SESSION_ID: Your TikTok session ID is required. Obtain it by logging into TikTok in your browser and copying the value of the `sessionid` cookie. 6 | 7 | - IMAGEMAGICK_BINARY: The filepath to the ImageMagick binary (.exe file) is needed. Obtain it [here](https://imagemagick.org/script/download.php). 8 | 9 | - PEXELS_API_KEY: Your unique Pexels API key is required. Obtain yours [here](https://www.pexels.com/api/). 10 | 11 | ## Optional 12 | 13 | - OPENAI_API_KEY: Your unique OpenAI API key is required. Obtain yours [here](https://platform.openai.com/api-keys), only nessecary if you want to use the OpenAI models. 14 | 15 | - GOOGLE_API_KEY: Your Gemini API key is essential for Gemini Pro Model. 
Generate one securely at [Get API key | Google AI Studio](https://makersuite.google.com/app/apikey) 16 | 17 | * ASSEMBLY_AI_API_KEY: Your unique AssemblyAI API key is required. You can obtain one [here](https://www.assemblyai.com/app/). This field is optional; if left empty, the subtitle will be created based on the generated script. Subtitles can also be created locally. 18 | 19 | Join the [Discord](https://dsc.gg/fuji-community) for support and updates. 20 | -------------------------------------------------------------------------------- /Frontend/app.js: -------------------------------------------------------------------------------- 1 | const videoSubject = document.querySelector("#videoSubject"); 2 | const aiModel = document.querySelector("#aiModel"); 3 | const voice = document.querySelector("#voice"); 4 | const zipUrl = document.querySelector("#zipUrl"); 5 | const paragraphNumber = document.querySelector("#paragraphNumber"); 6 | const youtubeToggle = document.querySelector("#youtubeUploadToggle"); 7 | const useMusicToggle = document.querySelector("#useMusicToggle"); 8 | const customPrompt = document.querySelector("#customPrompt"); 9 | const generateButton = document.querySelector("#generateButton"); 10 | const cancelButton = document.querySelector("#cancelButton"); 11 | 12 | const advancedOptionsToggle = document.querySelector("#advancedOptionsToggle"); 13 | 14 | advancedOptionsToggle.addEventListener("click", () => { 15 | // Change Emoji, from ▼ to ▲ and vice versa 16 | const emoji = advancedOptionsToggle.textContent; 17 | advancedOptionsToggle.textContent = emoji.includes("▼") 18 | ? "Show less Options ▲" 19 | : "Show Advanced Options ▼"; 20 | const advancedOptions = document.querySelector("#advancedOptions"); 21 | advancedOptions.classList.toggle("hidden"); 22 | }); 23 | 24 | 25 | const cancelGeneration = () => { 26 | console.log("Canceling generation..."); 27 | // Send request to /cancel 28 | fetch("http://localhost:8080/api/cancel", { 29 | method: "POST", 30 | headers: { 31 | "Content-Type": "application/json", 32 | Accept: "application/json", 33 | }, 34 | }) 35 | .then((response) => response.json()) 36 | .then((data) => { 37 | alert(data.message); 38 | console.log(data); 39 | }) 40 | .catch((error) => { 41 | alert("An error occurred. 
Please try again later."); 42 | console.log(error); 43 | }); 44 | 45 | // Hide cancel button 46 | cancelButton.classList.add("hidden"); 47 | 48 | // Enable generate button 49 | generateButton.disabled = false; 50 | generateButton.classList.remove("hidden"); 51 | }; 52 | 53 | const generateVideo = () => { 54 | console.log("Generating video..."); 55 | // Disable button and change text 56 | generateButton.disabled = true; 57 | generateButton.classList.add("hidden"); 58 | 59 | // Show cancel button 60 | cancelButton.classList.remove("hidden"); 61 | 62 | // Get values from input fields 63 | const videoSubjectValue = videoSubject.value; 64 | const aiModelValue = aiModel.value; 65 | const voiceValue = voice.value; 66 | const paragraphNumberValue = paragraphNumber.value; 67 | const youtubeUpload = youtubeToggle.checked; 68 | const useMusicToggleState = useMusicToggle.checked; 69 | const threads = document.querySelector("#threads").value; 70 | const zipUrlValue = zipUrl.value; 71 | const customPromptValue = customPrompt.value; 72 | const subtitlesPosition = document.querySelector("#subtitlesPosition").value; 73 | 74 | const url = "http://localhost:8080/api/generate"; 75 | 76 | // Construct data to be sent to the server 77 | const data = { 78 | videoSubject: videoSubjectValue, 79 | aiModel: aiModelValue, 80 | voice: voiceValue, 81 | paragraphNumber: paragraphNumberValue, 82 | automateYoutubeUpload: youtubeUpload, 83 | useMusic: useMusicToggleState, 84 | zipUrl: zipUrlValue, 85 | threads: threads, 86 | subtitlesPosition: subtitlesPosition, 87 | customPrompt: customPromptValue, 88 | }; 89 | 90 | // Send the actual request to the server 91 | fetch(url, { 92 | method: "POST", 93 | body: JSON.stringify(data), 94 | headers: { 95 | "Content-Type": "application/json", 96 | Accept: "application/json", 97 | }, 98 | }) 99 | .then((response) => response.json()) 100 | .then((data) => { 101 | console.log(data); 102 | alert(data.message); 103 | // Hide cancel button after generation is complete 104 | generateButton.disabled = false; 105 | generateButton.classList.remove("hidden"); 106 | cancelButton.classList.add("hidden"); 107 | }) 108 | .catch((error) => { 109 | alert("An error occurred. Please try again later."); 110 | console.log(error); 111 | }); 112 | }; 113 | 114 | generateButton.addEventListener("click", generateVideo); 115 | cancelButton.addEventListener("click", cancelGeneration); 116 | 117 | videoSubject.addEventListener("keyup", (event) => { 118 | if (event.key === "Enter") { 119 | generateVideo(); 120 | } 121 | }); 122 | 123 | // Load the data from localStorage on page load 124 | document.addEventListener("DOMContentLoaded", (event) => { 125 | const voiceSelect = document.getElementById("voice"); 126 | const storedVoiceValue = localStorage.getItem("voiceValue"); 127 | 128 | if (storedVoiceValue) { 129 | voiceSelect.value = storedVoiceValue; 130 | } 131 | }); 132 | 133 | // When the voice select field changes, store the new value in localStorage. 134 | document.getElementById("voice").addEventListener("change", (event) => { 135 | localStorage.setItem("voiceValue", event.target.value); 136 | }); 137 | -------------------------------------------------------------------------------- /Frontend/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | MoneyPrinter 7 | 11 | 12 | 16 | 17 | 18 | 19 |

MoneyPrinter
This Application is intended to automate the creation and uploads of YouTube Shorts.
[The rest of Frontend/index.html — the generator form markup, option controls, and script tags — is not recoverable from this dump; only the title and tagline above survive.]
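For reference, Frontend/app.js above drives the backend through `POST /api/generate` and `POST /api/cancel`. A minimal sketch of the same calls from Python — the endpoint and field names are taken from app.js, while the example values and the assumption that the backend is already running on port 8080 are illustrative:

```python
# Sketch only: mirrors the request Frontend/app.js sends to the backend.
# Field names come from app.js; the concrete values are example assumptions.
import requests

API_URL = "http://localhost:8080/api"

payload = {
    "videoSubject": "5 facts about the ocean",  # hypothetical subject
    "aiModel": "g4f",
    "voice": "en_us_001",
    "paragraphNumber": "1",
    "automateYoutubeUpload": False,
    "useMusic": False,
    "zipUrl": "",
    "threads": "2",
    "subtitlesPosition": "center,bottom",
    "customPrompt": "",
}

response = requests.post(f"{API_URL}/generate", json=payload)
print(response.json().get("message"))  # app.js surfaces this message in an alert

# A running generation can be aborted the same way the Cancel button does:
requests.post(f"{API_URL}/cancel")
```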
194 | 195 | 209 | 210 | 211 | 212 | 213 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 leamsigc 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## ShortsGenerator 2 | ![ShortGenerator](/logo.jpeg) 3 | ### How to run locally 🚀 Docker is required 4 | --- 5 | 6 | 2. Clone the repository 7 | 8 | ```sh 9 | git clone git@github.com:leamsigc/ShortsGenerator.git video Generator 10 | 11 | ``` 12 | 13 | 14 | 15 | 3. Go to the folder:" 16 | 17 | ```sh 18 | cd videoGenerator 19 | 20 | ``` 21 | 22 | 4. Copy the `.env-example` to `.env` 23 | 5. Update the Pexel API key if you want to use Video from Pexel 24 | 6. The TikTok key is not needed anymore 25 | 7. `docker compose up -d ` 26 | 8. Go to 'http://localhost:5000/generate' to start generating videos 27 | 9. Click on the Setting `...` to get images and videos to use in the video 28 | --- 29 | 30 | 31 | Automate the creation of YouTube Shorts locally with a couple of simple steps. 32 | 33 | 34 | 35 | 1. Give a video subject 36 | 1. Add extra prompt information if needed 37 | 2. Review the script 38 | 1. Add custom search keywords 39 | 2. Select a specific voice to use or set a global default voice for all generations 40 | 3. Generate the video 41 | 4. Review the video - Regenerate video 42 | 5. Add music to the video 43 | 6. View all generated videos 44 | 45 | 7. ***Profit!*** 46 | 47 | 48 | ## Overview 49 | 50 | > **🎥** Watch the video on 51 | [YouTube](https://youtu.be/s7wZ7OxjMxA) or click on the image. 
52 | [![Short Generator](/logo.jpeg)](https://youtu.be/s7wZ7OxjMxA "Short generator, video generator") 53 | 54 | ![Generate](/static/assets/images/Screen1.png) 55 | ![Generate 2](/static/assets/images/Screenshot2.png?raw=true) 56 | ![Generate 3](/static/assets/images/Screenshot3.png?raw=true) 57 | - [x] Generate the script first 58 | - [x] Let users review the script before audio and video generation 59 | - [x] Let users view all the generated videos in a single place 60 | - [x] Let users view the generated video in the browser 61 | - [x] Let users select the audio music to add to the video 62 | 63 | - [ ] Update the view to have a better user experience 64 | - [x] Let users preview the generated video in the same view and let users iterate on the video 65 | - [ ] Let users download the generated video 66 | - [ ] Let users upload videos to be used in video creation 67 | - [ ] Let users upload audio to be used in video creation 68 | - [x] Let users have general configuration 69 | - [ ] Let users add multiple video links to download 70 | - [ ] Let users select the font and upload fonts 71 | - [x] Let users select the color for the text 72 | 73 | ### Features 🚀 plans: 74 | - [ ] Let users schedule video uploads to [YouTube, Facebook Business, LinkedIn] 75 | - [ ] Let users create videos from the calendar and schedule them to be uploaded 76 | 77 | 78 | ## Installation 📥 79 | 80 | 1. Clone the repository 81 | 82 | ```bash 83 | git clone https://github.com/leamsigc/ShortsGenerator.git 84 | cd ShortsGenerator 85 | Copy the `.env.example` file to `.env` and fill in the required values 86 | ``` 87 | 2. Please install Docker if you haven't already done so 88 | 89 | 3. Build the containers: 90 | ```bash 91 | docker-compose build 92 | ``` 93 | 94 | 4. Run the containers: 95 | ```bash 96 | docker-compose up -d 97 | ``` 98 | 5. Open `http://localhost:5000` in your browser 99 | 100 | See [`.env.example`](.env.example) for the required environment variables. 101 | 102 | If you need help, open [EnvironmentVariables.md](EnvironmentVariables.md) for more information. 103 | 104 | 105 | 106 | ## Music 🎵 107 | 108 | To use your own music, upload it to the `static/assets/music` folder. 109 | 110 | ## Fonts 🅰 111 | 112 | Add your fonts to the `static/assets/fonts` and change the font name in the global settings. 113 | 114 | 115 | ## Next Development FE: 116 | 117 | Before running the front end create the following folders: 118 | 119 | 1. `static` 120 | 2. `static/generated_videos` -> All videos generated that have music will be here 121 | 3. `static/Songs` -> Put the mp4 songs that you want to use here 122 | 123 | Start the front end: 124 | 1. `cd UI` 125 | 2. `npm install` 126 | 3. `npm run dev` 127 | 128 | The alternative front end will be on port 3000 129 | 130 | The frontend depends on the backend. 131 | You can run the Docker container or you can run the backend locally 132 | 133 | 134 | ## Donate 🎁 135 | 136 | If you like and enjoy `ShortsGenerator`, and would like to donate, you can do that by clicking on the button on the right-hand side of the repository. ❤️ 137 | You will have your name (and/or logo) added to this repository as a supporter as a sign of appreciation. 138 | 139 | ## Contributing 🤝 140 | 141 | Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change. 
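The "Next Development FE" section above asks for three folders to exist before the Nuxt front end is started. A small helper that creates them from the repository root — the folder names are the ones listed in that section; everything else is plain standard-library usage:

```python
# Create the folders the Nuxt front end expects (see "Next Development FE" above).
import os

for folder in ("static", "static/generated_videos", "static/Songs"):
    os.makedirs(folder, exist_ok=True)  # idempotent: existing folders are left untouched
    print(f"ready: {folder}")
```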
142 | 143 | ## Star History 🌟 144 | 145 | [![Star History Chart](https://api.star-history.com/svg?repos=leamsigc/ShortsGenerator&type=Date)](https://star-history.com/#leamsigc/ShortsGenerator&Date) 146 | 147 | ## License 📝 148 | 149 | See [`LICENSE`](LICENSE) file for more information. 150 | -------------------------------------------------------------------------------- /UI/.gitignore: -------------------------------------------------------------------------------- 1 | # Nuxt dev/build outputs 2 | .output 3 | .data 4 | .nuxt 5 | .nitro 6 | .cache 7 | dist 8 | 9 | # Node dependencies 10 | node_modules 11 | 12 | # Logs 13 | logs 14 | *.log 15 | 16 | # Misc 17 | .DS_Store 18 | .fleet 19 | .idea 20 | 21 | # Local env files 22 | .env 23 | .env.* 24 | !.env.example 25 | -------------------------------------------------------------------------------- /UI/.npmrc: -------------------------------------------------------------------------------- 1 | shamefully-hoist=true -------------------------------------------------------------------------------- /UI/.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "typescript.tsdk": "node_modules/typescript/lib", 3 | "i18n-ally.localesPaths": [ 4 | "locales" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /UI/README.md: -------------------------------------------------------------------------------- 1 | # Nuxt 3 Minimal Starter 2 | 3 | Look at the [Nuxt 3 documentation](https://nuxt.com/docs/getting-started/introduction) to learn more. 4 | 5 | ## Setup 6 | 7 | Make sure to install the dependencies: 8 | 9 | ```bash 10 | # npm 11 | npm install 12 | 13 | # pnpm 14 | pnpm install 15 | 16 | # yarn 17 | yarn install 18 | 19 | # bun 20 | bun install 21 | ``` 22 | 23 | ## Development Server 24 | 25 | Start the development server on `http://localhost:3000`: 26 | 27 | ```bash 28 | # npm 29 | npm run dev 30 | 31 | # pnpm 32 | pnpm run dev 33 | 34 | # yarn 35 | yarn dev 36 | 37 | # bun 38 | bun run dev 39 | ``` 40 | 41 | ## Production 42 | 43 | Build the application for production: 44 | 45 | ```bash 46 | # npm 47 | npm run build 48 | 49 | # pnpm 50 | pnpm run build 51 | 52 | # yarn 53 | yarn build 54 | 55 | # bun 56 | bun run build 57 | ``` 58 | 59 | Locally preview production build: 60 | 61 | ```bash 62 | # npm 63 | npm run preview 64 | 65 | # pnpm 66 | pnpm run preview 67 | 68 | # yarn 69 | yarn preview 70 | 71 | # bun 72 | bun run preview 73 | ``` 74 | 75 | Check out the [deployment documentation](https://nuxt.com/docs/getting-started/deployment) for more information. 
76 | -------------------------------------------------------------------------------- /UI/app.config.ts: -------------------------------------------------------------------------------- 1 | import { _colors, _fontFamily } from "#tailwind-config/theme.mjs"; 2 | 3 | export default defineAppConfig({ 4 | naiveui: { 5 | themeConfig: { 6 | shared: { 7 | common: { 8 | fontFamily: _fontFamily.sans.join(", "), 9 | }, 10 | }, 11 | light: { 12 | common: { 13 | primaryColor: _colors.blue[600], 14 | primaryColorHover: _colors.blue[500], 15 | primaryColorPressed: _colors.blue[700], 16 | }, 17 | }, 18 | dark: { 19 | common: { 20 | primaryColor: _colors.blue[500], 21 | primaryColorHover: _colors.blue[400], 22 | primaryColorPressed: _colors.blue[600], 23 | }, 24 | }, 25 | }, 26 | }, 27 | }); 28 | -------------------------------------------------------------------------------- /UI/app.vue: -------------------------------------------------------------------------------- 1 | 17 | -------------------------------------------------------------------------------- /UI/assets/scss/helpers/_transition.scss: -------------------------------------------------------------------------------- 1 | /* zoom-fade */ 2 | .zoom-fade-leave-active, 3 | .zoom-fade-enter-active { 4 | transition: all 0.5s cubic-bezier(0.76, 0, 0.24, 1); 5 | } 6 | .zoom-fade-enter-from { 7 | opacity: 0; 8 | transform: scale(0.9); 9 | } 10 | .zoom-fade-enter-to { 11 | opacity: 1; 12 | transform: scale(1); 13 | } 14 | .zoom-fade-leave-to { 15 | opacity: 0; 16 | transform: scale(1.1); 17 | } 18 | 19 | /* zoom-out */ 20 | .zoom-out-leave-active, 21 | .zoom-out-enter-active { 22 | transition: all 0.5s cubic-bezier(0.76, 0, 0.24, 1); 23 | } 24 | .zoom-out-enter-from { 25 | opacity: 0; 26 | transform: scale(1.1); 27 | } 28 | .zoom-out-enter-to { 29 | opacity: 1; 30 | transform: scale(1); 31 | } 32 | .zoom-out-leave-to { 33 | opacity: 0; 34 | transform: scale(0.9); 35 | } 36 | 37 | /* fade-slide */ 38 | .fade-slide-leave-active, 39 | .fade-slide-enter-active { 40 | transition: all 0.5s cubic-bezier(0.76, 0, 0.24, 1); 41 | } 42 | .fade-slide-enter-from { 43 | opacity: 0; 44 | transform: translateX(100%); 45 | } 46 | .fade-slide-enter-to { 47 | opacity: 1; 48 | transform: translateX(0); 49 | } 50 | .fade-slide-leave-to { 51 | opacity: 0; 52 | transform: translateX(-100%); 53 | } 54 | 55 | /* fade */ 56 | .fade-leave-active, 57 | .fade-enter-active { 58 | transition: all 0.5s; 59 | } 60 | .fade-enter-from { 61 | opacity: 0; 62 | } 63 | .fade-leave-to { 64 | opacity: 0; 65 | } 66 | 67 | /* fade-bottom */ 68 | .fade-bottom-leave-active, 69 | .fade-bottom-enter-active { 70 | transition: all 0.5s cubic-bezier(0.76, 0, 0.24, 1); 71 | } 72 | .fade-bottom-enter-from { 73 | opacity: 0; 74 | transform: translateY(100%); 75 | } 76 | .fade-bottom-enter-to { 77 | opacity: 1; 78 | transform: translateY(0); 79 | } 80 | .fade-bottom-leave-to { 81 | opacity: 0; 82 | transform: translateY(-100%); 83 | } 84 | 85 | /* fade-scale */ 86 | .fade-scale-leave-active, 87 | .fade-scale-enter-active { 88 | transition: all 0.5s cubic-bezier(0.76, 0, 0.24, 1); 89 | } 90 | .fade-scale-enter-from { 91 | opacity: 0; 92 | transform: scale(0.9); 93 | } 94 | .fade-scale-enter-to { 95 | opacity: 1; 96 | transform: scale(1); 97 | } 98 | .fade-scale-leave-to { 99 | opacity: 0; 100 | transform: scale(0.9); 101 | } 102 | -------------------------------------------------------------------------------- /UI/assets/scss/main.scss: 
-------------------------------------------------------------------------------- 1 | @import "./helpers/transition"; 2 | -------------------------------------------------------------------------------- /UI/components/ActionIcon.vue: -------------------------------------------------------------------------------- 1 | 19 | 20 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /UI/components/AllSettings.vue: -------------------------------------------------------------------------------- 1 | 32 | 33 | 74 | 75 | -------------------------------------------------------------------------------- /UI/components/ErrorView.vue: -------------------------------------------------------------------------------- 1 | 14 | 15 | 18 | 19 | -------------------------------------------------------------------------------- /UI/components/GenerateScript.vue: -------------------------------------------------------------------------------- 1 | 22 | 23 | 46 | 47 | -------------------------------------------------------------------------------- /UI/components/HeaderLayout.vue: -------------------------------------------------------------------------------- 1 | 21 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /UI/components/InstagramVideos.vue: -------------------------------------------------------------------------------- 1 | 63 | 64 | 114 | 115 | 116 | -------------------------------------------------------------------------------- /UI/components/LayoutTabs.vue: -------------------------------------------------------------------------------- 1 | 64 | 65 | 125 | 126 | 182 | -------------------------------------------------------------------------------- /UI/components/MultiStepLoader.vue: -------------------------------------------------------------------------------- 1 | 87 | 88 | -------------------------------------------------------------------------------- /UI/components/MusicSettings.vue: -------------------------------------------------------------------------------- 1 | 26 | 27 | 56 | 57 | -------------------------------------------------------------------------------- /UI/components/NaiveLayoutSidebar.vue: -------------------------------------------------------------------------------- 1 | 73 | 74 | 104 | 105 | 115 | -------------------------------------------------------------------------------- /UI/components/RedirectView.vue: -------------------------------------------------------------------------------- 1 | 36 | 37 | 40 | 41 | -------------------------------------------------------------------------------- /UI/components/SearchDialog.vue: -------------------------------------------------------------------------------- 1 | 254 | 255 | 319 | 320 | 385 | -------------------------------------------------------------------------------- /UI/components/SearchTrigger.vue: -------------------------------------------------------------------------------- 1 | 19 | 20 | 33 | 34 | 85 | -------------------------------------------------------------------------------- /UI/components/SubtitleSettings.vue: -------------------------------------------------------------------------------- 1 | 14 | 15 | 18 | 19 | -------------------------------------------------------------------------------- /UI/components/ToolTipper.vue: -------------------------------------------------------------------------------- 1 | 30 | 31 | 53 | 54 | 55 | -------------------------------------------------------------------------------- 
/UI/components/VideoSearch.vue: -------------------------------------------------------------------------------- 1 | 79 | 80 | 122 | 123 | -------------------------------------------------------------------------------- /UI/components/VideoSelected.vue: -------------------------------------------------------------------------------- 1 | 30 | 31 | 50 | 51 | -------------------------------------------------------------------------------- /UI/components/VideosTable.vue: -------------------------------------------------------------------------------- 1 | 83 | 84 | 90 | 91 | -------------------------------------------------------------------------------- /UI/components/VoiceSettings.vue: -------------------------------------------------------------------------------- 1 | 26 | 27 | 32 | 33 | -------------------------------------------------------------------------------- /UI/components/instagram.vue: -------------------------------------------------------------------------------- 1 | 65 | 66 | 67 | 118 | 119 | 120 | 123 | -------------------------------------------------------------------------------- /UI/composables/useGlobalSettings.ts: -------------------------------------------------------------------------------- 1 | import { useStorage } from "@vueuse/core"; 2 | 3 | 4 | export const useApiSettings = () => { 5 | const API_SETTINGS = useStorage("API_SETTINGS", { 6 | URL: "http://localhost:8080", 7 | }) 8 | return { 9 | API_SETTINGS 10 | } 11 | } 12 | export const useGlobalSettings = () => { 13 | const globalSettings = useStorage("globalSettings", { 14 | font: "Roboto", 15 | color: "#000", 16 | subtitles_position: "center,bottom", 17 | fontsize: 20, 18 | stroke_color: "#000", 19 | stroke_width: 5, 20 | aiModel: "g4f", 21 | voice: "en_us_001", 22 | }); 23 | 24 | return { 25 | globalSettings 26 | }; 27 | } -------------------------------------------------------------------------------- /UI/composables/useMenuSetting.ts: -------------------------------------------------------------------------------- 1 | import type { MenuSetting } from "~/types/Project/Settings"; 2 | 3 | export function useMenuSetting() { 4 | console.log("useMenuSetting"); 5 | 6 | const appStore = useAppStore(); 7 | 8 | const getCollapsed = computed(() => appStore.getMenuSetting.collapsed); 9 | 10 | function getMenuSetting() { 11 | return appStore.getMenuSetting; 12 | } 13 | 14 | // Set menu configuration 15 | function setMenuSetting(menuSetting: Partial): void { 16 | appStore.setProjectSetting({ menuSetting }); 17 | } 18 | 19 | function toggleCollapsed() { 20 | console.log("toggleCollapsed"); 21 | 22 | setMenuSetting({ 23 | collapsed: !unref(getCollapsed), 24 | }); 25 | } 26 | return { 27 | getMenuSetting, 28 | setMenuSetting, 29 | getCollapsed, 30 | toggleCollapsed, 31 | }; 32 | } 33 | -------------------------------------------------------------------------------- /UI/composables/useSearchDialog.ts: -------------------------------------------------------------------------------- 1 | import { ref } from "vue"; 2 | 3 | const listener = ref(); 4 | export function useSearchDialog() { 5 | const commandIcon = ref(isWindows() ? 
"CTRL" : "⌘"); 6 | return { 7 | commandIcon, 8 | trigger: (cb: () => void): void => { 9 | listener.value = cb; 10 | }, 11 | open: (): void => { 12 | listener.value && listener.value(); 13 | }, 14 | }; 15 | } 16 | -------------------------------------------------------------------------------- /UI/composables/useTabs.ts: -------------------------------------------------------------------------------- 1 | import type { RouteLocationNormalized, Router } from "vue-router"; 2 | import { useRouter } from "vue-router"; 3 | import { unref } from "vue"; 4 | import { useTabsStore } from "../stores/TabsStore"; 5 | export function useTabs(_router?: Router) { 6 | console.log("useTabs"); 7 | 8 | const tabStore = useTabsStore(); 9 | const router = _router || useRouter(); 10 | 11 | const { currentRoute } = router; 12 | 13 | function getCurrentTab() { 14 | const route = unref(currentRoute); 15 | return tabStore.getTabsList.find( 16 | (item) => item.fullPath === route.fullPath 17 | )!; 18 | } 19 | 20 | return { 21 | getTabsList: () => tabStore.getTabsList, 22 | getPinnedTabsList: () => tabStore.getTabsList, 23 | getLimitTabsList: () => tabStore.getTabsList, 24 | closeTab: (tab: Tab) => tabStore.closeTab(tab), 25 | closePinnedTab: (tab: Tab) => tabStore.closePinnedTab(tab), 26 | addTab: (route: RouteLocationNormalized) => tabStore.addTab(route), 27 | pinnedTab: (tab: Tab) => tabStore.pinnedTab(tab), 28 | getCurrentTab, 29 | }; 30 | } 31 | -------------------------------------------------------------------------------- /UI/composables/useVideoSetings.ts: -------------------------------------------------------------------------------- 1 | import { useStorage } from '@vueuse/core' 2 | 3 | 4 | export interface VideoResultFormat { 5 | url: string; 6 | image: string; 7 | videoUrl?: { 8 | fileType: string; 9 | link: string; 10 | quality: string; 11 | }; 12 | type?: "local" | "remote" 13 | } 14 | 15 | export const useVideoSettings = () => { 16 | const video = useStorage<{ 17 | script: string; 18 | voice: string; 19 | videoSubject: string; 20 | extraPrompt: string; 21 | search: string; 22 | aiModel: string; 23 | finalVideoUrl: string; 24 | selectedAudio: string; 25 | selectedVideoUrls: VideoResultFormat[]; 26 | }>('VideoSettings', { 27 | script: "", 28 | voice: "en_us_001", 29 | videoSubject: "", 30 | extraPrompt: "", 31 | search: "", 32 | 33 | aiModel: "g4f", 34 | 35 | finalVideoUrl: "", 36 | // Audio related 37 | 38 | selectedAudio: "", 39 | selectedVideoUrls: [], 40 | }); 41 | 42 | 43 | return { video } 44 | } -------------------------------------------------------------------------------- /UI/content/docs/how-to-use.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: 'Short generator how to use' 3 | description: 'Small tutorial on how to use the short generator' 4 | --- 5 | 6 | 7 | 8 | # How to use the Short Generator 9 | 10 | 11 | 1. Click on the "Generate" button to start the process of generating a new short 12 | 1. Enter a topic of what the short will be about 13 | 2. Add extra prompt information if needed 14 | 3. Review the script 15 | 4. Select a specific voice to use or set a global default voice for all generations 16 | 5. Update the search terms if needed 17 | 1. Or can search manually and select the videos that you like by clicking on them 18 | 2, view all the selected vieos -> Click on the "Search and select videos" button and then click on the tab "Selected Videos" to see all the videos that you have selected 19 | 6. 
Click on the "Generate" button 20 | 7. You can add your own music to the video by selecting a music track then click on "Add music" -------------------------------------------------------------------------------- /UI/content/docs/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: 'MoneyPrinter Documentation' 3 | description: 'MoneyPrinter Documentation' 4 | --- 5 | 6 | 7 | # Documentation related to the Money Printer UI 8 | 9 | [Project roadmap](/docs/road-map) | [How to use](/docs/how-to-use) 10 | 11 | ## Getting started 12 | 13 | 14 | 15 | ### We have two options to get started 16 | 17 | 18 | #### Option one: ***Local installation*** 19 | 20 | 21 | 22 | #### Install requirements 23 | ```bash 24 | pip install -r requirements.txt 25 | ``` 26 | #### Copy .env.example and fill out values 27 | ```bash 28 | cp .env.example .env 29 | ``` 30 | #### Run the backend server 31 | ```bash 32 | cd Backend 33 | python main.py 34 | ``` 35 | #### Run the frontend server 36 | ```bash 37 | cd ../Frontend 38 | python -m http.server 3000 39 | ``` 40 | #### Run the nuxt front end 41 | ```bash 42 | cd ../UI 43 | npm install 44 | npm run dev 45 | 46 | ``` 47 | 48 | 49 | 50 | #### Option one: ***Docker container*** 51 | 52 | 53 | 1. Build the docker image 54 | ```bash 55 | docker-compose build --no-cache 56 | ``` 57 | 2. Run the docker container 58 | ```bash 59 | docker-compose up -d 60 | ``` 61 | 62 | 3. The fallowing port urls will be available 63 | 64 | 65 | [Backend](http://localhost:8080) 66 | 67 | [Frontend](http://localhost:3000) -> Basic frontend -> The port will be 3000 by default in the env but you can change it in the .env 68 | 69 | [Frontend](http://localhost:5000) -> Nuxt frontend -> The port will be 5000 by default in the env but you can change it in the .env -------------------------------------------------------------------------------- /UI/content/docs/road-map.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | #Project Roadmap: 4 | 5 | - [x] Generate the script first 6 | - [x] Let user review the script before the audio and video generation 7 | - [x] Let the user view all the generated video in a single place 8 | - [x] Let user view the generated video in the browser 9 | - [x] Let user select the audio music to add to the video 10 | 11 | - [ ] Update the view to have a better user experience 12 | - [ ] Let user preview the generate video in the same view and let user iterated on the video 13 | - [ ] Let user download the generated video 14 | - [ ] Let user upload videos to be use in the video it self 15 | - [ ] Let user upload audio to be use in the video it self 16 | - [ ] Let user have general configuration 17 | - [ ] Let add multiple video link to download 18 | - [ ] Let user select the font and upload font 19 | - [ ] Let user select the color for the text 20 | 21 | ### Features 🚀 planes: 22 | - [ ] Let user schedule the video upload to [youtube,facebook bussines,linkedin] 23 | - [ ] Let user create video from the calendar and schedule it to be uploaded -------------------------------------------------------------------------------- /UI/i18n/locales/en-US.json: -------------------------------------------------------------------------------- 1 | { 2 | "layouts": { 3 | "header": { 4 | "toggleFullScreen": "Full Screen" 5 | } 6 | }, 7 | "searchDialog": { 8 | "searchPlaceholder": "Search...", 9 | "applications": "Applications", 10 | "chatBot": "Chat", 11 | "actions": "Actions", 12 | "action": 
"Full Screen", 13 | "noResultsFound": "No Resoults for the fallowing qury: ", 14 | "toSelectTooltip": "Select", 15 | "toNavigateTooltip": "Navigate", 16 | "actionsOptions": { 17 | "themeToggle": "Switch theme" 18 | } 19 | }, 20 | "video": { 21 | "generate": { 22 | "step": { 23 | "one": { 24 | "title": "Please enter a video subject", 25 | "cancel": "Cancel", 26 | "generate": "Generate script", 27 | "videoSubject": { 28 | "placeholder": "Video subject..." 29 | }, 30 | "extraPrompt": { 31 | "placeholder": "Extra prompt..." 32 | } 33 | }, 34 | "two": { 35 | "script": { 36 | "placeholder": "Video generated script..." 37 | } 38 | } 39 | } 40 | } 41 | }, 42 | "view": { 43 | "generate": { 44 | "setting": { 45 | "label": "Settings" 46 | }, 47 | "music": { 48 | "label": "Music" 49 | }, 50 | "voice": { 51 | "label": "Voice" 52 | }, 53 | "subtitles": { 54 | "label": "Subtitle" 55 | } 56 | } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /UI/layouts/default.vue: -------------------------------------------------------------------------------- 1 | 41 | 42 | 75 | 76 | -------------------------------------------------------------------------------- /UI/nuxt.config.ts: -------------------------------------------------------------------------------- 1 | // https://nuxt.com/docs/api/configuration/nuxt-config 2 | export default defineNuxtConfig({ 3 | ssr: false, 4 | devtools: { enabled: true }, 5 | modules: [ 6 | "@bg-dev/nuxt-naiveui", 7 | "@vueuse/nuxt", 8 | "@nuxtjs/tailwindcss", 9 | "@nuxt/content", 10 | "nuxt-icon", 11 | "@pinia/nuxt", 12 | "@unocss/nuxt", 13 | "@nuxtjs/i18n", 14 | "nuxt-lodash", 15 | ], 16 | css: ["~/assets/scss/main.scss"], 17 | tailwindcss: { 18 | exposeConfig: { 19 | write: true, 20 | }, 21 | }, 22 | content: { 23 | markdown: { 24 | anchorLinks: false, 25 | }, 26 | }, 27 | i18n: { 28 | locales: [ 29 | { 30 | code: "en", 31 | file: "en-US.json", 32 | }, 33 | ], 34 | lazy: true, 35 | langDir: "locales", 36 | defaultLocale: "en", 37 | }, 38 | runtimeConfig: { 39 | public: { 40 | pexelsApiKey: process.env.PEXELS_API_KEY, 41 | }, 42 | }, 43 | }); 44 | -------------------------------------------------------------------------------- /UI/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nuxt-app", 3 | "private": true, 4 | "type": "module", 5 | "scripts": { 6 | "build": "nuxt build --dotenv .env ", 7 | "dev": "nuxt dev --dotenv ../.env ", 8 | "generate": "nuxt generate", 9 | "preview": "nuxt preview", 10 | "postinstall": "nuxt prepare", 11 | "start": "node .output/server/index.mjs" 12 | }, 13 | "dependencies": { 14 | "@pinia/nuxt": "^0.9.0", 15 | "@unocss/nuxt": "^0.65.2", 16 | "nuxt": "^3.14.1592", 17 | "tabulator-tables": "^6.3.0", 18 | "vue": "^3.5.13", 19 | "vue-router": "^4.5.0" 20 | }, 21 | "devDependencies": { 22 | "@bg-dev/nuxt-naiveui": "^1.14.0", 23 | "@nuxt/content": "^2.13.4", 24 | "@nuxt/devtools": "^1.6.4", 25 | "@nuxtjs/i18n": "^9.1.1", 26 | "@nuxtjs/tailwindcss": "^6.12.2", 27 | "@tailwindcss/typography": "^0.5.15", 28 | "@types/tabulator-tables": "^6.2.3", 29 | "@vueuse/nuxt": "^12.0.0", 30 | "nuxt-icon": "^0.6.10", 31 | "nuxt-lodash": "^2.5.3", 32 | "sass": "^1.71.0" 33 | } 34 | } -------------------------------------------------------------------------------- /UI/pages/docs/[...slug].vue: -------------------------------------------------------------------------------- 1 | 6 | -------------------------------------------------------------------------------- 
/UI/pages/generate/index.vue: -------------------------------------------------------------------------------- 1 | 209 | 210 | 417 | 418 | -------------------------------------------------------------------------------- /UI/pages/index.vue: -------------------------------------------------------------------------------- 1 | 14 | 15 | 21 | 22 | -------------------------------------------------------------------------------- /UI/pages/search.vue: -------------------------------------------------------------------------------- 1 | 14 | 15 | 18 | 19 | -------------------------------------------------------------------------------- /UI/pages/settings.vue: -------------------------------------------------------------------------------- 1 | 110 | 111 | 195 | 196 | -------------------------------------------------------------------------------- /UI/pages/videos/index.vue: -------------------------------------------------------------------------------- 1 | 21 | 22 | 49 | 50 | -------------------------------------------------------------------------------- /UI/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leamsigc/ShortsGenerator/f7e222bdf341f21d87550c1af9370eb3a7ebac8e/UI/public/favicon.ico -------------------------------------------------------------------------------- /UI/server/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../.nuxt/tsconfig.server.json" 3 | } 4 | -------------------------------------------------------------------------------- /UI/stores/AppStore.ts: -------------------------------------------------------------------------------- 1 | import { defineStore } from "pinia"; 2 | import type { DeepPartial } from "unocss"; 3 | import { 4 | type HeaderSetting, 5 | type MenuSetting, 6 | type ProjectSetting, 7 | type TransitionSetting, 8 | RouterTransitionConstants, 9 | } from "~/types/Project/Settings"; 10 | 11 | const APP_STORE_ID = "MONEY_PRINTER"; 12 | const DEFAULT_PROJECT_SETTING = { 13 | shouldShowSettingButton: true, 14 | locale: "en", 15 | shouldShowFullContent: false, 16 | shouldShowLogo: true, 17 | shouldShowFooter: true, 18 | headerSetting: { 19 | shouldShow: true, 20 | shouldShowFullScreen: true, 21 | shouldShowSearch: true, 22 | shouldShowNotice: true, 23 | shouldShowSettingDrawer: false, 24 | }, 25 | menuSetting: { 26 | collapsed: false, 27 | }, 28 | transitionSetting: { 29 | shouldEnable: true, 30 | routerBasicTransition: RouterTransitionConstants.FADE, 31 | shouldOpenPageLoading: true, 32 | shouldOpenNProgress: true, 33 | }, 34 | shouldOpenKeepAlive: true, 35 | lockTime: 0, 36 | shouldShowBreadCrumb: true, 37 | shouldShowBreadCrumbIcon: true, 38 | shouldUseErrorHandle: false, 39 | shouldUseOpenBackTop: true, 40 | canEmbedIFramePage: true, 41 | shouldCloseMessageOnSwitch: true, 42 | shouldRemoveAllHttpPending: false, 43 | }; 44 | interface AppState { 45 | // project config 46 | projectSetting: ProjectSetting; 47 | // Page loading status 48 | pageLoading: boolean; 49 | } 50 | 51 | let pageLoadingTimeout: ReturnType; 52 | export const useAppStore = defineStore({ 53 | id: APP_STORE_ID, 54 | state: (): AppState => ({ 55 | projectSetting: DEFAULT_PROJECT_SETTING, 56 | pageLoading: true, 57 | }), 58 | getters: { 59 | getPageLoading(state): boolean { 60 | return state.pageLoading; 61 | }, 62 | 63 | getProjectSetting(state): ProjectSetting { 64 | return state.projectSetting || ({} as ProjectSetting); 65 | }, 66 | 67 | 
getMenuSetting(): MenuSetting { 68 | return this.getProjectSetting.menuSetting; 69 | }, 70 | 71 | getHeaderSetting(): HeaderSetting { 72 | return this.getProjectSetting.headerSetting; 73 | }, 74 | 75 | getTransitionSetting(): TransitionSetting { 76 | return this.getProjectSetting.transitionSetting; 77 | }, 78 | }, 79 | actions: { 80 | setPageLoading(loading: boolean): void { 81 | this.pageLoading = loading; 82 | }, 83 | 84 | setProjectSetting(config: DeepPartial): void { 85 | //Merge the current config with the default config 86 | this.projectSetting = { 87 | ...this.projectSetting, 88 | ...config, 89 | } as ProjectSetting; 90 | }, 91 | 92 | setMenuSetting(menuSetting: Partial): void { 93 | this.setProjectSetting({ menuSetting }); 94 | }, 95 | 96 | setHeaderSetting(headerSetting: Partial): void { 97 | this.setProjectSetting({ headerSetting }); 98 | }, 99 | 100 | setTransitionSetting(transitionSetting: Partial): void { 101 | this.setProjectSetting({ transitionSetting }); 102 | }, 103 | 104 | setPageLoadingAction(loading: boolean) { 105 | clearTimeout(pageLoadingTimeout); 106 | if (loading) { 107 | // Prevent flicker by delaying the setPageLoading call 108 | pageLoadingTimeout = setTimeout(() => { 109 | this.setPageLoading(loading); 110 | }, 50); 111 | } else { 112 | this.setPageLoading(loading); 113 | } 114 | }, 115 | 116 | resetAPPState() { 117 | this.setProjectSetting(DEFAULT_PROJECT_SETTING); 118 | }, 119 | }, 120 | }); 121 | -------------------------------------------------------------------------------- /UI/stores/TabsStore.ts: -------------------------------------------------------------------------------- 1 | import { defineStore } from "pinia"; 2 | import type { 3 | RouteLocationNormalized, 4 | RouteRecordName, 5 | RouteRecordRaw, 6 | } from "vue-router"; 7 | 8 | const APP_TABS_STORE_ID = "APP_TABS_STORE"; 9 | export const LAYOUT = () => import("~/layouts/default.vue"); 10 | export const EXCEPTION_COMPONENT = () => import("~/components/ErrorView.vue"); 11 | export const PAGE_NOT_FOUND_ROUTE: RouteRecordRaw = { 12 | path: "/:path(.*)*", 13 | name: "PageNotFound", 14 | component: LAYOUT, 15 | meta: { 16 | title: "ErrorPage", 17 | shouldHideInMenu: true, 18 | shouldHideBreadcrumb: true, 19 | }, 20 | children: [ 21 | { 22 | path: "/:path(.*)*", 23 | name: "PageNotFound", 24 | component: EXCEPTION_COMPONENT, 25 | meta: { 26 | title: "ErrorPage", 27 | shouldHideInMenu: true, 28 | shouldHideBreadcrumb: true, 29 | }, 30 | }, 31 | ], 32 | }; 33 | export const REDIRECT_ROUTE: RouteRecordRaw = { 34 | path: "/redirect", 35 | component: LAYOUT, 36 | name: "RedirectTo", 37 | meta: { 38 | title: "Redirect", 39 | shouldHideBreadcrumb: true, 40 | shouldHideInMenu: true, 41 | }, 42 | children: [ 43 | { 44 | path: "/redirect/:path(.*)", 45 | name: "Redirect", 46 | component: () => import("~/components/RedirectView.vue"), 47 | meta: { 48 | title: "Redirect", 49 | shouldHideBreadcrumb: true, 50 | }, 51 | }, 52 | ], 53 | }; 54 | 55 | export enum PageConstants { 56 | // basic videos path 57 | BASE_LOGIN = "/videos", 58 | // basic home path 59 | BASE_HOME = "/dashboard", 60 | // error page path 61 | ERROR_PAGE = "/exception", 62 | } 63 | interface AppTabsState { 64 | tabs: Tab[]; 65 | pinnedTabs: Tab[]; 66 | maxVisibleTabs: number; 67 | } 68 | export interface Tab { 69 | name: RouteRecordName; 70 | fullPath: string; 71 | title: string; 72 | } 73 | export const useTabsStore = defineStore({ 74 | id: APP_TABS_STORE_ID, 75 | state: (): AppTabsState => ({ 76 | tabs: [{ fullPath: "/", name: "Home", title: "Home" 
}], 77 | pinnedTabs: [], 78 | maxVisibleTabs: 3, 79 | }), 80 | getters: { 81 | getTabsList(state): Tab[] { 82 | return state.tabs; 83 | }, 84 | getLimitTabsList(state): Tab[] { 85 | if (isGreaterOrEqual2xl.value) { 86 | state.maxVisibleTabs = 3; 87 | } else { 88 | state.maxVisibleTabs = 1; 89 | } 90 | return useTakeRight( 91 | state.tabs 92 | .filter( 93 | (tab) => 94 | state.pinnedTabs.findIndex((p) => p.fullPath === tab.fullPath) === 95 | -1 96 | ) 97 | .reverse(), 98 | state.maxVisibleTabs 99 | ); 100 | }, 101 | getPinnedTabsList(state): Tab[] { 102 | return state.pinnedTabs; 103 | }, 104 | }, 105 | actions: { 106 | addTab(route: RouteLocationNormalized) { 107 | const { path, name, meta } = route; 108 | if ( 109 | !name || 110 | path === PageConstants.ERROR_PAGE || 111 | path === PageConstants.BASE_LOGIN || 112 | ["Redirect", "PageNotFound"].includes(name as string) 113 | ) { 114 | return; 115 | } 116 | const title = 117 | (meta?.title as string) || name.toString().split("-").at(-1); 118 | if (title) { 119 | const newTab: Tab = { name, fullPath: route.fullPath, title }; 120 | this.tabs = useUniqBy([newTab, ...this.tabs], "fullPath"); 121 | } 122 | }, 123 | close(isPinned: boolean, tab: Tab) { 124 | const targetTabs = isPinned ? this.pinnedTabs : this.tabs; 125 | this.tabs = targetTabs.filter( 126 | (currentTab) => currentTab.fullPath !== tab.fullPath 127 | ); 128 | }, 129 | closeTab(tab: Tab) { 130 | this.close(false, tab); 131 | }, 132 | closePinnedTab(tab: Tab) { 133 | this.close(true, tab); 134 | }, 135 | pinnedTab(tab: Tab) { 136 | const isPresent = this.pinnedTabs.some( 137 | (pinnedTab) => pinnedTab.fullPath === tab.fullPath 138 | ); 139 | if (!isPresent) { 140 | this.pinnedTabs = [tab, ...this.pinnedTabs]; 141 | } 142 | return true; 143 | }, 144 | resetTabsState() { 145 | this.tabs = []; 146 | this.pinnedTabs = []; 147 | }, 148 | }, 149 | }); 150 | -------------------------------------------------------------------------------- /UI/tailwind.config.ts: -------------------------------------------------------------------------------- 1 | import type { Config } from "tailwindcss"; 2 | 3 | export default >{ 4 | darkMode: "class", 5 | plugins: [require("@tailwindcss/typography")], 6 | }; 7 | -------------------------------------------------------------------------------- /UI/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | // https://nuxt.com/docs/guide/concepts/typescript 3 | "extends": "./.nuxt/tsconfig.json" 4 | // "include": ["@types/tabulator-tables"] 5 | } 6 | -------------------------------------------------------------------------------- /UI/types/Menu/index.ts: -------------------------------------------------------------------------------- 1 | import type { RouteMeta } from "vue-router"; 2 | 3 | export enum RoleConstants { 4 | ADMIN = "admin", 5 | USER = "user", 6 | GUEST = "guest", 7 | } 8 | 9 | export interface Menu { 10 | name: string; 11 | icon?: string; 12 | path: string; 13 | paramPath?: string; 14 | shouldDisabled?: boolean; 15 | children?: Menu[]; 16 | orderNumber?: number; 17 | allowedRoles?: RoleConstants[]; 18 | meta?: Partial; 19 | shouldHideMenu?: boolean; 20 | description?: string; 21 | data?: Record; 22 | shouldShow?: boolean; 23 | } 24 | -------------------------------------------------------------------------------- /UI/types/Project/Settings.ts: -------------------------------------------------------------------------------- 1 | export enum RouterTransitionConstants { 2 | /** 3 | * A transition that 
zooms in and fades out the previous route, then zooms out and fades in the new route. 4 | */ 5 | ZOOM_FADE = "zoom-fade", 6 | 7 | /** 8 | * A transition that zooms out and fades out the previous route, then fades in the new route. 9 | */ 10 | ZOOM_OUT = "zoom-out", 11 | 12 | /** 13 | * A transition that fades out the previous route to the side, then fades in the new route from the opposite side. 14 | */ 15 | FADE_SLIDE = "fade-slide", 16 | 17 | /** 18 | * A simple fade transition. 19 | */ 20 | FADE = "fade", 21 | 22 | /** 23 | * A transition that fades out the previous route to the bottom, then fades in the new route from the bottom. 24 | */ 25 | FADE_BOTTOM = "fade-bottom", 26 | 27 | /** 28 | * A transition that scales down and fades out the previous route, then scales up and fades in the new route. 29 | */ 30 | FADE_SCALE = "fade-scale", 31 | } 32 | 33 | export interface TransitionSetting { 34 | // Whether to open the page switching animation 35 | shouldEnable: boolean; 36 | // Route basic switching animation 37 | routerBasicTransition: RouterTransitionConstants; 38 | // Whether to open page switching loading 39 | shouldOpenPageLoading: boolean; 40 | // Whether to open the top progress bar 41 | shouldOpenNProgress: boolean; 42 | } 43 | 44 | export interface HeaderSetting { 45 | // Whether to display the website header 46 | shouldShow: boolean; 47 | // Whether to display the full screen button 48 | shouldShowFullScreen: boolean; 49 | // Whether to display the search 50 | shouldShowSearch: boolean; 51 | // Whether to display the notice 52 | shouldShowNotice: boolean; 53 | // Whether to display the setting drawer 54 | shouldShowSettingDrawer: boolean; 55 | } 56 | export interface MenuSetting { 57 | collapsed: boolean; 58 | } 59 | export interface ProjectSetting { 60 | // Whether to display the setting button 61 | shouldShowSettingButton: boolean; 62 | // The locale 63 | locale: string; 64 | // Whether to display the dark mode toggle button 65 | // Whether to display the main interface in full screen, without menu and top bar 66 | shouldShowFullContent: boolean; 67 | // Whether to display the logo 68 | shouldShowLogo: boolean; 69 | // Whether to display the global footer 70 | shouldShowFooter: boolean; 71 | // The header setting 72 | headerSetting: HeaderSetting; 73 | // The menu setting 74 | menuSetting: MenuSetting; 75 | // The animation configuration 76 | transitionSetting: TransitionSetting; 77 | // Whether to enable keep-alive for page layout 78 | shouldOpenKeepAlive: boolean; 79 | // The lock screen time 80 | lockTime: number; 81 | // Whether to display the breadcrumb 82 | shouldShowBreadCrumb: boolean; 83 | // Whether to display the breadcrumb icon 84 | shouldShowBreadCrumbIcon: boolean; 85 | // Whether to use the error-handler-plugin 86 | shouldUseErrorHandle: boolean; 87 | // Whether to enable the back to top function 88 | shouldUseOpenBackTop: boolean; 89 | // Whether to embed iframe pages 90 | canEmbedIFramePage: boolean; 91 | // Whether to delete unclosed messages and notify when switching pages 92 | shouldCloseMessageOnSwitch: boolean; 93 | // Whether to cancel sent but unresponsive http requests when switching pages 94 | shouldRemoveAllHttpPending: boolean; 95 | } 96 | 97 | export enum SettingButtonPositionConstants { 98 | // Automatically adjust according to menu type 99 | AUTO = "auto", 100 | // Display in the top menu bar 101 | HEADER = "header", 102 | // Fixed display in the lower right corner 103 | FIXED = "fixed", 104 | } 105 | 
-------------------------------------------------------------------------------- /UI/types/Search/index.ts: -------------------------------------------------------------------------------- 1 | export interface SearchGroupItem { 2 | iconName: string | null | undefined; 3 | iconImage: string | null; 4 | key: number | string; 5 | title: string; 6 | label: string; 7 | tags?: string; 8 | action: () => void; 9 | } 10 | 11 | export interface SearchGroup { 12 | name: string; 13 | items: SearchGroupItem[]; 14 | } 15 | export type SearchGroups = SearchGroup[]; 16 | -------------------------------------------------------------------------------- /UI/uno.config.ts: -------------------------------------------------------------------------------- 1 | import { defineConfig } from "unocss"; 2 | 3 | export default defineConfig({ 4 | // ...UnoCSS options 5 | }); 6 | -------------------------------------------------------------------------------- /UI/utils/PlatformUtils.ts: -------------------------------------------------------------------------------- 1 | export enum OperatingSystem { 2 | Windows = "Windows", 3 | MacOS = "MacOS", 4 | UNIX = "UNIX", 5 | Linux = "Linux", 6 | Unknown = "Unknown", 7 | } 8 | export type OS = keyof typeof OperatingSystem; 9 | export function detectOperatingSystem(): OS { 10 | const { userAgent } = navigator || { userAgent: "" }; 11 | if (userAgent.includes("Win")) { 12 | return OperatingSystem.Windows; 13 | } 14 | if (userAgent.includes("Mac")) { 15 | return OperatingSystem.MacOS; 16 | } 17 | if (userAgent.includes("X11")) { 18 | return OperatingSystem.UNIX; 19 | } 20 | if (userAgent.includes("Linux")) { 21 | return OperatingSystem.Linux; 22 | } 23 | 24 | return OperatingSystem.Unknown; 25 | } 26 | export function isWindows(): boolean { 27 | return detectOperatingSystem() === OperatingSystem.Windows; 28 | } 29 | -------------------------------------------------------------------------------- /UI/utils/RouteHelpers.ts: -------------------------------------------------------------------------------- 1 | import type { 2 | RouteLocationNormalized, 3 | RouteRecordNormalized, 4 | } from "vue-router"; 5 | 6 | export function getRawRoute( 7 | route: RouteLocationNormalized 8 | ): RouteLocationNormalized { 9 | if (!route) { 10 | return route; 11 | } 12 | const { matched, ...otherProps } = route; 13 | return { 14 | ...otherProps, 15 | matched: matched?.map(({ meta, name, path }) => ({ 16 | meta, 17 | name, 18 | path, 19 | })) as RouteRecordNormalized[], 20 | }; 21 | } 22 | 23 | const key = Symbol("route change event"); 24 | const emitter = mitt<{ [key]: RouteLocationNormalized }>(); 25 | let lastTab: RouteLocationNormalized; 26 | 27 | export function notifyRouteChange(newRoute: RouteLocationNormalized) { 28 | const rawRoute = getRawRoute(newRoute); 29 | emitter.emit(key, rawRoute); 30 | lastTab = rawRoute; 31 | } 32 | 33 | export function listenToRouteChange( 34 | callback: (route: RouteLocationNormalized) => void, 35 | immediate = true 36 | ) { 37 | emitter.on(key, callback); 38 | immediate && lastTab && callback(lastTab); 39 | } 40 | 41 | export function removeRouteChangeListener() { 42 | emitter.all.clear(); 43 | } 44 | -------------------------------------------------------------------------------- /UI/utils/ScreenUtils.ts: -------------------------------------------------------------------------------- 1 | import { breakpointsTailwind, useBreakpoints } from "@vueuse/core"; 2 | 3 | export const breakpoints = useBreakpoints(breakpointsTailwind); 4 | export const isMediumOrLargeScreen = 
breakpoints.between("sm", "xl"); 5 | export const isExtraLargeScreen = breakpoints.smallerOrEqual("xl"); 6 | export const isSmallerOrEqualSm = breakpoints.smallerOrEqual("sm"); 7 | export const isSmallerOrEqualMd = breakpoints.smallerOrEqual("md"); 8 | export const isSmallerOrEqualLg = breakpoints.smallerOrEqual("lg"); 9 | export const isSmallerOrEqualXl = breakpoints.smallerOrEqual("xl"); 10 | export const isSmallerOrEqual2xl = breakpoints.smallerOrEqual("2xl"); 11 | export const isGreaterOrEqualSm = breakpoints.greaterOrEqual("sm"); 12 | export const isGreaterOrEqualMd = breakpoints.greaterOrEqual("md"); 13 | export const isGreaterOrEqualLg = breakpoints.greaterOrEqual("lg"); 14 | export const isGreaterOrEqualXl = breakpoints.greaterOrEqual("xl"); 15 | export const isGreaterOrEqual2xl = breakpoints.greaterOrEqual("2xl"); 16 | -------------------------------------------------------------------------------- /UI/utils/mitt.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copied from https://github.com/developit/mitt 3 | * Extended with a clear method 4 | */ 5 | export type EventType = string | symbol; 6 | 7 | // An event handler can take an optional event argument 8 | // and should not return a value 9 | export type Handler<T = unknown> = (event: T) => void; 10 | export type WildcardHandler<T = Record<string, unknown>> = ( 11 | type: keyof T, 12 | event: T[keyof T] 13 | ) => void; 14 | 15 | // An array of all currently registered event handlers for a type 16 | export type EventHandlerList<T = unknown> = Array<Handler<T>>; 17 | export type WildCardEventHandlerList<T = Record<string, unknown>> = Array< 18 | WildcardHandler<T> 19 | >; 20 | 21 | // A map of event types and their corresponding event handlers. 22 | export type EventHandlerMap<Events extends Record<EventType, unknown>> = Map< 23 | keyof Events | "*", 24 | EventHandlerList<Events[keyof Events]> | WildCardEventHandlerList<Events> 25 | >; 26 | 27 | export interface Emitter<Events extends Record<EventType, unknown>> { 28 | all: EventHandlerMap<Events>; 29 | 30 | on<Key extends keyof Events>(type: Key, handler: Handler<Events[Key]>): void; 31 | on(type: "*", handler: WildcardHandler<Events>): void; 32 | 33 | off<Key extends keyof Events>( 34 | type: Key, 35 | handler?: Handler<Events[Key]> 36 | ): void; 37 | off(type: "*", handler: WildcardHandler<Events>): void; 38 | 39 | emit<Key extends keyof Events>(type: Key, event: Events[Key]): void; 40 | emit<Key extends keyof Events>( 41 | type: undefined extends Events[Key] ? Key : never 42 | ): void; 43 | } 44 | 45 | /** 46 | * Mitt: Tiny (~200b) functional event emitter / pubsub. 47 | * @name mitt 48 | * @returns {Mitt} 49 | */ 50 | export default function mitt<Events extends Record<EventType, unknown>>( 51 | all?: EventHandlerMap<Events> 52 | ): Emitter<Events> { 53 | type GenericEventHandler = 54 | | Handler<Events[keyof Events]> 55 | | WildcardHandler<Events>; 56 | all = all || new Map(); 57 | 58 | return { 59 | /** 60 | * A Map of event names to registered handler functions. 61 | */ 62 | all, 63 | 64 | /** 65 | * Register an event handler for the given type. 66 | * @param {string|symbol} type Type of event to listen for, or `'*'` for all events 67 | * @param {Function} handler Function to call in response to given event 68 | * @memberOf mitt 69 | */ 70 | on<Key extends keyof Events>(type: Key, handler: GenericEventHandler) { 71 | const handlers: Array<GenericEventHandler> | undefined = all!.get(type); 72 | if (handlers) { 73 | handlers.push(handler); 74 | } else { 75 | all!.set(type, [handler] as EventHandlerList<Events[keyof Events]>); 76 | } 77 | }, 78 | 79 | /** 80 | * Remove an event handler for the given type. 81 | * If `handler` is omitted, all handlers of the given type are removed. 
82 | * @param {string|symbol} type Type of event to unregister `handler` from (`'*'` to remove a wildcard handler) 83 | * @param {Function} [handler] Handler function to remove 84 | * @memberOf mitt 85 | */ 86 | off<Key extends keyof Events>(type: Key, handler?: GenericEventHandler) { 87 | const handlers: Array<GenericEventHandler> | undefined = all!.get(type); 88 | if (handlers) { 89 | if (handler) { 90 | handlers.splice(handlers.indexOf(handler) >>> 0, 1); 91 | } else { 92 | all!.set(type, []); 93 | } 94 | } 95 | }, 96 | 97 | /** 98 | * Invoke all handlers for the given type. 99 | * If present, `'*'` handlers are invoked after type-matched handlers. 100 | * 101 | * Note: Manually firing '*' handlers is not supported. 102 | * 103 | * @param {string|symbol} type The event type to invoke 104 | * @param {Any} [evt] Any value (object is recommended and powerful), passed to each handler 105 | * @memberOf mitt 106 | */ 107 | emit<Key extends keyof Events>(type: Key, evt?: Events[Key]) { 108 | let handlers = all!.get(type); 109 | if (handlers) { 110 | (handlers as EventHandlerList<Events[keyof Events]>) 111 | .slice() 112 | .map((handler) => { 113 | handler(evt!); 114 | }); 115 | } 116 | 117 | handlers = all!.get("*"); 118 | if (handlers) { 119 | (handlers as WildCardEventHandlerList<Events>) 120 | .slice() 121 | .map((handler) => { 122 | handler(type, evt!); 123 | }); 124 | } 125 | }, 126 | }; 127 | } 128 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | api: 3 | build: 4 | context: . 5 | dockerfile: Dockerfile 6 | ports: 7 | - ${API_PORT:-8080}:8080 8 | environment: 9 | - PEXELS_API_KEY=${PEXELS_API_KEY} 10 | - TIKTOK_SESSION_ID=${TIKTOK_SESSION_ID} 11 | - IMAGEMAGICK_BINARY=${IMAGEMAGICK_BINARY} 12 | - OPENAI_API_KEY=${OPENAI_API_KEY} 13 | - ASSEMBLY_AI_API_KEY=${ASSEMBLY_AI_API_KEY} 14 | - GOOGLE_CLIENT_ID=${GOOGLE_CLIENT_ID} 15 | - GOOGLE_CLIENT_SECRET=${GOOGLE_CLIENT_SECRET} 16 | - GOOGLE_REFRESH_TOKEN=${GOOGLE_REFRESH_TOKEN} 17 | env_file: 18 | - .env 19 | volumes: 20 | - ./Backend:/home/app/Backend 21 | restart: unless-stopped 22 | 23 | nuxt: 24 | build: 25 | context: . 26 | dockerfile: Dockerfile.FE.Nuxt 27 | env_file: 28 | - .env 29 | ports: 30 | - ${FE_NUXT}:3000 31 | -------------------------------------------------------------------------------- /logo.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leamsigc/ShortsGenerator/f7e222bdf341f21d87550c1af9370eb3a7ebac8e/logo.jpeg -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | g4f==0.3.8.3 2 | setuptools==69.2.0 3 | wheel==0.38.4 4 | requests==2.31.0 5 | moviepy==1.0.3 6 | termcolor==2.4.0 7 | flask==3.0.0 8 | flask-cors==4.0.0 9 | playsound==1.3.0 10 | Pillow==9.5.0 11 | python-dotenv==1.0.0 12 | srt_equalizer==0.1.8 13 | platformdirs==4.1.0 14 | undetected_chromedriver 15 | assemblyai 16 | brotli 17 | google-api-python-client 18 | oauth2client 19 | openai 20 | google-generativeai 21 | curl_cffi 22 | yt-dlp --------------------------------------------------------------------------------