├── .env.example ├── .github ├── FUNDING.yml └── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── .gitignore ├── .vscode └── settings.json ├── Backend ├── classes │ ├── Shorts.py │ └── instagram_downloader.py ├── gpt.py ├── main.py ├── search.py ├── settings.py ├── static │ ├── assets │ │ ├── fonts │ │ │ ├── .gitKeep │ │ │ └── bold_font.ttf │ │ ├── images │ │ │ ├── Screen1.png │ │ │ ├── Screenshot2.png │ │ │ └── Screenshot3.png │ │ └── music │ │ │ └── .gitKeep │ └── generated_videos │ │ └── .gitKeep ├── tiktokvoice.py ├── utils.py ├── video.py └── youtube.py ├── Dockerfile ├── Dockerfile.FE ├── Dockerfile.FE.Nuxt ├── EnvironmentVariables.md ├── Frontend ├── app.js └── index.html ├── LICENSE ├── README.md ├── UI ├── .gitignore ├── .npmrc ├── .vscode │ └── settings.json ├── README.md ├── app.config.ts ├── app.vue ├── assets │ └── scss │ │ ├── helpers │ │ └── _transition.scss │ │ └── main.scss ├── components │ ├── ActionIcon.vue │ ├── AllSettings.vue │ ├── ErrorView.vue │ ├── GenerateScript.vue │ ├── HeaderLayout.vue │ ├── InstagramVideos.vue │ ├── LayoutTabs.vue │ ├── MultiStepLoader.vue │ ├── MusicSettings.vue │ ├── NaiveLayoutSidebar.vue │ ├── RedirectView.vue │ ├── SearchDialog.vue │ ├── SearchTrigger.vue │ ├── SubtitleSettings.vue │ ├── ToolTipper.vue │ ├── VideoSearch.vue │ ├── VideoSelected.vue │ ├── VideosTable.vue │ ├── VoiceSettings.vue │ └── instagram.vue ├── composables │ ├── useGlobalSettings.ts │ ├── useMenuSetting.ts │ ├── useSearchDialog.ts │ ├── useTabs.ts │ └── useVideoSetings.ts ├── content │ └── docs │ │ ├── how-to-use.md │ │ ├── index.md │ │ └── road-map.md ├── i18n │ └── locales │ │ └── en-US.json ├── layouts │ └── default.vue ├── nuxt.config.ts ├── package.json ├── pages │ ├── docs │ │ └── [...slug].vue │ ├── generate │ │ └── index.vue │ ├── index.vue │ ├── search.vue │ ├── settings.vue │ └── videos │ │ └── index.vue ├── pnpm-lock.yaml ├── public │ └── favicon.ico ├── server │ └── tsconfig.json ├── stores │ ├── AppStore.ts │ └── 
TabsStore.ts ├── tailwind.config.ts ├── tsconfig.json ├── types │ ├── Menu │ │ └── index.ts │ ├── Project │ │ └── Settings.ts │ └── Search │ │ └── index.ts ├── uno.config.ts └── utils │ ├── PlatformUtils.ts │ ├── RouteHelpers.ts │ ├── ScreenUtils.ts │ └── mitt.ts ├── docker-compose.yml ├── logo.jpeg └── requirements.txt /.env.example: -------------------------------------------------------------------------------- 1 | # See EnvironmentVariables.md for more information. 2 | 3 | # Necessary API Keys 4 | # ------------------- 5 | 6 | # TikTok Session ID 7 | # Obtain your session ID by logging into TikTok and copying the sessionid cookie. 8 | TIKTOK_SESSION_ID="" 9 | 10 | # ImageMagick Binary Path 11 | # Download ImageMagick from https://imagemagick.org/script/download.php 12 | IMAGEMAGICK_BINARY="/usr/bin/convert" 13 | 14 | # Pexels API Key 15 | # Register at https://www.pexels.com/api/ to get your API key. 16 | PEXELS_API_KEY="" 17 | 18 | # Optional API Keys 19 | # ----------------- 20 | 21 | # OpenAI API Key 22 | # Visit https://openai.com/api/ for details on obtaining an API key. 23 | OPENAI_API_KEY="" 24 | 25 | # AssemblyAI API Key 26 | # Sign up at https://www.assemblyai.com/ to receive an API key. 
27 | ASSEMBLY_AI_API_KEY="" 28 | 29 | # Google API Key 30 | # Generate your API key through https://makersuite.google.com/app/apikey 31 | GOOGLE_API_KEY="" 32 | 33 | # Front end port 34 | FE_PORT=3000 35 | # Alternate front end port 36 | FE_NUXT=5000 37 | # Backend port 38 | API_PORT=8080 39 | 40 | API_URL='http://localhost:8080/api' -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [leamsigc] 4 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG]" 5 | labels: '' 6 | assignees: leamsigc 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. Linux, Windows] 28 | - Browser [e.g. chrome, edge] 29 | - Python Version [e.g. 3.9] 30 | 31 | **Additional context** 32 | Add any other context about the problem here. 
33 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: 'leamsigc' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .env 3 | temp/* 4 | sounds/* 5 | output/* 6 | images/* 7 | *.zip 8 | *.srt 9 | *.mp4 10 | *.mp3 11 | .history 12 | subtitles/* 13 | /venv 14 | client_secret.json 15 | main.py-oauth2.json 16 | .DS_Store 17 | Backend/output* 18 | Songs/ 19 | node_modules 20 | /UI/.nuxt -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "typescript.tsdk": "node_modules/typescript/lib", 3 | "i18n-ally.localesPaths": ["UI/locales"], 4 | "i18n-ally.keystyle": "nested" 5 | } 6 | -------------------------------------------------------------------------------- /Backend/classes/Shorts.py: -------------------------------------------------------------------------------- 1 | import os 2 | from utils import * 3 | 4 | from settings import * 5 | from gpt import * 6 | from 
class Shorts:
    """
    Class for creating Video Shorts.

    Steps to create a Video Short:
        1. Generate a script
        2. Generate metadata (Title, Description, Tags)
        3. Get subtitles
        4. Get videos related to the search term
        5. Convert Text-to-Speech
        6. Combine videos
        7. Combine videos with the Text-to-Speech
    """

    def __init__(self, video_subject: str, paragraph_number: int, ai_model: str, customPrompt: str = "", extra_prompt: str = ""):
        """
        Constructor for the Shorts class.

        Args:
            video_subject (str): The subject of the video.
            paragraph_number (int): The number of paragraphs to generate.
            ai_model (str): The AI model to use for generation.
            customPrompt (str): The custom prompt to use for generation.
            extra_prompt (str): The extra prompt to use for generation.

        Returns:
            None
        """
        global GENERATING
        GENERATING = True

        # Point moviepy at the ImageMagick binary used for subtitle rendering.
        change_settings({"IMAGEMAGICK_BINARY": os.getenv("IMAGEMAGICK_BINARY")})

        self.video_subject = video_subject
        self.paragraph_number = paragraph_number
        self.ai_model = ai_model
        self.customPrompt = customPrompt
        self.extra_prompt = extra_prompt
        self.globalSettings = get_settings()

        # Script generation state
        self.final_script = ""
        self.search_terms = []
        self.AMOUNT_OF_STOCK_VIDEOS = 5

        # Videos fetched from the Pexels API
        self.video_urls = []
        self.video_paths = []
        self.videos_quantity_search = 15
        self.min_duration_search = 5

        # Voice related variables (prefix is the two-letter language code)
        self.voice = "en_us_001"
        self.voice_prefix = self.voice[:2]

        # Audio and subtitles
        self.tts_path = None
        self.subtitles_path = None

        # Final video
        self.final_video_path = None

        # Video metadata
        self.video_title = None
        self.video_description = None
        self.video_tags = None

        # Subtitle position and music-mixed output path
        self.subtitles_position = ""
        self.final_music_video_path = ""

    @property
    def get_final_video_path(self):
        return self.final_video_path

    @property
    def get_final_music_video_path(self):
        return self.final_music_video_path

    @property
    def get_final_script(self):
        return self.final_script

    @property
    def get_tts_path(self):
        return self.tts_path

    @property
    def get_subtitles_path(self):
        return self.subtitles_path

    @property
    def get_video_paths(self):
        return self.video_paths

    def GenerateScript(self):
        """
        Generate a script for the video using the configured subject,
        paragraph count and AI model (or the custom prompt, if one was given).

        Returns:
            str: The cleaned script for the video, or None if the model
            returned an empty response.
        """
        if self.customPrompt and self.customPrompt != "":
            prompt = self.customPrompt
        else:
            prompt = self.globalSettings["scriptSettings"]["defaultPromptStart"]

        prompt += f"""

        # Initialization:
        - video subject: {self.video_subject}
        - number of paragraphs: {self.paragraph_number}
        {self.extra_prompt}

        """
        # Add the global prompt end
        prompt += self.globalSettings["scriptSettings"]["defaultPromptEnd"]

        # Generate script
        response = generate_response(prompt, self.ai_model)

        print(colored(response, "cyan"))

        if response:
            # Strip markdown emphasis characters the model may have added
            response = response.replace("*", "")
            response = response.replace("#", "")

            # Remove markdown link/bracket syntax
            response = re.sub(r"\[.*\]", "", response)
            response = re.sub(r"\(.*\)", "", response)

            # Split the script into paragraphs
            paragraphs = response.split("\n\n")

            # Select the specified number of paragraphs
            selected_paragraphs = paragraphs[:self.paragraph_number]

            # Join the selected paragraphs into a single string
            final_script = "\n\n".join(selected_paragraphs)

            print(colored(f"Number of paragraphs used: {len(selected_paragraphs)}", "green"))

            self.final_script = final_script
            return final_script
        else:
            print(colored("[-] GPT returned an empty response.", "red"))
            return None

    def GenerateSearchTerms(self):
        """Generate stock-footage search terms from the subject and final script."""
        self.search_terms = get_search_terms(self.video_subject, self.AMOUNT_OF_STOCK_VIDEOS, self.final_script, self.ai_model)
        return self.search_terms

    # Download the videos based on the search terms from the Pexels API
    def DownloadVideos(self, selectedVideoUrls):
        """
        Download stock videos, either from URLs the user already selected or
        by searching Pexels with the generated search terms.

        Args:
            selectedVideoUrls: list of objects shaped like
                {"videoUrl": {"link": <url>}} — presumably produced by the
                frontend video picker; TODO confirm against caller.
        """
        global GENERATING

        # Check if the selectedVideoUrls is empty
        if selectedVideoUrls and len(selectedVideoUrls) > 0:
            print(colored(f"Selected videos: {selectedVideoUrls}", "green"))
            # Each entry carries the download link under videoUrl.link
            self.video_urls = [video_url["videoUrl"]["link"] for video_url in selectedVideoUrls]
            print(colored(f"Selected video urls: {self.video_urls}", "green"))
        else:
            for search_term in self.search_terms:
                if not GENERATING:
                    return jsonify(
                        {
                            "status": "error",
                            "message": "Video generation was cancelled.",
                            "data": [],
                        }
                    )
                found_urls = search_for_stock_videos(
                    search_term, os.getenv("PEXELS_API_KEY"), self.videos_quantity_search, self.min_duration_search
                )
                # Keep only the first not-yet-collected URL per search term
                for url in found_urls:
                    if url not in self.video_urls:
                        self.video_urls.append(url)
                        break

        # Check if video_urls is empty
        if not self.video_urls:
            print(colored("[-] No videos found to download.", "red"))
            return jsonify(
                {
                    "status": "error",
                    "message": "No videos found to download.",
                    "data": [],
                }
            )

        # Download the videos
        video_paths = []
        print(colored(f"[+] Downloading {len(self.video_urls)} videos...", "blue"))
        for video_url in self.video_urls:
            if not GENERATING:
                return jsonify(
                    {
                        "status": "error",
                        "message": "Video generation was cancelled.",
                        "data": [],
                    }
                )
            try:
                saved_video_path = save_video(video_url)
                print(colored(f"[+] Saved video: {saved_video_path}", "green"))
                video_paths.append(saved_video_path)
            except Exception:
                # Best-effort: a single failed download should not abort the batch
                print(colored(f"[-] Could not download video: {video_url}", "red"))

        print(colored("[+] Videos downloaded!", "green"))
        self.video_paths = video_paths
        print(colored(f"Video paths: {self.video_paths}", "green"))

    def GenerateMetadata(self):
        """Generate the title, description and tags, then persist them to disk."""
        self.video_title, self.video_description, self.video_tags = generate_metadata(self.video_subject, self.final_script, self.ai_model)

        # Write the metadata in a json file with the video title as the filename
        self.WriteMetadataToFile(self.video_title, self.video_description, self.video_tags)

    def GenerateVoice(self, voice):
        """
        Convert the final script to speech sentence-by-sentence, concatenate
        the clips into one audio file, and generate subtitles from it.

        Args:
            voice (str): TikTok TTS voice id (e.g. "en_us_001"); the first
                two characters are used as the subtitle language prefix.
        """
        print(colored(f"[X] Generating voice: {voice} ", "green"))
        global GENERATING
        self.voice = voice
        self.voice_prefix = self.voice[:2]

        # Split script into sentences
        sentences = self.final_script.split(". ")

        # Remove empty strings
        sentences = list(filter(lambda x: x != "", sentences))
        paths = []

        # Generate TTS for every sentence
        for sentence in sentences:
            if not GENERATING:
                return jsonify(
                    {
                        "status": "error",
                        "message": "Video generation was cancelled.",
                        "data": [],
                    }
                )
            fileId = uuid4()
            current_tts_path = os.path.join("static/assets/temp", f"{fileId}.mp3")
            tts(sentence, self.voice, filename=current_tts_path)

            # Add the audio clip to the list
            print(colored(f"[X] Save Audio ", "green"))
            audio_clip = AudioFileClip(os.path.join("static/assets/temp", f"{fileId}.mp3"))
            paths.append(audio_clip)

        # Combine all TTS files using moviepy
        print(colored(f"[X] Start saving the audio ", "green"))
        final_audio = concatenate_audioclips(paths)
        self.tts_path = os.path.join("static/assets/temp", f"{uuid4()}.mp3")
        final_audio.write_audiofile(self.tts_path)

        # Generate the subtitles; failure is non-fatal (video is made without them)
        try:
            self.subtitles_path = generate_subtitles(audio_path=self.tts_path, sentences=sentences, audio_clips=paths, voice=self.voice_prefix)
        except Exception as e:
            print(colored(f"[-] Error generating subtitles: {e}", "red"))
            self.subtitles_path = None

    def CombineVideos(self):
        """Stitch the downloaded clips to the TTS duration and burn in subtitles."""
        temp_audio = AudioFileClip(self.tts_path)
        n_threads = 2
        combined_video_path = combine_videos(self.video_paths, temp_audio.duration, 10, n_threads or 2)

        print(colored(f"[-] Next step: {combined_video_path}", "green"))
        # Put everything together
        try:
            self.final_video_path = generate_video(combined_video_path, self.tts_path, self.subtitles_path, n_threads or 2, self.subtitles_position)
        except Exception as e:
            print(colored(f"[-] Error generating final video: {e}", "red"))
            self.final_video_path = None

    def WriteMetadataToFile(self, video_title, video_description, video_tags):
        """
        Write the video metadata to static/generated_videos/<title>.json.

        BUG FIX: the original definition omitted ``self``, so the call
        ``self.WriteMetadataToFile(title, description, tags)`` in
        GenerateMetadata passed four arguments to a three-parameter function
        and always raised TypeError.

        Args:
            video_title (str): Video title (also used for the filename).
            video_description (str): Video description.
            video_tags (list): Video tags/keywords.
        """
        metadata = {
            "title": video_title,
            "description": video_description,
            "tags": video_tags
        }
        # Replace spaces with underscores for a filesystem-safe filename
        fileName = video_title.replace(" ", "_")

        with open(os.path.join("static/generated_videos", f"{fileName}.json"), "w") as file:
            json.dump(metadata, file)

    def AddMusic(self, use_music, custom_song_path=""):
        """
        Write the final video, optionally mixing in background music at 10% volume.

        Args:
            use_music (bool): Whether to add a background song.
            custom_song_path (str): Filename inside static/assets/music;
                when empty, a random song is chosen.
        """
        video_clip = VideoFileClip(f"{self.final_video_path}")

        self.final_music_video_path = f"{uuid4()}-music.mp4"
        n_threads = 2
        if use_music:
            # If no song path was given, choose a random song
            song_path = os.path.join("static/assets/music", custom_song_path)
            if not custom_song_path:
                song_path = choose_random_song()

            # Add song to video using moviepy, preserving the original duration
            original_duration = video_clip.duration
            original_audio = video_clip.audio
            song_clip = AudioFileClip(song_path).set_fps(44100)

            # Set the volume of the song to 10% of the original volume
            song_clip = song_clip.volumex(0.1).set_fps(44100)

            # Add the song to the video
            comp_audio = CompositeAudioClip([original_audio, song_clip])
            video_clip = video_clip.set_audio(comp_audio)
            video_clip = video_clip.set_fps(30)
            video_clip = video_clip.set_duration(original_duration)

            video_clip.write_videofile(os.path.join("static/generated_videos", self.final_music_video_path), threads=n_threads or 1)
        else:
            video_clip.write_videofile(os.path.join("static/generated_videos", self.final_music_video_path), threads=n_threads or 1)

    def Stop(self):
        """Cancel generation and kill any running FFMPEG processes."""
        global GENERATING
        # Stop FFMPEG processes
        if os.name == "nt":
            # Windows
            os.system("taskkill /f /im ffmpeg.exe")
        else:
            # Other OS
            os.system("pkill -f ffmpeg")

        GENERATING = False
class InstagramDownloader:
    """Download Instagram videos via yt-dlp into a configurable directory."""

    def __init__(self, output_path: str = "downloads"):
        """
        Initialize the Instagram video downloader.

        Args:
            output_path (str): Directory where videos will be saved
                (created if it does not exist).
        """
        self.output_path = output_path
        self._create_output_directory()

        # Configure yt-dlp options
        self.ydl_opts = {
            'format': 'best',  # Download best quality
            'outtmpl': os.path.join(self.output_path, '%(id)s.%(ext)s'),
            'quiet': False,
            'no_warnings': False,
            'extract_flat': False,
        }

    def _create_output_directory(self) -> None:
        """Create the output directory if it doesn't exist."""
        os.makedirs(self.output_path, exist_ok=True)

    def download_video(self, url: str) -> Dict[str, Any]:
        """
        Download a video from Instagram.

        Args:
            url (str): Instagram video URL

        Returns:
            Dict[str, Any]: Information about the downloaded video
                (title, filename, duration, thumbnail, download_time, status).

        Raises:
            Exception: If the download fails, chained from the yt-dlp error.
        """
        try:
            with yt_dlp.YoutubeDL(self.ydl_opts) as ydl:
                # Extract video information and download in one call
                info = ydl.extract_info(url, download=True)

                return {
                    'title': info.get('title', ''),
                    'filename': ydl.prepare_filename(info),
                    'duration': info.get('duration'),
                    'thumbnail': info.get('thumbnail'),
                    'download_time': datetime.now().isoformat(),
                    'status': 'success'
                }

        except Exception as e:
            # BUG FIX: the original built an `error_info` dict here and never
            # used it (dead code); the raised exception carries the details.
            raise Exception(f"Failed to download video: {str(e)}") from e

    def update_options(self, new_options: Dict[str, Any]) -> None:
        """
        Update yt-dlp options in place.

        Args:
            new_options (Dict[str, Any]): Option overrides merged into the
                existing ``ydl_opts`` dict.
        """
        self.ydl_opts.update(new_options)

    def set_output_template(self, template: str) -> None:
        """
        Set a custom output filename template for downloaded files.

        Args:
            template (str): yt-dlp output template string, joined onto
                the configured output directory.
        """
        self.ydl_opts['outtmpl'] = os.path.join(self.output_path, template)
def generate_response(prompt: str, ai_model: str) -> str:
    """
    Send a prompt to the selected AI backend and return its text response.

    BUG FIX: the docstring previously documented a non-existent
    ``video_subject`` parameter; each branch now returns directly so
    ``response`` can never be referenced before assignment.

    Args:
        prompt (str): The full prompt to send to the model.
        ai_model (str): The model backend to use: 'g4f' or 'gemmini'
            (spelling kept as-is for backward compatibility with callers).

    Returns:
        str: The response text from the AI model.

    Raises:
        ValueError: If an unsupported model name is given.
    """
    if ai_model == 'g4f':
        client = g4f.Client()
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": prompt}],
            stream=False
        )
        # g4f may return either a bare string or an OpenAI-style object
        return response if isinstance(response, str) else str(response.choices[0].message.content)

    elif ai_model == 'gemmini':
        model = genai.GenerativeModel('gemini-pro')
        return model.generate_content(prompt).text

    else:
        raise ValueError("Invalid AI model selected.")
def get_search_terms(video_subject: str, amount: int, script: str, ai_model: str) -> List[str]:
    """
    Ask the AI model for a JSON array of stock-video search terms for a subject.

    Args:
        video_subject (str): The subject of the video.
        amount (int): The amount of search terms to generate.
        script (str): The script of the video.
        ai_model (str): The AI model to use for generation.

    Returns:
        List[str]: The search terms for the video subject (empty on parse failure).
    """
    # Build the generation prompt
    prompt = f"""
    # Role: Video Search Terms Generator
    ## Goals:
    Generate {amount} search terms for stock videos, depending on the subject of a video.

    ## Constrains:
    1. the search terms are to be returned as a json-array of strings.
    2. each search term should consist of 1-3 words, always add the main subject of the video.
    3. you must only return the json-array of strings. you must not return anything else. you must not return the script.
    4. the search terms must be related to the subject of the video.
    5. reply with english search terms only.

    ## Output Example:
    ["search term 1", "search term 2", "search term 3","search term 4","search term 5"]

    ## Context:
    ### Video Subject
    {video_subject}

    ### Video Script
    {script}

    Please note that you must use English for generating video search terms; Chinese is not accepted.
    """.strip()

    # Let user know
    print(colored(f"Generating {amount} search terms for {video_subject}...", "cyan"))

    # Query the model
    reply = generate_response(prompt, ai_model)

    # Let user know
    print(colored(f"Response: {reply}", "cyan"))

    terms: List[str] = []
    try:
        parsed = json.loads(reply)
        # Accept only a flat list of strings
        if not (isinstance(parsed, list) and all(isinstance(term, str) for term in parsed)):
            raise ValueError("Response is not a list of strings.")
        terms = parsed
    except (json.JSONDecodeError, ValueError):
        print(colored("[*] GPT returned an unformatted response. Attempting to clean...", "yellow"))

        # Try to pull a JSON-array-looking substring out of the raw reply
        match = re.search(r'\["(?:[^"\\]|\\.)*"(?:,\s*"[^"\\]*")*\]', reply)
        if match:
            try:
                terms = json.loads(match.group())
            except json.JSONDecodeError:
                print(colored("[-] Could not parse response.", "red"))
                return []

    # Let user know
    print(colored(f"\nGenerated {len(terms)} search terms: {', '.join(terms)}", "cyan"))

    return terms
def generate_metadata(video_subject: str, script: str, ai_model: str) -> Tuple[str, str, List[str]]:
    """
    Produce YouTube metadata for a video: a title, a description, and keywords.

    Args:
        video_subject (str): The subject of the video.
        script (str): The script of the video.
        ai_model (str): The AI model to use for generation.

    Returns:
        Tuple[str, str, List[str]]: The title, description, and keywords for the video.
    """
    # Ask the model for a title
    title_prompt = f"""
    Generate a catchy and SEO-friendly title for a YouTube shorts video about {video_subject}.
    """
    title = generate_response(title_prompt, ai_model).strip()

    # Ask the model for a description grounded in the script
    description_prompt = f"""
    Write a brief and engaging description for a YouTube shorts video about {video_subject}.
    The video is based on the following script:
    {script}
    """
    description = generate_response(description_prompt, ai_model).strip()

    # Keywords are produced by the shared search-term generator (6 terms)
    keywords = get_search_terms(video_subject, 6, script, ai_model)

    return title, description, keywords
def search_for_stock_videos(query: str, api_key: str, it: int, min_dur: int) -> List[str]:
    """
    Search Pexels for stock videos matching a query.

    BUG FIXES vs the original:
      * the HTTP response was indexed directly (``response = r`` then
        ``response["videos"]``) instead of being decoded with ``r.json()``,
        so every call raised and returned an empty list;
      * the best-resolution tracker was shared across videos, so any video
        whose best rendition was smaller than an earlier one was skipped;
      * ``range(it)`` raised IndexError when fewer than ``it`` results
        were returned — we now iterate the actual result list.

    Args:
        query (str): The query to search for.
        api_key (str): The Pexels API key to use.
        it (int): Maximum number of videos to request/consider.
        min_dur (int): Minimum acceptable clip duration in seconds.

    Returns:
        List[str]: Best-resolution download URL for each qualifying video.
    """
    # Build headers
    headers = {
        "Authorization": api_key
    }

    # Build URL
    qurl = f"https://api.pexels.com/videos/search?query={query}&per_page={it}"

    # Send the request
    r = requests.get(qurl, headers=headers)

    # log response
    print(colored(f"Response: {r.status_code}", "green"))
    print(colored(f"Response: {r}", "green"))

    # Decode the JSON payload before indexing it
    response = r.json()

    video_url = []
    try:
        # loop through each video in the result (at most `it` of them)
        for video_data in response["videos"][:it]:
            # check if video has the desired minimum duration
            if video_data["duration"] < min_dur:
                continue

            # Track the best (largest-area) rendition for THIS video only
            temp_video_url = ""
            video_res = 0
            for video in video_data["video_files"]:
                # Check if video has a valid download link
                if ".com" in video["link"]:
                    # Only keep the URL with the largest resolution
                    if (video["width"] * video["height"]) > video_res:
                        temp_video_url = video["link"]
                        video_res = video["width"] * video["height"]

            # add the url to the return list if it's not empty
            if temp_video_url != "":
                video_url.append(temp_video_url)

    except Exception as e:
        print(colored("[-] No Videos found.", "red"))
        print(colored(e, "red"))

    # Let user know
    print(colored(f"\t=> \"{query}\" found {len(video_url)} Videos", "cyan"))

    # Return the video urls
    return video_url
# Global settings shared across the backend: subtitle font styling and the
# prompt fragments used when generating video scripts.


fontSettings = {
    "font": "static/assets/fonts/bold_font.ttf",
    "fontsize": 100,
    "color": "#FFFF00",
    "stroke_color": "black",
    "stroke_width": 5,
    "subtitles_position": "center,bottom",
}


scriptSettings = {
    "defaultPromptStart":
        """
        # Role: Video Script Generator

        ## Goals:
        Generate a script for a video, depending on the subject of the video.

        ## Constrains:
        1. the script is to be returned as a string with the specified number of paragraphs.
        2. do not under any circumstance reference this prompt in your response.
        3. get straight to the point, don't start with unnecessary things like, "welcome to this video".
        4. you must not include any type of markdown or formatting in the script, never use a title.
        5. only return the raw content of the script.
        6. do not include "voiceover", "narrator" or similar indicators of what should be spoken at the beginning of each paragraph or line.
        7. you must not mention the prompt, or anything about the script itself. also, never talk about the amount of paragraphs or lines. just write the script.
        8. respond in the same language as the video subject.

        """,
    "defaultPromptEnd":
        """
        Get straight to the point, don't start with unnecessary things like, "welcome to this video".
        YOU MUST NOT INCLUDE ANY TYPE OF MARKDOWN OR FORMATTING IN THE SCRIPT, NEVER USE A TITLE.
        ONLY RETURN THE RAW CONTENT OF THE SCRIPT. DO NOT INCLUDE "VOICEOVER", "NARRATOR" OR SIMILAR INDICATORS OF WHAT SHOULD BE SPOKEN AT THE BEGINNING OF EACH PARAGRAPH OR LINE. YOU MUST NOT MENTION THE PROMPT, OR ANYTHING ABOUT THE SCRIPT ITSELF. ALSO, NEVER TALK ABOUT THE AMOUNT OF PARAGRAPHS OR LINES. JUST WRITE THE SCRIPT.
        """
}


def get_settings() -> dict:
    """
    Return the global settings.

    Script settings:
        defaultPromptStart: start of the generation prompt.
        defaultPromptEnd: end of the generation prompt.
    Subtitle (font) settings:
        font: font path; fontsize: font size; color: hexadecimal color;
        stroke_color: color of the stroke; stroke_width: stroke width in pixels;
        subtitles_position: position of the subtitles.
    """
    return {
        "scriptSettings": scriptSettings,
        "fontSettings": fontSettings,
    }


def update_settings(new_settings: dict, settingType="FONT"):
    """
    Merge new values into one of the global settings dicts.

    Args:
        new_settings (dict): The new settings to merge in.
        settingType (str, optional): Which settings dict to update:
            "FONT" (default) or "SCRIPT". Any other value is a no-op,
            matching the original behavior.
    """
    # Dispatch to the matching global dict; unknown types fall through silently
    target = {"FONT": fontSettings, "SCRIPT": scriptSettings}.get(settingType)
    if target is not None:
        target.update(new_settings)
-------------------------------------------------------------------------------- /Backend/static/assets/music/.gitKeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leamsigc/ShortsGenerator/f7e222bdf341f21d87550c1af9370eb3a7ebac8e/Backend/static/assets/music/.gitKeep -------------------------------------------------------------------------------- /Backend/static/generated_videos/.gitKeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leamsigc/ShortsGenerator/f7e222bdf341f21d87550c1af9370eb3a7ebac8e/Backend/static/generated_videos/.gitKeep -------------------------------------------------------------------------------- /Backend/tiktokvoice.py: -------------------------------------------------------------------------------- 1 | # author: GiorDior aka Giorgio 2 | # date: 12.06.2023 3 | # topic: TikTok-Voice-TTS 4 | # version: 1.0 5 | # credits: https://github.com/oscie57/tiktok-voice 6 | 7 | # --- MODIFIED VERSION --- # 8 | 9 | import base64 10 | import requests 11 | import threading 12 | 13 | from typing import List 14 | from termcolor import colored 15 | from playsound import playsound 16 | 17 | 18 | VOICES = [ 19 | # DISNEY VOICES 20 | "en_us_ghostface", # Ghost Face 21 | "en_us_chewbacca", # Chewbacca 22 | "en_us_c3po", # C3PO 23 | "en_us_stitch", # Stitch 24 | "en_us_stormtrooper", # Stormtrooper 25 | "en_us_rocket", # Rocket 26 | # ENGLISH VOICES 27 | "en_au_001", # English AU - Female 28 | "en_au_002", # English AU - Male 29 | "en_uk_001", # English UK - Male 1 30 | "en_uk_003", # English UK - Male 2 31 | "en_us_001", # English US - Female (Int. 1) 32 | "en_us_002", # English US - Female (Int. 
# Voice identifiers accepted by the TikTok TTS service.
VOICES = [
    # DISNEY VOICES
    "en_us_ghostface",  # Ghost Face
    "en_us_chewbacca",  # Chewbacca
    "en_us_c3po",  # C3PO
    "en_us_stitch",  # Stitch
    "en_us_stormtrooper",  # Stormtrooper
    "en_us_rocket",  # Rocket
    # ENGLISH VOICES
    "en_au_001",  # English AU - Female
    "en_au_002",  # English AU - Male
    "en_uk_001",  # English UK - Male 1
    "en_uk_003",  # English UK - Male 2
    "en_us_001",  # English US - Female (Int. 1)
    "en_us_002",  # English US - Female (Int. 2)
    "en_us_006",  # English US - Male 1
    "en_us_007",  # English US - Male 2
    "en_us_009",  # English US - Male 3
    "en_us_010",  # English US - Male 4
    # EUROPE VOICES
    "fr_001",  # French - Male 1
    "fr_002",  # French - Male 2
    "de_001",  # German - Female
    "de_002",  # German - Male
    "es_002",  # Spanish - Male
    # AMERICA VOICES
    "es_mx_002",  # Spanish MX - Male
    "br_001",  # Portuguese BR - Female 1
    "br_003",  # Portuguese BR - Female 2
    "br_004",  # Portuguese BR - Female 3
    "br_005",  # Portuguese BR - Male
    # ASIA VOICES
    "id_001",  # Indonesian - Female
    "jp_001",  # Japanese - Female 1
    "jp_003",  # Japanese - Female 2
    "jp_005",  # Japanese - Female 3
    "jp_006",  # Japanese - Male
    "kr_002",  # Korean - Male 1
    "kr_003",  # Korean - Female
    "kr_004",  # Korean - Male 2
    # SINGING VOICES
    "en_female_f08_salut_damour",  # Alto
    "en_male_m03_lobby",  # Tenor
    "en_female_f08_warmy_breeze",  # Warmy Breeze
    "en_male_m03_sunshine_soon",  # Sunshine Soon
    # OTHER
    "en_male_narration",  # narrator
    "en_male_funny",  # wacky
    "en_female_emotional",  # peaceful
]

# Community-hosted TikTok TTS mirrors; tts() fails over from one to the other.
ENDPOINTS = [
    "https://tiktok-tts.weilnet.workers.dev/api/generation",
    "https://tiktoktts.com/api/tiktok-tts",
]
current_endpoint = 0
# in one conversion, the text can have a maximum length of 300 characters
TEXT_BYTE_LIMIT = 300


def split_string(string: str, chunk_size: int) -> List[str]:
    """
    Split a string into word-boundary chunks of at most chunk_size characters.

    Args:
        string (str): Text to split (whitespace-separated words).
        chunk_size (int): Maximum length of each chunk.

    Returns:
        List[str]: The chunks, in order; empty list for empty input.
    """
    words = string.split()
    result = []
    current_chunk = ""
    for word in words:
        # +1 accounts for the separating space prepended below.
        if len(current_chunk) + len(word) + 1 <= chunk_size:
            current_chunk += f" {word}"
        else:
            if current_chunk:  # Append the current chunk if not empty
                result.append(current_chunk.strip())
            current_chunk = word
    if current_chunk:  # Append the last chunk if not empty
        result.append(current_chunk.strip())
    return result


def get_api_response() -> "requests.Response":
    """
    Ping the base URL of the current TTS endpoint to check availability.

    Returns:
        requests.Response: Response of a plain GET against the endpoint host.
    """
    # "https://host/api/..." -> "https://host" (everything before "/a")
    url = f'{ENDPOINTS[current_endpoint].split("/a")[0]}'
    response = requests.get(url)
    return response


def save_audio_file(base64_data: str, filename: str = "output.mp3") -> None:
    """
    Decode base64 audio data and write it to filename as binary.
    """
    audio_bytes = base64.b64decode(base64_data)
    with open(filename, "wb") as file:
        file.write(audio_bytes)


def generate_audio(text: str, voice: str) -> bytes:
    """
    POST one chunk of text to the current TTS endpoint.

    Returns:
        bytes: Raw response body (JSON containing base64 audio data).
    """
    url = f"{ENDPOINTS[current_endpoint]}"
    headers = {"Content-Type": "application/json"}
    data = {"text": text, "voice": voice}
    response = requests.post(url, headers=headers, json=data)
    return response.content


def _extract_base64(raw: bytes) -> str:
    """
    Pull the base64 audio payload out of a raw endpoint response.

    The two endpoints use different JSON layouts; both are matched by the
    original's brittle string-splitting of the raw bytes (kept as-is).
    """
    if current_endpoint == 0:
        return str(raw).split('"')[5]
    return str(raw).split('"')[3].split(",")[1]


def tts(
    text: str,
    voice: str = "none",
    filename: str = "output.mp3",
    play_sound: bool = False,
) -> None:
    """
    Convert text to speech via the TikTok TTS service and save it as an mp3.

    Args:
        text (str): Text to speak; long texts are split into <=299-char chunks
            that are fetched concurrently and concatenated.
        voice (str): One of VOICES; "none" (the default) is rejected.
        filename (str): Output path for the mp3. Defaults to "output.mp3".
        play_sound (bool): Play the file after saving. Defaults to False.
    """
    # checking if the website is available; fail over to the other mirror once
    global current_endpoint

    if get_api_response().status_code == 200:
        print(colored("[+] TikTok TTS Service available!", "green"))
    else:
        current_endpoint = (current_endpoint + 1) % 2
        if get_api_response().status_code == 200:
            print(colored("[+] TTS Service available!", "green"))
        else:
            print(colored("[-] TTS Service not available and probably temporarily rate limited, try again later...", "red"))
            return

    # checking if arguments are valid
    if voice == "none":
        print(colored("[-] Please specify a voice", "red"))
        return

    if voice not in VOICES:
        print(colored("[-] Voice not available", "red"))
        return

    if not text:
        print(colored("[-] Please specify a text", "red"))
        return

    # creating the audio file
    try:
        # NOTE(review): this compares character count against a byte limit;
        # multi-byte text near the limit may still exceed it — confirm upstream.
        if len(text) < TEXT_BYTE_LIMIT:
            audio = generate_audio(text, voice)
            audio_base64_data = _extract_base64(audio)

            if audio_base64_data == "error":
                print(colored("[-] This voice is unavailable right now", "red"))
                return

        else:
            # Split longer text into smaller parts
            text_parts = split_string(text, 299)
            audio_base64_data = [None] * len(text_parts)

            # Thread worker: fetch and decode one chunk into its slot.
            def generate_audio_thread(text_part, index):
                audio = generate_audio(text_part, voice)
                base64_data = _extract_base64(audio)

                # BUG FIX: the original compared the whole result LIST
                # (audio_base64_data) against "error", which is never true,
                # so per-chunk failures went undetected.
                if base64_data == "error":
                    print(colored("[-] This voice is unavailable right now", "red"))
                    return

                audio_base64_data[index] = base64_data

            threads = []
            for index, text_part in enumerate(text_parts):
                # Create and start a new thread for each text part
                thread = threading.Thread(
                    target=generate_audio_thread, args=(text_part, index)
                )
                thread.start()
                threads.append(thread)

            # Wait for all threads to complete
            for thread in threads:
                thread.join()

            # A failed chunk leaves None in its slot; joining would raise
            # TypeError, so abort with a clear message instead.
            if any(part is None for part in audio_base64_data):
                print(colored("[-] One or more audio chunks failed; aborting.", "red"))
                return

            # Concatenate the base64 data in the correct order
            audio_base64_data = "".join(audio_base64_data)

        save_audio_file(audio_base64_data, filename)
        # BUG FIX: the success message had a broken placeholder and never
        # showed the actual output path.
        print(colored(f"[+] Audio file saved successfully as '{filename}'", "green"))
        if play_sound:
            playsound(filename)

    except Exception as e:
        print(colored(f"[-] An error occurred during TTS: {e}", "red"))
def available_voices() -> list:
    """Return the list of voice identifiers supported by the TTS service."""
    return VOICES


import os
import sys
import json
import random
import logging
import zipfile
import requests

from termcolor import colored

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def clean_dir(path: str) -> None:
    """
    Removes every file in a directory, creating the directory if it is missing.

    Args:
        path (str): Path to directory.

    Returns:
        None
    """
    try:
        if not os.path.exists(path):
            # BUG FIX: use makedirs, not mkdir — callers pass nested paths
            # (e.g. "static/assets/temp") and mkdir fails when intermediate
            # directories do not exist.
            os.makedirs(path)
            logger.info(f"Created directory: {path}")

        for file in os.listdir(path):
            file_path = os.path.join(path, file)
            # Only unlink regular files: os.remove on a subdirectory raises
            # and would abort the whole cleanup.
            if os.path.isfile(file_path):
                os.remove(file_path)
                logger.info(f"Removed file: {file_path}")

        logger.info(colored(f"Cleaned {path} directory", "green"))
    except Exception as e:
        logger.error(f"Error occurred while cleaning directory {path}: {str(e)}")
def fetch_songs(zip_url: str) -> None:
    """
    Downloads songs into static/assets/music to use with generated videos.
    Skipped entirely if that directory already exists (assumed downloaded).

    Args:
        zip_url (str): The URL to the zip file containing the songs.

    Returns:
        None
    """
    try:
        logger.info(colored(" => Fetching songs...", "magenta"))

        files_dir = os.path.join("static", "assets", "music")
        if not os.path.exists(files_dir):
            os.makedirs(files_dir)
            logger.info(colored(f"Created directory: {files_dir}", "green"))
        else:
            # Skip if songs are already downloaded
            return

        # Download songs
        response = requests.get(zip_url)

        # Save the zip file
        with open(os.path.join(files_dir, "songs.zip"), "wb") as file:
            file.write(response.content)

        # Unzip the file
        with zipfile.ZipFile(os.path.join(files_dir, "songs.zip"), "r") as file:
            file.extractall(files_dir)

        # Remove the zip file
        os.remove(os.path.join(files_dir, "songs.zip"))

        logger.info(colored(" => Downloaded Songs to static/assets/music.", "green"))

    except Exception as e:
        logger.error(colored(f"Error occurred while fetching songs: {str(e)}", "red"))


def get_random_song() -> str:
    """
    Chooses a random song from the static/assets/music directory.

    Returns:
        str: The path to the chosen song, or None if the directory is
            missing/empty (the error is logged and swallowed).
    """
    try:
        songs = os.listdir(os.path.join("static", "assets", "music"))
        song = random.choice(songs)
        logger.info(colored(f"Chose song: {song}", "green"))
        return os.path.join("static", "assets", "music", song)
    except Exception as e:
        logger.error(colored(f"Error occurred while choosing random song: {str(e)}", "red"))


def check_env_vars() -> None:
    """
    Checks if the necessary environment variables are set.

    Returns:
        None

    Raises:
        SystemExit: If any required environment variables are missing.
    """
    try:
        required_vars = ["PEXELS_API_KEY", "IMAGEMAGICK_BINARY"]
        # BUG FIX: the original built "var + os.getenv(var)"; for a missing
        # variable os.getenv() is None, so the concatenation raised TypeError
        # and the except-branch reported an unexpected error instead of
        # naming the missing variables. Collect just the names.
        missing_vars = [var for var in required_vars if not os.getenv(var)]

        if missing_vars:
            missing_vars_str = ", ".join(missing_vars)
            logger.error(colored(f"The following environment variables are missing: {missing_vars_str}", "red"))
            logger.error(colored("Please consult 'EnvironmentVariables.md' for instructions on how to set them.", "yellow"))
            sys.exit(1)  # Aborts the program
    except Exception as e:
        logger.error(f"Error occurred while checking environment variables: {str(e)}")
        sys.exit(1)  # Aborts the program if an unexpected error occurs
def save_video(video_url: str, directory: str = "static/assets/temp") -> str:
    """
    Downloads a video from the given URL and saves it to a specified directory.

    Args:
        video_url (str): The URL of the video to download.
        directory (str): The path of the temporary directory to save the video to.

    Returns:
        str: The path to the saved video, or None on any download/write error.
    """
    # Ensure the directory exists
    os.makedirs(directory, exist_ok=True)

    video_id = uuid.uuid4()
    video_path = os.path.join(directory, f"{video_id}.mp4")

    # Set headers to mimic a browser request
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:91.0) Gecko/20100101 Firefox/91.0"
    }

    try:
        # timeout added: without it a stalled connection hangs forever.
        response = requests.get(video_url, headers=headers, stream=True, timeout=60)
        response.raise_for_status()  # Check if the request was successful

        # Write the video content to the file in chunks
        with open(video_path, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:  # Filter out keep-alive chunks
                    f.write(chunk)

        return video_path

    except requests.exceptions.RequestException as e:
        print(f"Error downloading the video: {e}")
        return None
    except Exception as e:
        print(f"Error processing the video: {e}")
        return None


def __generate_subtitles_assemblyai(audio_path: str, voice: str) -> str:
    """
    Generates SRT subtitles for an audio file via the AssemblyAI API.

    Args:
        audio_path (str): The path to the audio file to generate subtitles from.
        voice (str): Language code used to pick the transcription language.
            NOTE(review): the mapping keys are 2-letter prefixes while voice
            IDs look like "en_us_001" — confirm callers pass a language code.

    Returns:
        str: The generated subtitles in SRT format.
    """
    language_mapping = {
        "br": "pt",
        "id": "en",  # AssemblyAI doesn't have Indonesian
        "jp": "ja",
        "kr": "ko",
    }

    if voice in language_mapping:
        lang_code = language_mapping[voice]
    else:
        lang_code = voice

    aai.settings.api_key = ASSEMBLY_AI_API_KEY
    config = aai.TranscriptionConfig(language_code=lang_code)
    transcriber = aai.Transcriber(config=config)
    transcript = transcriber.transcribe(audio_path)
    subtitles = transcript.export_subtitles_srt()

    return subtitles


def __generate_subtitles_locally(sentences: List[str], audio_clips: "List[AudioFileClip]") -> str:
    """
    Generates SRT subtitles locally from sentences and their audio durations.

    Args:
        sentences (List[str]): all the sentences said out loud in the audio clips
        audio_clips (List[AudioFileClip]): the individual audio clips which
            make up the final audio track (only .duration is read)

    Returns:
        str: The generated subtitles in SRT format.
    """

    def convert_to_srt_time_format(total_seconds):
        # SRT timestamps must be "HH:MM:SS,mmm". BUG FIX: the original used
        # str(timedelta(...)).rstrip('0'), which (a) dropped milliseconds for
        # whole-second values and (b) corrupted times ending in a zero digit
        # (10 s became "0:00:1"), producing invalid SRT.
        millis = int(round(total_seconds * 1000))
        hours, remainder = divmod(millis, 3600000)
        minutes, remainder = divmod(remainder, 60000)
        seconds, millis = divmod(remainder, 1000)
        return f"{hours:02d}:{minutes:02d}:{seconds:02d},{millis:03d}"

    start_time = 0
    subtitles = []

    for i, (sentence, audio_clip) in enumerate(zip(sentences, audio_clips), start=1):
        duration = audio_clip.duration
        end_time = start_time + duration

        # Format: subtitle index, start time --> end time, sentence
        subtitle_entry = f"{i}\n{convert_to_srt_time_format(start_time)} --> {convert_to_srt_time_format(end_time)}\n{sentence}\n"
        subtitles.append(subtitle_entry)

        start_time += duration  # Update start time for the next subtitle

    return "\n".join(subtitles)


def generate_subtitles(audio_path: str, sentences: List[str], audio_clips: "List[AudioFileClip]", voice: str) -> str:
    """
    Generates subtitles for an audio file and returns the path to the .srt.

    Uses AssemblyAI when ASSEMBLY_AI_API_KEY is configured, otherwise falls
    back to local generation from sentence/clip durations.

    Args:
        audio_path (str): The path to the audio file to generate subtitles from.
        sentences (List[str]): all the sentences said out loud in the audio clips
        audio_clips (List[AudioFileClip]): the individual audio clips which
            make up the final audio track
        voice (str): language code forwarded to the AssemblyAI backend

    Returns:
        str: The path to the generated subtitles.
    """

    def equalize_subtitles(srt_path: str, max_chars: int = 10) -> None:
        # Equalize subtitles
        srt_equalizer.equalize_srt_file(srt_path, srt_path, max_chars)

    # Save subtitles; create the target directory if it does not exist yet,
    # otherwise the open() below fails on a fresh checkout.
    os.makedirs("static/assets/subtitles", exist_ok=True)
    subtitles_path = os.path.join("static/assets/subtitles", f"{uuid.uuid4()}.srt")

    if ASSEMBLY_AI_API_KEY is not None and ASSEMBLY_AI_API_KEY != "":
        print(colored("[+] Creating subtitles using AssemblyAI", "blue"))
        subtitles = __generate_subtitles_assemblyai(audio_path, voice)
    else:
        print(colored("[+] Creating subtitles locally", "blue"))
        subtitles = __generate_subtitles_locally(sentences, audio_clips)

    with open(subtitles_path, "w") as file:
        file.write(subtitles)

    # Equalize subtitles
    equalize_subtitles(subtitles_path)

    print(colored("[+] Subtitles generated.", "green"))

    return subtitles_path
def combine_videos(video_paths: List[str], max_duration: int, max_clip_duration: int, threads: int) -> str:
    """
    Combines a list of videos into one vertical (1080x1920) video and returns
    the path to the combined video.

    Args:
        video_paths (List): A list of paths to the videos to combine.
        max_duration (int): The maximum duration of the combined video.
        max_clip_duration (int): The maximum duration of each clip.
        threads (int): The number of threads to use for the video processing.

    Returns:
        str: The path to the combined video.

    Raises:
        ValueError: If video_paths is empty (previously ZeroDivisionError).
    """
    if not video_paths:
        # Guard: the per-clip duration below divides by len(video_paths).
        raise ValueError("video_paths must not be empty")

    video_id = uuid.uuid4()
    combined_video_path = os.path.join("static/assets/temp", f"{video_id}-combined.mp4")

    # Required duration of each clip
    req_dur = max_duration / len(video_paths)

    print(colored("[+] Combining videos...", "blue"))
    print(colored(f"[+] Each clip will be maximum {req_dur} seconds long.", "blue"))

    clips = []
    tot_dur = 0
    # Add downloaded clips over and over until the duration of the audio (max_duration) has been reached
    while tot_dur < max_duration:
        for video_path in video_paths:

            print(f"Video path: {video_path}")
            clip = VideoFileClip(video_path)

            clip = clip.without_audio()
            # Check if clip is longer than the remaining audio
            if (max_duration - tot_dur) < clip.duration:
                clip = clip.subclip(0, (max_duration - tot_dur))
            # Only shorten clips if the calculated clip length (req_dur) is
            # shorter than the actual clip to prevent still image
            elif req_dur < clip.duration:
                clip = clip.subclip(0, req_dur)

            # Not all videos are the same size, so crop to a 9:16 (0.5625)
            # aspect ratio around the centre before resizing.
            if round((clip.w / clip.h), 4) < 0.5625:
                clip = crop(clip, width=clip.w, height=round(clip.w / 0.5625),
                            x_center=clip.w / 2,
                            y_center=clip.h / 2)
            else:
                clip = crop(clip, width=round(0.5625 * clip.h), height=clip.h,
                            x_center=clip.w / 2,
                            y_center=clip.h / 2)
            clip = clip.resize((1080, 1920))

            if clip.duration > max_clip_duration:
                clip = clip.subclip(0, max_clip_duration)

            clips.append(clip)
            tot_dur += clip.duration

    print(colored("[+] Videos combined.", "green"))
    final_clip = concatenate_videoclips(clips)
    final_clip = final_clip.set_fps(30)
    print(colored("[+] Set clip.", "green"))
    # BUG FIX: the thread count was hard-coded to 3, ignoring the `threads`
    # parameter callers pass in.
    final_clip.write_videofile(combined_video_path, threads=threads)

    print(colored("[+] Final video created.", "green"))
    return combined_video_path


def generate_video(combined_video_path: str, tts_path: str, subtitles_path: str, threads: int, subtitles_position: str) -> str:
    """
    This function creates the final video, with subtitles and audio.

    Args:
        combined_video_path (str): The path to the combined video.
        tts_path (str): The path to the text-to-speech audio.
        subtitles_path (str): The path to the subtitles.
        threads (int): The number of threads to use for the video processing.
        subtitles_position (str): "horizontal,vertical" position override; the
            value from the global font settings is used when this is empty or
            identical to the setting.

    Returns:
        str: The path to the final video.
    """

    # PRINT STATE
    print(colored("[+] Starting video generation...", "green"))

    # Get the Settings
    globalSettings = get_settings()
    # Make a generator that returns a TextClip when called with consecutive
    generator = lambda txt: TextClip(
        txt,
        font=globalSettings["fontSettings"]["font"],
        fontsize=globalSettings["fontSettings"]["fontsize"],
        color=globalSettings["fontSettings"]["color"],
        stroke_color=globalSettings["fontSettings"]["stroke_color"],
        stroke_width=globalSettings["fontSettings"]["stroke_width"],
    )

    # Split the subtitles position into horizontal and vertical
    horizontal_subtitles_position, vertical_subtitles_position = globalSettings["fontSettings"]["subtitles_position"].split(",")

    # if subtitle position is not the same as the setting and is not empty we override
    if subtitles_position != globalSettings["fontSettings"]["subtitles_position"] and subtitles_position != "":
        horizontal_subtitles_position, vertical_subtitles_position = subtitles_position.split(",")

    # Burn the subtitles into the video
    print(colored(f"[+] Subtitles Path: {subtitles_path}", "green"))
    subtitles = SubtitlesClip(subtitles_path, generator)
    result = CompositeVideoClip([
        VideoFileClip(combined_video_path),
        subtitles.set_pos((horizontal_subtitles_position, vertical_subtitles_position))
    ])

    print(colored("[+] Adding audio...", "green"))
    # Add the audio
    audio = AudioFileClip(tts_path)
    result = result.set_audio(audio)
    print(colored("[+] Audio Done...", "green"))

    video_name = os.path.join("static/generated_videos", f"{uuid4()}-final.mp4")
    print(colored("[+] Writing video...", "green"))
    # BUG FIX: the thread count was hard-coded to 2, ignoring the `threads`
    # parameter callers pass in.
    result.write_videofile(f"{video_name}", threads=threads)

    return video_name


import os
import sys
import time
import random
import httplib2

from termcolor import colored
from oauth2client.file import Storage
from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient.http import MediaFileUpload
from oauth2client.tools import argparser, run_flow
from oauth2client.client import flow_from_clientsecrets

# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1

# Maximum number of times to retry before giving up.
MAX_RETRIES = 10

# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib2.ServerNotFoundError)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]

# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret.
CLIENT_SECRETS_FILE = "./client_secret.json"

# These OAuth 2.0 access scopes allow uploading and managing videos on the
# authenticated user's YouTube channel.
SCOPES = ['https://www.googleapis.com/auth/youtube.upload',
          'https://www.googleapis.com/auth/youtube',
          'https://www.googleapis.com/auth/youtubepartner']
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"

# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = f"""
WARNING: Please configure OAuth 2.0

To make this sample run you will need to populate the client_secrets.json file
found at:

{os.path.abspath(os.path.join(os.path.dirname(__file__), CLIENT_SECRETS_FILE))}

with information from the API Console
https://console.cloud.google.com/

For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
"""

VALID_PRIVACY_STATUSES = ("public", "private", "unlisted")


def get_authenticated_service():
    """
    This method retrieves the YouTube service, running the OAuth browser flow
    if no (valid) stored credentials are found next to the script.

    Returns:
        any: The authenticated YouTube service.
    """
    flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
                                   scope=SCOPES,
                                   message=MISSING_CLIENT_SECRETS_MESSAGE)

    # Credentials are cached in "<script>-oauth2.json" alongside the program.
    storage = Storage(f"{sys.argv[0]}-oauth2.json")
    credentials = storage.get()

    if credentials is None or credentials.invalid:
        flags = argparser.parse_args()
        credentials = run_flow(flow, storage, flags)

    return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                 http=credentials.authorize(httplib2.Http()))


def initialize_upload(youtube: any, options: dict):
    """
    This method uploads a video to YouTube.

    Args:
        youtube (any): The authenticated YouTube service.
        options (dict): The options to upload the video with; expected keys:
            'file', 'title', 'description', 'category', 'keywords'
            (comma-separated string or empty), 'privacyStatus'.

    Returns:
        response: The response from the upload process.
    """

    tags = None
    if options['keywords']:
        tags = options['keywords'].split(",")

    body = {
        'snippet': {
            'title': options['title'],
            'description': options['description'],
            'tags': tags,
            'categoryId': options['category']
        },
        'status': {
            'privacyStatus': options['privacyStatus'],
            'madeForKids': False,  # Video is not made for kids
            'selfDeclaredMadeForKids': False  # You declare that the video is not made for kids
        }
    }

    # Call the API's videos.insert method to create and upload the video.
    insert_request = youtube.videos().insert(
        part=",".join(body.keys()),
        body=body,
        media_body=MediaFileUpload(options['file'], chunksize=-1, resumable=True)
    )

    return resumable_upload(insert_request)


def resumable_upload(insert_request: MediaFileUpload):
    """
    This method implements an exponential backoff strategy to resume a
    failed upload.

    Args:
        insert_request (MediaFileUpload): The request to insert the video.

    Returns:
        response: The response from the upload process.

    Raises:
        Exception: After MAX_RETRIES retriable failures, or when the API
            returns a response that is not an uploaded video.
        HttpError: For non-retriable HTTP errors.
    """
    response = None
    error = None
    retry = 0
    while response is None:
        try:
            print(colored(" => Uploading file...", "magenta"))
            status, response = insert_request.next_chunk()
            # BUG FIX: next_chunk() returns (status, None) while the upload is
            # still in progress; the original did `'id' in response` directly,
            # raising TypeError on None mid-upload.
            if response is not None:
                if 'id' in response:
                    print(f"Video id '{response['id']}' was successfully uploaded.")
                    return response
                # A non-video response is a non-retriable failure; previously
                # this fell through and the function silently returned None.
                raise Exception(f"The upload failed with an unexpected response: {response}")
        except HttpError as e:
            if e.resp.status in RETRIABLE_STATUS_CODES:
                error = f"A retriable HTTP error {e.resp.status} occurred:\n{e.content}"
            else:
                raise
        except RETRIABLE_EXCEPTIONS as e:
            error = f"A retriable error occurred: {e}"

        if error is not None:
            print(colored(error, "red"))
            retry += 1
            if retry > MAX_RETRIES:
                raise Exception("No longer attempting to retry.")

            max_sleep = 2 ** retry
            sleep_seconds = random.random() * max_sleep
            print(colored(f" => Sleeping {sleep_seconds} seconds and then retrying...", "blue"))
            time.sleep(sleep_seconds)
            # BUG FIX: reset the error so a clean next_chunk() on the next
            # iteration does not trigger another spurious backoff sleep.
            error = None


def upload_video(video_path, title, description, category, keywords, privacy_status):
    """
    Uploads a video to the authenticated user's YouTube channel, retrying
    once with re-authentication on a 401/403 error.

    Args:
        video_path: Path to the video file.
        title / description / category / keywords / privacy_status: Video
            metadata forwarded to initialize_upload().

    Returns:
        response: The response from the upload process.
    """
    try:
        # Get the authenticated YouTube service
        youtube = get_authenticated_service()

        # Retrieve and print the channel ID for the authenticated user
        channels_response = youtube.channels().list(mine=True, part='id').execute()
        for channel in channels_response['items']:
            print(colored(f" => Channel ID: {channel['id']}", "blue"))

        # Initialize the upload process
        video_response = initialize_upload(youtube, {
            'file': video_path,  # The path to the video file
            'title': title,
            'description': description,
            'category': category,
            'keywords': keywords,
            'privacyStatus': privacy_status
        })
        return video_response  # Return the response from the upload process
    except HttpError as e:
        print(colored(f"[-] An HTTP error {e.resp.status} occurred:\n{e.content}", "red"))
        if e.resp.status in [401, 403]:
            # Refresh the credentials and retry the upload once; this will
            # prompt for re-authentication if necessary.
            youtube = get_authenticated_service()
            video_response = initialize_upload(youtube, {
                'file': video_path,
                'title': title,
                'description': description,
                'category': category,
                'keywords': keywords,
                'privacyStatus': privacy_status
            })
            return video_response
        else:
            raise e
186 | # Here you could refresh the credentials and retry the upload 187 | youtube = get_authenticated_service() # This will prompt for re-authentication if necessary 188 | video_response = initialize_upload(youtube, { 189 | 'file': video_path, 190 | 'title': title, 191 | 'description': description, 192 | 'category': category, 193 | 'keywords': keywords, 194 | 'privacyStatus': privacy_status 195 | }) 196 | return video_response 197 | else: 198 | raise e 199 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile 2 | FROM python:3.10 3 | 4 | # Install system dependencies 5 | RUN apt-get -y update && \ 6 | apt-get -y install --no-install-recommends \ 7 | ffmpeg \ 8 | imagemagick \ 9 | procps \ 10 | fonts-liberation && \ 11 | apt-get clean && \ 12 | rm -rf /var/lib/apt/lists/* 13 | 14 | # Set up locale 15 | RUN apt-get update && apt-get install -y locales && \ 16 | locale-gen C.UTF-8 && \ 17 | /usr/sbin/update-locale LANG=C.UTF-8 && \ 18 | apt-get clean && \ 19 | rm -rf /var/lib/apt/lists/* 20 | 21 | ENV LC_ALL C.UTF-8 22 | 23 | # Modify ImageMagick policy to allow text operations 24 | RUN sed -i 's/none/read,write/g' /etc/ImageMagick-6/policy.xml 25 | 26 | # Set environment variables 27 | # ENV IMAGEMAGICK_BINARY=/usr/bin/convert 28 | ENV PYTHONUNBUFFERED=1 29 | 30 | WORKDIR /home/app 31 | 32 | # Copy only necessary files 33 | COPY requirements.txt . 34 | COPY Backend/ Backend/ 35 | 36 | # Install Python dependencies 37 | RUN pip install --no-cache-dir --upgrade pip && \ 38 | pip install --no-cache-dir -r requirements.txt 39 | 40 | 41 | WORKDIR /home/app/Backend 42 | # copy the .env file 43 | COPY .env . 
44 | 45 | # Default command 46 | CMD ["python", "main.py"] 47 | -------------------------------------------------------------------------------- /Dockerfile.FE: -------------------------------------------------------------------------------- 1 | # Dockerfile 2 | FROM python:3.10.4-slim-buster 3 | RUN pip install --upgrade pip 4 | 5 | RUN useradd -m myuser 6 | USER myuser 7 | WORKDIR /home/myuser 8 | 9 | COPY --chown=myuser:myuser ./Frontend ./ 10 | 11 | ENV PATH="/home/myuser/.local/bin:${PATH}" 12 | 13 | # python -m http.server 3001 14 | CMD [ "python", "-m", "http.server", "3000" ] -------------------------------------------------------------------------------- /Dockerfile.FE.Nuxt: -------------------------------------------------------------------------------- 1 | # Use node 18 as build image 2 | FROM node:18.19-slim 3 | 4 | # Install necessary build tools 5 | RUN apt-get update && \ 6 | apt-get install -y --no-install-recommends \ 7 | git \ 8 | procps \ 9 | && rm -rf /var/lib/apt/lists/* 10 | 11 | # Enable corepack and use pnpm 12 | RUN corepack enable && corepack prepare pnpm@latest --activate 13 | 14 | # Set working directory 15 | WORKDIR /app 16 | 17 | # Copy package files first for better caching 18 | COPY ./UI/package.json ./ 19 | 20 | # Set npm registry and install dependencies 21 | RUN npm config set registry https://registry.npmjs.org/ 22 | 23 | # Install dependencies with pnpm 24 | RUN pnpm i 25 | 26 | # Copy the rest of the application 27 | COPY ./UI . 28 | COPY .env . 
29 | 30 | # Build the application 31 | RUN pnpm build 32 | 33 | # Expose port 3000 34 | ENV HOST=0.0.0.0 35 | ENV PORT=3000 36 | EXPOSE 3000 37 | 38 | # Start the application 39 | CMD ["node", ".output/server/index.mjs"] 40 | -------------------------------------------------------------------------------- /EnvironmentVariables.md: -------------------------------------------------------------------------------- 1 | # Environment Variables 2 | 3 | ## Required 4 | 5 | - TIKTOK_SESSION_ID: Your TikTok session ID is required. Obtain it by logging into TikTok in your browser and copying the value of the `sessionid` cookie. 6 | 7 | - IMAGEMAGICK_BINARY: The filepath to the ImageMagick binary (.exe file) is needed. Obtain it [here](https://imagemagick.org/script/download.php). 8 | 9 | - PEXELS_API_KEY: Your unique Pexels API key is required. Obtain yours [here](https://www.pexels.com/api/). 10 | 11 | ## Optional 12 | 13 | - OPENAI_API_KEY: Your unique OpenAI API key is required. Obtain yours [here](https://platform.openai.com/api-keys), only nessecary if you want to use the OpenAI models. 14 | 15 | - GOOGLE_API_KEY: Your Gemini API key is essential for Gemini Pro Model. Generate one securely at [Get API key | Google AI Studio](https://makersuite.google.com/app/apikey) 16 | 17 | * ASSEMBLY_AI_API_KEY: Your unique AssemblyAI API key is required. You can obtain one [here](https://www.assemblyai.com/app/). This field is optional; if left empty, the subtitle will be created based on the generated script. Subtitles can also be created locally. 18 | 19 | Join the [Discord](https://dsc.gg/fuji-community) for support and updates. 
20 | -------------------------------------------------------------------------------- /Frontend/app.js: -------------------------------------------------------------------------------- 1 | const videoSubject = document.querySelector("#videoSubject"); 2 | const aiModel = document.querySelector("#aiModel"); 3 | const voice = document.querySelector("#voice"); 4 | const zipUrl = document.querySelector("#zipUrl"); 5 | const paragraphNumber = document.querySelector("#paragraphNumber"); 6 | const youtubeToggle = document.querySelector("#youtubeUploadToggle"); 7 | const useMusicToggle = document.querySelector("#useMusicToggle"); 8 | const customPrompt = document.querySelector("#customPrompt"); 9 | const generateButton = document.querySelector("#generateButton"); 10 | const cancelButton = document.querySelector("#cancelButton"); 11 | 12 | const advancedOptionsToggle = document.querySelector("#advancedOptionsToggle"); 13 | 14 | advancedOptionsToggle.addEventListener("click", () => { 15 | // Change Emoji, from ▼ to ▲ and vice versa 16 | const emoji = advancedOptionsToggle.textContent; 17 | advancedOptionsToggle.textContent = emoji.includes("▼") 18 | ? "Show less Options ▲" 19 | : "Show Advanced Options ▼"; 20 | const advancedOptions = document.querySelector("#advancedOptions"); 21 | advancedOptions.classList.toggle("hidden"); 22 | }); 23 | 24 | 25 | const cancelGeneration = () => { 26 | console.log("Canceling generation..."); 27 | // Send request to /cancel 28 | fetch("http://localhost:8080/api/cancel", { 29 | method: "POST", 30 | headers: { 31 | "Content-Type": "application/json", 32 | Accept: "application/json", 33 | }, 34 | }) 35 | .then((response) => response.json()) 36 | .then((data) => { 37 | alert(data.message); 38 | console.log(data); 39 | }) 40 | .catch((error) => { 41 | alert("An error occurred. 
Please try again later."); 42 | console.log(error); 43 | }); 44 | 45 | // Hide cancel button 46 | cancelButton.classList.add("hidden"); 47 | 48 | // Enable generate button 49 | generateButton.disabled = false; 50 | generateButton.classList.remove("hidden"); 51 | }; 52 | 53 | const generateVideo = () => { 54 | console.log("Generating video..."); 55 | // Disable button and change text 56 | generateButton.disabled = true; 57 | generateButton.classList.add("hidden"); 58 | 59 | // Show cancel button 60 | cancelButton.classList.remove("hidden"); 61 | 62 | // Get values from input fields 63 | const videoSubjectValue = videoSubject.value; 64 | const aiModelValue = aiModel.value; 65 | const voiceValue = voice.value; 66 | const paragraphNumberValue = paragraphNumber.value; 67 | const youtubeUpload = youtubeToggle.checked; 68 | const useMusicToggleState = useMusicToggle.checked; 69 | const threads = document.querySelector("#threads").value; 70 | const zipUrlValue = zipUrl.value; 71 | const customPromptValue = customPrompt.value; 72 | const subtitlesPosition = document.querySelector("#subtitlesPosition").value; 73 | 74 | const url = "http://localhost:8080/api/generate"; 75 | 76 | // Construct data to be sent to the server 77 | const data = { 78 | videoSubject: videoSubjectValue, 79 | aiModel: aiModelValue, 80 | voice: voiceValue, 81 | paragraphNumber: paragraphNumberValue, 82 | automateYoutubeUpload: youtubeUpload, 83 | useMusic: useMusicToggleState, 84 | zipUrl: zipUrlValue, 85 | threads: threads, 86 | subtitlesPosition: subtitlesPosition, 87 | customPrompt: customPromptValue, 88 | }; 89 | 90 | // Send the actual request to the server 91 | fetch(url, { 92 | method: "POST", 93 | body: JSON.stringify(data), 94 | headers: { 95 | "Content-Type": "application/json", 96 | Accept: "application/json", 97 | }, 98 | }) 99 | .then((response) => response.json()) 100 | .then((data) => { 101 | console.log(data); 102 | alert(data.message); 103 | // Hide cancel button after generation is 
complete 104 | generateButton.disabled = false; 105 | generateButton.classList.remove("hidden"); 106 | cancelButton.classList.add("hidden"); 107 | }) 108 | .catch((error) => { 109 | alert("An error occurred. Please try again later."); 110 | console.log(error); 111 | }); 112 | }; 113 | 114 | generateButton.addEventListener("click", generateVideo); 115 | cancelButton.addEventListener("click", cancelGeneration); 116 | 117 | videoSubject.addEventListener("keyup", (event) => { 118 | if (event.key === "Enter") { 119 | generateVideo(); 120 | } 121 | }); 122 | 123 | // Load the data from localStorage on page load 124 | document.addEventListener("DOMContentLoaded", (event) => { 125 | const voiceSelect = document.getElementById("voice"); 126 | const storedVoiceValue = localStorage.getItem("voiceValue"); 127 | 128 | if (storedVoiceValue) { 129 | voiceSelect.value = storedVoiceValue; 130 | } 131 | }); 132 | 133 | // When the voice select field changes, store the new value in localStorage. 134 | document.getElementById("voice").addEventListener("change", (event) => { 135 | localStorage.setItem("voiceValue", event.target.value); 136 | }); 137 | -------------------------------------------------------------------------------- /Frontend/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 |
4 | 5 | 6 |21 | This Application is intended to automate the creation and uploads of 22 | YouTube Shorts. 23 |
24 | 25 |99 | {{ video }} 100 |
101 |108 | No Instagram videos available. Download some videos first. 109 |
110 |Generating video | script ...
384 | 385 |