├── static ├── assets │ ├── fonts │ │ ├── .gitKeep │ │ └── bold_font.ttf │ ├── music │ │ └── .gitKeep │ └── images │ │ ├── Screen1.png │ │ ├── Screenshot2.png │ │ └── Screenshot3.png └── generated_videos │ └── .gitKeep ├── UI ├── .npmrc ├── assets │ └── scss │ │ ├── main.scss │ │ └── helpers │ │ └── _transition.scss ├── server │ └── tsconfig.json ├── public │ └── favicon.ico ├── uno.config.ts ├── .vscode │ └── settings.json ├── pages │ ├── docs │ │ └── [...slug].vue │ ├── search.vue │ ├── index.vue │ ├── videos │ │ └── index.vue │ ├── settings.vue │ └── generate │ │ └── index.vue ├── tsconfig.json ├── tailwind.config.ts ├── .gitignore ├── types │ ├── Search │ │ └── index.ts │ ├── Menu │ │ └── index.ts │ └── Project │ │ └── Settings.ts ├── components │ ├── ErrorView.vue │ ├── SubtitleSettings.vue │ ├── VoiceSettings.vue │ ├── ActionIcon.vue │ ├── RedirectView.vue │ ├── GenerateScript.vue │ ├── HeaderLayout.vue │ ├── ToolTipper.vue │ ├── MusicSettings.vue │ ├── AllSettings.vue │ ├── VideoSelected.vue │ ├── SearchTrigger.vue │ ├── VideosTable.vue │ ├── NaiveLayoutSidebar.vue │ ├── VideoSearch.vue │ ├── LayoutTabs.vue │ └── SearchDialog.vue ├── composables │ ├── useSearchDialog.ts │ ├── useGlobalSettings.ts │ ├── useMenuSetting.ts │ ├── useVideoSetings.ts │ └── useTabs.ts ├── app.vue ├── app.config.ts ├── utils │ ├── PlatformUtils.ts │ ├── ScreenUtils.ts │ ├── RouteHelpers.ts │ └── mitt.ts ├── content │ └── docs │ │ ├── how-to-use.md │ │ ├── road-map.md │ │ └── index.md ├── nuxt.config.ts ├── package.json ├── README.md ├── locales │ └── en-US.json ├── layouts │ └── default.vue └── stores │ ├── AppStore.ts │ └── TabsStore.ts ├── logo.jpeg ├── .github ├── FUNDING.yml └── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md ├── .vscode └── settings.json ├── .gitignore ├── Dockerfile.FE ├── requirements.txt ├── docker-compose.yml ├── Dockerfile.FE.Nuxt ├── Dockerfile ├── .env.example ├── LICENSE ├── EnvironmentVariables.md ├── Backend ├── search.py ├── settings.py ├── utils.py ├── gpt.py ├── tiktokvoice.py ├── youtube.py ├── video.py ├── classes │ └── Shorts.py └── main.py ├── README.md └── Frontend ├── app.js └── index.html /static/assets/fonts/.gitKeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /static/assets/music/.gitKeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /UI/.npmrc: -------------------------------------------------------------------------------- 1 | shamefully-hoist=true -------------------------------------------------------------------------------- /static/generated_videos/.gitKeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /UI/assets/scss/main.scss: -------------------------------------------------------------------------------- 1 | @import "./helpers/transition"; 2 | -------------------------------------------------------------------------------- /logo.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hacksider/ShortsGenerator/HEAD/logo.jpeg -------------------------------------------------------------------------------- /UI/server/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"extends": "../.nuxt/tsconfig.server.json" 3 | } 4 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [leamsigc] 4 | -------------------------------------------------------------------------------- /UI/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hacksider/ShortsGenerator/HEAD/UI/public/favicon.ico -------------------------------------------------------------------------------- /static/assets/fonts/bold_font.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hacksider/ShortsGenerator/HEAD/static/assets/fonts/bold_font.ttf -------------------------------------------------------------------------------- /static/assets/images/Screen1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hacksider/ShortsGenerator/HEAD/static/assets/images/Screen1.png -------------------------------------------------------------------------------- /UI/uno.config.ts: -------------------------------------------------------------------------------- 1 | import { defineConfig } from "unocss"; 2 | 3 | export default defineConfig({ 4 | // ...UnoCSS options 5 | }); 6 | -------------------------------------------------------------------------------- /static/assets/images/Screenshot2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hacksider/ShortsGenerator/HEAD/static/assets/images/Screenshot2.png -------------------------------------------------------------------------------- /static/assets/images/Screenshot3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hacksider/ShortsGenerator/HEAD/static/assets/images/Screenshot3.png -------------------------------------------------------------------------------- /UI/.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "typescript.tsdk": "node_modules/typescript/lib", 3 | "i18n-ally.localesPaths": [ 4 | "locales" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /UI/pages/docs/[...slug].vue: -------------------------------------------------------------------------------- 1 | 6 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "typescript.tsdk": "node_modules/typescript/lib", 3 | "i18n-ally.localesPaths": ["UI/locales"], 4 | "i18n-ally.keystyle": "nested" 5 | } 6 | -------------------------------------------------------------------------------- /UI/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | // https://nuxt.com/docs/guide/concepts/typescript 3 | "extends": "./.nuxt/tsconfig.json" 4 | // "include": ["@types/tabulator-tables"] 5 | } 6 | -------------------------------------------------------------------------------- /UI/tailwind.config.ts: -------------------------------------------------------------------------------- 1 | import type { Config } from "tailwindcss"; 2 | 3 | export default >{ 4 | darkMode: "class", 
5 | plugins: [require("@tailwindcss/typography")], 6 | }; 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .env 3 | temp/* 4 | sounds/* 5 | output/* 6 | images/* 7 | *.zip 8 | *.srt 9 | *.mp4 10 | *.mp3 11 | .history 12 | subtitles/* 13 | /venv 14 | client_secret.json 15 | main.py-oauth2.json 16 | .DS_Store 17 | Backend/output* 18 | Songs/ 19 | node_modules 20 | /UI/.nuxt -------------------------------------------------------------------------------- /UI/.gitignore: -------------------------------------------------------------------------------- 1 | # Nuxt dev/build outputs 2 | .output 3 | .data 4 | .nuxt 5 | .nitro 6 | .cache 7 | dist 8 | 9 | # Node dependencies 10 | node_modules 11 | 12 | # Logs 13 | logs 14 | *.log 15 | 16 | # Misc 17 | .DS_Store 18 | .fleet 19 | .idea 20 | 21 | # Local env files 22 | .env 23 | .env.* 24 | !.env.example 25 | -------------------------------------------------------------------------------- /Dockerfile.FE: -------------------------------------------------------------------------------- 1 | # Dockerfile 2 | FROM python:3.10.4-slim-buster 3 | RUN pip install --upgrade pip 4 | 5 | RUN useradd -m myuser 6 | USER myuser 7 | WORKDIR /home/myuser 8 | 9 | COPY --chown=myuser:myuser ./Frontend ./ 10 | 11 | ENV PATH="/home/myuser/.local/bin:${PATH}" 12 | 13 | # python -m http.server 3001 14 | CMD [ "python", "-m", "http.server", "3000" ] -------------------------------------------------------------------------------- /UI/pages/search.vue: -------------------------------------------------------------------------------- 1 | 14 | 15 | 18 | 19 | -------------------------------------------------------------------------------- /UI/types/Search/index.ts: -------------------------------------------------------------------------------- 1 | export interface SearchGroupItem { 2 | iconName: string | null | undefined; 3 | iconImage: string | null; 4 | key: number | string; 5 | title: string; 6 | label: string; 7 | tags?: string; 8 | action: () => void; 9 | } 10 | 11 | export interface SearchGroup { 12 | name: string; 13 | items: SearchGroupItem[]; 14 | } 15 | export type SearchGroups = SearchGroup[]; 16 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | g4f==0.2.0.7 2 | setuptools 3 | wheel 4 | requests==2.31.0 5 | moviepy==1.0.3 6 | termcolor==2.4.0 7 | flask==3.0.0 8 | flask-cors==4.0.0 9 | playsound==1.3.0 10 | Pillow==9.5.0 11 | python-dotenv==1.0.0 12 | srt_equalizer==0.1.8 13 | platformdirs==4.1.0 14 | undetected_chromedriver 15 | assemblyai 16 | brotli 17 | google-api-python-client 18 | oauth2client 19 | openai 20 | google-generativeai 21 | -------------------------------------------------------------------------------- /UI/components/ErrorView.vue: -------------------------------------------------------------------------------- 1 | 14 | 15 | 18 | 19 | -------------------------------------------------------------------------------- /UI/composables/useSearchDialog.ts: -------------------------------------------------------------------------------- 1 | import { ref } from "vue"; 2 | 3 | const listener = ref(); 4 | export function useSearchDialog() { 5 | const commandIcon = ref(isWindows() ? 
"CTRL" : "⌘"); 6 | return { 7 | commandIcon, 8 | trigger: (cb: () => void): void => { 9 | listener.value = cb; 10 | }, 11 | open: (): void => { 12 | listener.value && listener.value(); 13 | }, 14 | }; 15 | } 16 | -------------------------------------------------------------------------------- /UI/components/SubtitleSettings.vue: -------------------------------------------------------------------------------- 1 | 14 | 15 | 18 | 19 | -------------------------------------------------------------------------------- /UI/app.vue: -------------------------------------------------------------------------------- 1 | 17 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.5' 2 | services: 3 | api: 4 | build: 5 | context: . 6 | dockerfile: Dockerfile 7 | ports: 8 | - 8080:8080 9 | env_file: 10 | - .env 11 | volumes: 12 | - .:/home/app 13 | frontend: 14 | build: 15 | context: . 16 | dockerfile: Dockerfile.FE 17 | ports: 18 | - ${FE_PORT}:3000 19 | nuxt: 20 | build: 21 | context: . 22 | dockerfile: Dockerfile.FE.Nuxt 23 | ports: 24 | - ${FE_NUXT}:3000 25 | -------------------------------------------------------------------------------- /UI/pages/index.vue: -------------------------------------------------------------------------------- 1 | 14 | 15 | 21 | 22 | -------------------------------------------------------------------------------- /UI/types/Menu/index.ts: -------------------------------------------------------------------------------- 1 | import type { RouteMeta } from "vue-router"; 2 | 3 | export enum RoleConstants { 4 | ADMIN = "admin", 5 | USER = "user", 6 | GUEST = "guest", 7 | } 8 | 9 | export interface Menu { 10 | name: string; 11 | icon?: string; 12 | path: string; 13 | paramPath?: string; 14 | shouldDisabled?: boolean; 15 | children?: Menu[]; 16 | orderNumber?: number; 17 | allowedRoles?: RoleConstants[]; 18 | meta?: Partial; 19 | shouldHideMenu?: boolean; 20 | description?: string; 21 | data?: Record; 22 | shouldShow?: boolean; 23 | } 24 | -------------------------------------------------------------------------------- /Dockerfile.FE.Nuxt: -------------------------------------------------------------------------------- 1 | # use node 16 alpine image as build image 2 | FROM node:18.9.0 as builder 3 | # RUN corepack enable 4 | RUN npm install -g pnpm 5 | # create work directory in app folder 6 | WORKDIR /app 7 | 8 | # copy over package.json files 9 | COPY ./UI . 
10 | RUN npm config set registry https://registry.npmjs.org/ 11 | RUN pnpm i --ignore-scripts --unsafe-perm 12 | ENV PATH /usr/app/frontend/node_modules/.bin:$PATH 13 | RUN pnpm build 14 | 15 | # expose the host and port 3000 to the server 16 | ENV HOST 0.0.0.0 17 | EXPOSE 3000 18 | 19 | # run the build project with node 20 | ENTRYPOINT ["node", ".output/server/index.mjs"] 21 | -------------------------------------------------------------------------------- /UI/composables/useGlobalSettings.ts: -------------------------------------------------------------------------------- 1 | 2 | export const useApiSettings = () => { 3 | const API_SETTINGS = useLocalStorage("API_SETTINGS", { 4 | URL:"http://localhost:8080", 5 | }) 6 | return { 7 | API_SETTINGS 8 | } 9 | } 10 | export const useGlobalSettings = () => { 11 | const globalSettings = useLocalStorage("globalSettings", { 12 | font: "Roboto", 13 | color: "#000", 14 | subtitles_position: "center,bottom", 15 | fontsize: 20, 16 | stroke_color: "#000", 17 | stroke_width: 5, 18 | aiModel: "g4f", 19 | voice: "en_us_001", 20 | }); 21 | 22 | return { 23 | globalSettings 24 | }; 25 | } -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: 'leamsigc' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile 2 | FROM python:3 3 | RUN apt-get -y update && apt-get -y install ffmpeg imagemagick procps 4 | RUN apt-get -y install fonts-liberation 5 | RUN pip install --upgrade pip 6 | # Install some special fonts we use in testing, etc.. 7 | 8 | RUN apt-get install -y locales && \ 9 | locale-gen C.UTF-8 && \ 10 | /usr/sbin/update-locale LANG=C.UTF-8 11 | 12 | ENV LC_ALL C.UTF-8 13 | # modify ImageMagick policy file so that Textclips work correctly. 14 | RUN sed -i 's/none/read,write/g' /etc/ImageMagick-6/policy.xml 15 | 16 | WORKDIR /home/app 17 | 18 | COPY requirements.txt requirements.txt 19 | COPY . . 
20 | RUN pip install -r requirements.txt 21 | 22 | WORKDIR /home/app/Backend 23 | CMD ["python", "main.py"] 24 | -------------------------------------------------------------------------------- /UI/app.config.ts: -------------------------------------------------------------------------------- 1 | import { _colors, _fontFamily } from "#tailwind-config/theme.mjs"; 2 | 3 | export default defineAppConfig({ 4 | naiveui: { 5 | themeConfig: { 6 | shared: { 7 | common: { 8 | fontFamily: _fontFamily.sans.join(", "), 9 | }, 10 | }, 11 | light: { 12 | common: { 13 | primaryColor: _colors.blue[600], 14 | primaryColorHover: _colors.blue[500], 15 | primaryColorPressed: _colors.blue[700], 16 | }, 17 | }, 18 | dark: { 19 | common: { 20 | primaryColor: _colors.blue[500], 21 | primaryColorHover: _colors.blue[400], 22 | primaryColorPressed: _colors.blue[600], 23 | }, 24 | }, 25 | }, 26 | }, 27 | }); 28 | -------------------------------------------------------------------------------- /UI/utils/PlatformUtils.ts: -------------------------------------------------------------------------------- 1 | export enum OperatingSystem { 2 | Windows = "Windows", 3 | MacOS = "MacOS", 4 | UNIX = "UNIX", 5 | Linux = "Linux", 6 | Unknown = "Unknown", 7 | } 8 | export type OS = keyof typeof OperatingSystem; 9 | export function detectOperatingSystem(): OS { 10 | const { userAgent } = navigator || { userAgent: "" }; 11 | if (userAgent.includes("Win")) { 12 | return OperatingSystem.Windows; 13 | } 14 | if (userAgent.includes("Mac")) { 15 | return OperatingSystem.MacOS; 16 | } 17 | if (userAgent.includes("X11")) { 18 | return OperatingSystem.UNIX; 19 | } 20 | if (userAgent.includes("Linux")) { 21 | return OperatingSystem.Linux; 22 | } 23 | 24 | return OperatingSystem.Unknown; 25 | } 26 | export function isWindows(): boolean { 27 | return detectOperatingSystem() === OperatingSystem.Windows; 28 | } 29 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG]" 5 | labels: '' 6 | assignees: leamsigc 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. Linux, Windows] 28 | - Browser [e.g. chrome, edge] 29 | - Python Version [e.g. 3.9] 30 | 31 | **Additional context** 32 | Add any other context about the problem here. 
33 | -------------------------------------------------------------------------------- /UI/composables/useMenuSetting.ts: -------------------------------------------------------------------------------- 1 | import type { MenuSetting } from "~/types/Project/Settings"; 2 | 3 | export function useMenuSetting() { 4 | console.log("useMenuSetting"); 5 | 6 | const appStore = useAppStore(); 7 | 8 | const getCollapsed = computed(() => appStore.getMenuSetting.collapsed); 9 | 10 | function getMenuSetting() { 11 | return appStore.getMenuSetting; 12 | } 13 | 14 | // Set menu configuration 15 | function setMenuSetting(menuSetting: Partial): void { 16 | appStore.setProjectSetting({ menuSetting }); 17 | } 18 | 19 | function toggleCollapsed() { 20 | console.log("toggleCollapsed"); 21 | 22 | setMenuSetting({ 23 | collapsed: !unref(getCollapsed), 24 | }); 25 | } 26 | return { 27 | getMenuSetting, 28 | setMenuSetting, 29 | getCollapsed, 30 | toggleCollapsed, 31 | }; 32 | } 33 | -------------------------------------------------------------------------------- /UI/components/VoiceSettings.vue: -------------------------------------------------------------------------------- 1 | 26 | 27 | 32 | 33 | -------------------------------------------------------------------------------- /UI/components/ActionIcon.vue: -------------------------------------------------------------------------------- 1 | 19 | 20 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /UI/components/RedirectView.vue: -------------------------------------------------------------------------------- 1 | 36 | 37 | 40 | 41 | -------------------------------------------------------------------------------- /UI/content/docs/how-to-use.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: 'Short generator how to use' 3 | description: 'Small tutorial on how to use the short generator' 4 | --- 5 | 6 | 7 | 8 | # How to use the Short Generator 9 | 10 | 11 | 1. Click on the "Generate" button to start the process of generating a new short 12 | 1. Enter a topic of what the short will be about 13 | 2. Add extra prompt information if needed 14 | 3. Review the script 15 | 4. Select a specific voice to use or set a global default voice for all generations 16 | 5. Update the search terms if needed 17 | 1. Or can search manually and select the videos that you like by clicking on them 18 | 2, view all the selected vieos -> Click on the "Search and select videos" button and then click on the tab "Selected Videos" to see all the videos that you have selected 19 | 6. Click on the "Generate" button 20 | 7. 
You can add your own music to the video by selecting a music track then click on "Add music" -------------------------------------------------------------------------------- /UI/nuxt.config.ts: -------------------------------------------------------------------------------- 1 | // https://nuxt.com/docs/api/configuration/nuxt-config 2 | export default defineNuxtConfig({ 3 | ssr: false, 4 | devtools: { enabled: true }, 5 | modules: [ 6 | "@bg-dev/nuxt-naiveui", 7 | "@vueuse/nuxt", 8 | "@nuxtjs/tailwindcss", 9 | "@nuxt/content", 10 | "nuxt-icon", 11 | "@pinia/nuxt", 12 | "@unocss/nuxt", 13 | "@nuxtjs/i18n", 14 | "nuxt-lodash", 15 | ], 16 | css: ["~/assets/scss/main.scss"], 17 | tailwindcss: { 18 | exposeConfig: { 19 | write: true, 20 | }, 21 | }, 22 | content: { 23 | markdown: { 24 | anchorLinks: false, 25 | }, 26 | }, 27 | i18n: { 28 | locales: [ 29 | { 30 | code: "en", 31 | file: "en-US.json", 32 | }, 33 | ], 34 | lazy: true, 35 | langDir: "locales", 36 | defaultLocale: "en", 37 | }, 38 | runtimeConfig: { 39 | public: { 40 | pexelsApiKey: process.env.PEXELS_API_KEY, 41 | }, 42 | }, 43 | }); 44 | -------------------------------------------------------------------------------- /UI/composables/useVideoSetings.ts: -------------------------------------------------------------------------------- 1 | export interface VideoResultFormat { 2 | url: string; 3 | image: string; 4 | videoUrl?: { 5 | fileType: string; 6 | link: string; 7 | quality: string; 8 | }; 9 | } 10 | 11 | export const useVideoSettings = () => { 12 | const video = useLocalStorage<{ 13 | script: string; 14 | voice: string; 15 | videoSubject: string; 16 | extraPrompt: string; 17 | search: string; 18 | aiModel: string; 19 | finalVideoUrl: string; 20 | selectedAudio: string; 21 | selectedVideoUrls: VideoResultFormat[]; 22 | }>('VideoSettings',{ 23 | script: "", 24 | voice: "en_us_001", 25 | videoSubject: "", 26 | extraPrompt: "", 27 | search: "", 28 | 29 | aiModel: "g4f", 30 | 31 | finalVideoUrl: "", 32 | // Audio related 33 | 34 | selectedAudio: "", 35 | selectedVideoUrls: [], 36 | }); 37 | 38 | 39 | return { video } 40 | } -------------------------------------------------------------------------------- /UI/utils/ScreenUtils.ts: -------------------------------------------------------------------------------- 1 | import { breakpointsTailwind, useBreakpoints } from "@vueuse/core"; 2 | 3 | export const breakpoints = useBreakpoints(breakpointsTailwind); 4 | export const isMediumOrLargeScreen = breakpoints.between("sm", "xl"); 5 | export const isExtraLargeScreen = breakpoints.smallerOrEqual("xl"); 6 | export const isSmallerOrEqualSm = breakpoints.smallerOrEqual("sm"); 7 | export const isSmallerOrEqualMd = breakpoints.smallerOrEqual("md"); 8 | export const isSmallerOrEqualLg = breakpoints.smallerOrEqual("lg"); 9 | export const isSmallerOrEqualXl = breakpoints.smallerOrEqual("xl"); 10 | export const isSmallerOrEqual2xl = breakpoints.smallerOrEqual("2xl"); 11 | export const isGreaterOrEqualSm = breakpoints.greaterOrEqual("sm"); 12 | export const isGreaterOrEqualMd = breakpoints.greaterOrEqual("md"); 13 | export const isGreaterOrEqualLg = breakpoints.greaterOrEqual("lg"); 14 | export const isGreaterOrEqualXl = breakpoints.greaterOrEqual("xl"); 15 | export const isGreaterOrEqual2xl = breakpoints.greaterOrEqual("2xl"); 16 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # See EnvironmentVariables.md for more 
information. 2 | 3 | # Necessary API Keys 4 | # ------------------- 5 | 6 | # TikTok Session ID 7 | # Obtain your session ID by logging into TikTok and copying the sessionid cookie. 8 | TIKTOK_SESSION_ID="" 9 | 10 | # ImageMagick Binary Path 11 | # Download ImageMagick from https://imagemagick.org/script/download.php 12 | IMAGEMAGICK_BINARY="" 13 | 14 | # Pexels API Key 15 | # Register at https://www.pexels.com/api/ to get your API key. 16 | PEXELS_API_KEY="" 17 | 18 | # Optional API Keys 19 | # ----------------- 20 | 21 | # OpenAI API Key 22 | # Visit https://openai.com/api/ for details on obtaining an API key. 23 | OPENAI_API_KEY="" 24 | 25 | # AssemblyAI API Key 26 | # Sign up at https://www.assemblyai.com/ to receive an API key. 27 | ASSEMBLY_AI_API_KEY="" 28 | 29 | # Google API Key 30 | # Generate your API key through https://makersuite.google.com/app/apikey 31 | GOOGLE_API_KEY="" 32 | 33 | # Front end port 34 | FE_PORT=3000 35 | # Alternate front end port 36 | FE_NUXT=5000 -------------------------------------------------------------------------------- /UI/content/docs/road-map.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Project Roadmap: 4 | 5 | - [x] Generate the script first 6 | - [x] Let users review the script before the audio and video generation 7 | - [x] Let users view all the generated videos in a single place 8 | - [x] Let users view the generated video in the browser 9 | - [x] Let users select the audio music to add to the video 10 | 11 | - [ ] Update the view to have a better user experience 12 | - [ ] Let users preview the generated video in the same view and let users iterate on the video 13 | - [ ] Let users download the generated video 14 | - [ ] Let users upload videos to be used in the video itself 15 | - [ ] Let users upload audio to be used in the video itself 16 | - [ ] Let users have general configuration 17 | - [ ] Let users add multiple video links to download 18 | - [ ] Let users select the font and upload fonts 19 | - [ ] Let users select the color for the text 20 | 21 | ### Features 🚀 plans: 22 | - [ ] Let users schedule the video upload to [YouTube, Facebook Business, LinkedIn] 23 | - [ ] Let users create videos from the calendar and schedule them to be uploaded -------------------------------------------------------------------------------- /UI/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nuxt-app", 3 | "private": true, 4 | "type": "module", 5 | "scripts": { 6 | "build": "nuxt build", 7 | "dev": "nuxt dev --dotenv ../.env ", 8 | "generate": "nuxt generate", 9 | "preview": "nuxt preview", 10 | "postinstall": "nuxt prepare", 11 | "start": "node .output/server/index.mjs" 12 | }, 13 | "dependencies": { 14 | "@pinia/nuxt": "^0.5.1", 15 | "@unocss/nuxt": "^0.58.5", 16 | "nuxt": "^3.10.1", 17 | "tabulator-tables": "^5.6.0", 18 | "vue": "^3.4.15", 19 | "vue-router": "^4.2.5" 20 | }, 21 | "devDependencies": { 22 | "@bg-dev/nuxt-naiveui": "^1.10.1", 23 | "@nuxt/content": "^2.12.0", 24 | "@nuxt/devtools": "^1.0.8", 25 | "@nuxtjs/i18n": "^8.1.1", 26 | "@nuxtjs/tailwindcss": "^6.11.3", 27 | "@tailwindcss/typography": "^0.5.10", 28 | "@types/tabulator-tables": "^5.5.9", 29 | "@vueuse/nuxt": "^10.7.2", 30 | "nuxt-icon": "^0.6.8", 31 | "nuxt-lodash": "^2.5.3", 32 | "sass": "^1.71.0" 33 | }, 34 | "resolutions": { 35 | "vue": "3.3.13" 36 | } 37 | } -------------------------------------------------------------------------------- /UI/composables/useTabs.ts: 
-------------------------------------------------------------------------------- 1 | import type { RouteLocationNormalized, Router } from "vue-router"; 2 | import { useRouter } from "vue-router"; 3 | import { unref } from "vue"; 4 | import { useTabsStore } from "../stores/TabsStore"; 5 | export function useTabs(_router?: Router) { 6 | console.log("useTabs"); 7 | 8 | const tabStore = useTabsStore(); 9 | const router = _router || useRouter(); 10 | 11 | const { currentRoute } = router; 12 | 13 | function getCurrentTab() { 14 | const route = unref(currentRoute); 15 | return tabStore.getTabsList.find( 16 | (item) => item.fullPath === route.fullPath 17 | )!; 18 | } 19 | 20 | return { 21 | getTabsList: () => tabStore.getTabsList, 22 | getPinnedTabsList: () => tabStore.getTabsList, 23 | getLimitTabsList: () => tabStore.getTabsList, 24 | closeTab: (tab: Tab) => tabStore.closeTab(tab), 25 | closePinnedTab: (tab: Tab) => tabStore.closePinnedTab(tab), 26 | addTab: (route: RouteLocationNormalized) => tabStore.addTab(route), 27 | pinnedTab: (tab: Tab) => tabStore.pinnedTab(tab), 28 | getCurrentTab, 29 | }; 30 | } 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 leamsigc 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /UI/README.md: -------------------------------------------------------------------------------- 1 | # Nuxt 3 Minimal Starter 2 | 3 | Look at the [Nuxt 3 documentation](https://nuxt.com/docs/getting-started/introduction) to learn more. 
4 | 5 | ## Setup 6 | 7 | Make sure to install the dependencies: 8 | 9 | ```bash 10 | # npm 11 | npm install 12 | 13 | # pnpm 14 | pnpm install 15 | 16 | # yarn 17 | yarn install 18 | 19 | # bun 20 | bun install 21 | ``` 22 | 23 | ## Development Server 24 | 25 | Start the development server on `http://localhost:3000`: 26 | 27 | ```bash 28 | # npm 29 | npm run dev 30 | 31 | # pnpm 32 | pnpm run dev 33 | 34 | # yarn 35 | yarn dev 36 | 37 | # bun 38 | bun run dev 39 | ``` 40 | 41 | ## Production 42 | 43 | Build the application for production: 44 | 45 | ```bash 46 | # npm 47 | npm run build 48 | 49 | # pnpm 50 | pnpm run build 51 | 52 | # yarn 53 | yarn build 54 | 55 | # bun 56 | bun run build 57 | ``` 58 | 59 | Locally preview production build: 60 | 61 | ```bash 62 | # npm 63 | npm run preview 64 | 65 | # pnpm 66 | pnpm run preview 67 | 68 | # yarn 69 | yarn preview 70 | 71 | # bun 72 | bun run preview 73 | ``` 74 | 75 | Check out the [deployment documentation](https://nuxt.com/docs/getting-started/deployment) for more information. 76 | -------------------------------------------------------------------------------- /UI/utils/RouteHelpers.ts: -------------------------------------------------------------------------------- 1 | import type { 2 | RouteLocationNormalized, 3 | RouteRecordNormalized, 4 | } from "vue-router"; 5 | 6 | export function getRawRoute( 7 | route: RouteLocationNormalized 8 | ): RouteLocationNormalized { 9 | if (!route) { 10 | return route; 11 | } 12 | const { matched, ...otherProps } = route; 13 | return { 14 | ...otherProps, 15 | matched: matched?.map(({ meta, name, path }) => ({ 16 | meta, 17 | name, 18 | path, 19 | })) as RouteRecordNormalized[], 20 | }; 21 | } 22 | 23 | const key = Symbol("route change event"); 24 | const emitter = mitt<{ [key]: RouteLocationNormalized }>(); 25 | let lastTab: RouteLocationNormalized; 26 | 27 | export function notifyRouteChange(newRoute: RouteLocationNormalized) { 28 | const rawRoute = getRawRoute(newRoute); 29 | emitter.emit(key, rawRoute); 30 | lastTab = rawRoute; 31 | } 32 | 33 | export function listenToRouteChange( 34 | callback: (route: RouteLocationNormalized) => void, 35 | immediate = true 36 | ) { 37 | emitter.on(key, callback); 38 | immediate && lastTab && callback(lastTab); 39 | } 40 | 41 | export function removeRouteChangeListener() { 42 | emitter.all.clear(); 43 | } 44 | -------------------------------------------------------------------------------- /UI/components/GenerateScript.vue: -------------------------------------------------------------------------------- 1 | 22 | 23 | 46 | 47 | -------------------------------------------------------------------------------- /EnvironmentVariables.md: -------------------------------------------------------------------------------- 1 | # Environment Variables 2 | 3 | ## Required 4 | 5 | - TIKTOK_SESSION_ID: Your TikTok session ID is required. Obtain it by logging into TikTok in your browser and copying the value of the `sessionid` cookie. 6 | 7 | - IMAGEMAGICK_BINARY: The filepath to the ImageMagick binary (.exe file) is needed. Obtain it [here](https://imagemagick.org/script/download.php). 8 | 9 | - PEXELS_API_KEY: Your unique Pexels API key is required. Obtain yours [here](https://www.pexels.com/api/). 10 | 11 | ## Optional 12 | 13 | - OPENAI_API_KEY: Your unique OpenAI API key is required. Obtain yours [here](https://platform.openai.com/api-keys), only nessecary if you want to use the OpenAI models. 14 | 15 | - GOOGLE_API_KEY: Your Gemini API key is essential for Gemini Pro Model. 
Generate one securely at [Get API key | Google AI Studio](https://makersuite.google.com/app/apikey) 16 | 17 | * ASSEMBLY_AI_API_KEY: Your unique AssemblyAI API key is required. You can obtain one [here](https://www.assemblyai.com/app/). This field is optional; if left empty, the subtitle will be created based on the generated script. Subtitles can also be created locally. 18 | 19 | Join the [Discord](https://dsc.gg/fuji-community) for support and updates. 20 | -------------------------------------------------------------------------------- /UI/components/HeaderLayout.vue: -------------------------------------------------------------------------------- 1 | 20 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /UI/pages/videos/index.vue: -------------------------------------------------------------------------------- 1 | 21 | 22 | 52 | 53 | -------------------------------------------------------------------------------- /UI/components/ToolTipper.vue: -------------------------------------------------------------------------------- 1 | 30 | 31 | 53 | 54 | 55 | -------------------------------------------------------------------------------- /UI/locales/en-US.json: -------------------------------------------------------------------------------- 1 | { 2 | "layouts": { 3 | "header": { 4 | "toggleFullScreen": "Full Screen" 5 | } 6 | }, 7 | "searchDialog": { 8 | "searchPlaceholder": "Search...", 9 | "applications": "Applications", 10 | "chatBot": "Chat", 11 | "actions": "Actions", 12 | "action": "Full Screen", 13 | "noResultsFound": "No Resoults for the fallowing qury: ", 14 | "toSelectTooltip": "Select", 15 | "toNavigateTooltip": "Navigate", 16 | "actionsOptions": { 17 | "themeToggle": "Switch theme" 18 | } 19 | }, 20 | "video": { 21 | "generate": { 22 | "step": { 23 | "one": { 24 | "title": "Please enter a video subject", 25 | "cancel": "Cancel", 26 | "generate": "Generate script", 27 | "videoSubject": { 28 | "placeholder": "Video subject..." 29 | }, 30 | "extraPrompt": { 31 | "placeholder": "Extra prompt..." 32 | } 33 | }, 34 | "two": { 35 | "script": { 36 | "placeholder": "Video generated script..." 
37 | } 38 | } 39 | } 40 | } 41 | }, 42 | "view": { 43 | "generate": { 44 | "setting": { 45 | "label": "Settings" 46 | }, 47 | "music": { 48 | "label": "Music" 49 | }, 50 | "voice": { 51 | "label": "Voice" 52 | }, 53 | "subtitles": { 54 | "label": "Subtitle" 55 | } 56 | } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /UI/content/docs/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: 'MoneyPrinter Documentation' 3 | description: 'MoneyPrinter Documentation' 4 | --- 5 | 6 | 7 | # Documentation related to the Money Printer UI 8 | 9 | [Project roadmap](/docs/road-map) | [How to use](/docs/how-to-use) 10 | 11 | ## Getting started 12 | 13 | 14 | 15 | ### We have two options to get started 16 | 17 | 18 | #### Option one: ***Local installation*** 19 | 20 | 21 | 22 | #### Install requirements 23 | ```bash 24 | pip install -r requirements.txt 25 | ``` 26 | #### Copy .env.example and fill out values 27 | ```bash 28 | cp .env.example .env 29 | ``` 30 | #### Run the backend server 31 | ```bash 32 | cd Backend 33 | python main.py 34 | ``` 35 | #### Run the frontend server 36 | ```bash 37 | cd ../Frontend 38 | python -m http.server 3000 39 | ``` 40 | #### Run the Nuxt front end 41 | ```bash 42 | cd ../UI 43 | npm install 44 | npm run dev 45 | 46 | ``` 47 | 48 | 49 | 50 | #### Option two: ***Docker container*** 51 | 52 | 53 | 1. Build the Docker image 54 | ```bash 55 | docker-compose build --no-cache 56 | ``` 57 | 2. Run the Docker container 58 | ```bash 59 | docker-compose up -d 60 | ``` 61 | 62 | 3. The following URLs will be available 63 | 64 | 65 | [Backend](http://localhost:8080) 66 | 67 | [Frontend](http://localhost:3000) -> Basic frontend -> The port is 3000 by default but you can change it in the .env 68 | 69 | [Frontend](http://localhost:5000) -> Nuxt frontend -> The port is 5000 by default but you can change it in the .env -------------------------------------------------------------------------------- /UI/components/MusicSettings.vue: -------------------------------------------------------------------------------- 1 | 26 | 27 | 56 | 57 | -------------------------------------------------------------------------------- /UI/components/AllSettings.vue: -------------------------------------------------------------------------------- 1 | 32 | 33 | 74 | 75 | -------------------------------------------------------------------------------- /UI/components/VideoSelected.vue: -------------------------------------------------------------------------------- 1 | 30 | 31 | 69 | 70 | -------------------------------------------------------------------------------- /UI/components/SearchTrigger.vue: -------------------------------------------------------------------------------- 1 | 19 | 20 | 33 | 34 | 85 | -------------------------------------------------------------------------------- /UI/layouts/default.vue: -------------------------------------------------------------------------------- 1 | 41 | 42 | 75 | 76 | -------------------------------------------------------------------------------- /Backend/search.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from typing import List 4 | from termcolor import colored 5 | 6 | def search_for_stock_videos(query: str, api_key: str, it: int, min_dur: int) -> List[str]: 7 | """ 8 | Searches for stock videos based on a query. 
9 | 10 | Args: 11 | query (str): The query to search for. 12 | api_key (str): The API key to use. 13 | 14 | Returns: 15 | List[str]: A list of stock videos. 16 | """ 17 | 18 | # Build headers 19 | headers = { 20 | "Authorization": api_key 21 | } 22 | 23 | # Build URL 24 | qurl = f"https://api.pexels.com/videos/search?query={query}&per_page={it}" 25 | 26 | # Send the request 27 | r = requests.get(qurl, headers=headers) 28 | 29 | # log response 30 | print(colored(f"Response: {r.status_code}", "green")) 31 | 32 | # Parse the response 33 | response = r.json() 34 | 35 | # Parse each video 36 | raw_urls = [] 37 | video_url = [] 38 | video_res = 0 39 | try: 40 | # loop through each video in the result 41 | for i in range(it): 42 | #check if video has desired minimum duration 43 | if response["videos"][i]["duration"] < min_dur: 44 | continue 45 | raw_urls = response["videos"][i]["video_files"] 46 | 47 | 48 | temp_video_url = "" 49 | 50 | # loop through each url to determine the best quality 51 | for video in raw_urls: 52 | # Check if video has a valid download link 53 | if ".com" in video["link"]: 54 | # Only save the URL with the largest resolution 55 | if (video["width"]*video["height"]) > video_res: 56 | temp_video_url = video["link"] 57 | video_res = video["width"]*video["height"] 58 | 59 | # add the url to the return list if it's not empty 60 | if temp_video_url != "": 61 | video_url.append(temp_video_url) 62 | 63 | except Exception as e: 64 | print(colored("[-] No Videos found.", "red")) 65 | print(colored(e, "red")) 66 | 67 | # Let user know 68 | print(colored(f"\t=> \"{query}\" found {len(video_url)} Videos", "cyan")) 69 | 70 | # Return the video url 71 | return video_url 72 | -------------------------------------------------------------------------------- /UI/assets/scss/helpers/_transition.scss: -------------------------------------------------------------------------------- 1 | /* zoom-fade */ 2 | .zoom-fade-leave-active, 3 | .zoom-fade-enter-active { 4 | transition: all 0.5s cubic-bezier(0.76, 0, 0.24, 1); 5 | } 6 | .zoom-fade-enter-from { 7 | opacity: 0; 8 | transform: scale(0.9); 9 | } 10 | .zoom-fade-enter-to { 11 | opacity: 1; 12 | transform: scale(1); 13 | } 14 | .zoom-fade-leave-to { 15 | opacity: 0; 16 | transform: scale(1.1); 17 | } 18 | 19 | /* zoom-out */ 20 | .zoom-out-leave-active, 21 | .zoom-out-enter-active { 22 | transition: all 0.5s cubic-bezier(0.76, 0, 0.24, 1); 23 | } 24 | .zoom-out-enter-from { 25 | opacity: 0; 26 | transform: scale(1.1); 27 | } 28 | .zoom-out-enter-to { 29 | opacity: 1; 30 | transform: scale(1); 31 | } 32 | .zoom-out-leave-to { 33 | opacity: 0; 34 | transform: scale(0.9); 35 | } 36 | 37 | /* fade-slide */ 38 | .fade-slide-leave-active, 39 | .fade-slide-enter-active { 40 | transition: all 0.5s cubic-bezier(0.76, 0, 0.24, 1); 41 | } 42 | .fade-slide-enter-from { 43 | opacity: 0; 44 | transform: translateX(100%); 45 | } 46 | .fade-slide-enter-to { 47 | opacity: 1; 48 | transform: translateX(0); 49 | } 50 | .fade-slide-leave-to { 51 | opacity: 0; 52 | transform: translateX(-100%); 53 | } 54 | 55 | /* fade */ 56 | .fade-leave-active, 57 | .fade-enter-active { 58 | transition: all 0.5s; 59 | } 60 | .fade-enter-from { 61 | opacity: 0; 62 | } 63 | .fade-leave-to { 64 | opacity: 0; 65 | } 66 | 67 | /* fade-bottom */ 68 | .fade-bottom-leave-active, 69 | .fade-bottom-enter-active { 70 | transition: all 0.5s cubic-bezier(0.76, 0, 0.24, 1); 71 | } 72 | .fade-bottom-enter-from { 73 | opacity: 0; 74 | transform: translateY(100%); 75 | } 76 | .fade-bottom-enter-to 
{ 77 | opacity: 1; 78 | transform: translateY(0); 79 | } 80 | .fade-bottom-leave-to { 81 | opacity: 0; 82 | transform: translateY(-100%); 83 | } 84 | 85 | /* fade-scale */ 86 | .fade-scale-leave-active, 87 | .fade-scale-enter-active { 88 | transition: all 0.5s cubic-bezier(0.76, 0, 0.24, 1); 89 | } 90 | .fade-scale-enter-from { 91 | opacity: 0; 92 | transform: scale(0.9); 93 | } 94 | .fade-scale-enter-to { 95 | opacity: 1; 96 | transform: scale(1); 97 | } 98 | .fade-scale-leave-to { 99 | opacity: 0; 100 | transform: scale(0.9); 101 | } 102 | -------------------------------------------------------------------------------- /UI/components/VideosTable.vue: -------------------------------------------------------------------------------- 1 | 83 | 84 | 90 | 91 | -------------------------------------------------------------------------------- /UI/components/NaiveLayoutSidebar.vue: -------------------------------------------------------------------------------- 1 | 73 | 74 | 104 | 105 | 115 | -------------------------------------------------------------------------------- /Backend/settings.py: -------------------------------------------------------------------------------- 1 | # Create global settings to save the following 2 | 3 | 4 | fontSettings = { 5 | "font": "../static/assets/fonts/bold_font.ttf", 6 | "fontsize": 100, 7 | "color": "#FFFF00", 8 | "stroke_color": "black", 9 | "stroke_width": 5, 10 | "subtitles_position": "center,bottom", 11 | } 12 | 13 | 14 | scriptSettings = { 15 | "defaultPromptStart": 16 | """ 17 | # Role: Video Script Generator 18 | 19 | ## Goals: 20 | Generate a script for a video, depending on the subject of the video. 21 | 22 | ## Constrains: 23 | 1. the script is to be returned as a string with the specified number of paragraphs. 24 | 2. do not under any circumstance reference this prompt in your response. 25 | 3. get straight to the point, don't start with unnecessary things like, "welcome to this video". 26 | 4. you must not include any type of markdown or formatting in the script, never use a title. 27 | 5. only return the raw content of the script. 28 | 6. do not include "voiceover", "narrator" or similar indicators of what should be spoken at the beginning of each paragraph or line. 29 | 7. you must not mention the prompt, or anything about the script itself. also, never talk about the amount of paragraphs or lines. just write the script. 30 | 8. respond in the same language as the video subject. 31 | 32 | """ , 33 | "defaultPromptEnd": 34 | """ 35 | Get straight to the point, don't start with unnecessary things like, "welcome to this video". 36 | YOU MUST NOT INCLUDE ANY TYPE OF MARKDOWN OR FORMATTING IN THE SCRIPT, NEVER USE A TITLE. 37 | ONLY RETURN THE RAW CONTENT OF THE SCRIPT. DO NOT INCLUDE "VOICEOVER", "NARRATOR" OR SIMILAR INDICATORS OF WHAT SHOULD BE SPOKEN AT THE BEGINNING OF EACH PARAGRAPH OR LINE. YOU MUST NOT MENTION THE PROMPT, OR ANYTHING ABOUT THE SCRIPT ITSELF. ALSO, NEVER TALK ABOUT THE AMOUNT OF PARAGRAPHS OR LINES. JUST WRITE THE SCRIPT. 
38 | """ 39 | } 40 | 41 | 42 | 43 | def get_settings() -> dict: 44 | """ 45 | Return the global settings 46 | The script settings are: 47 | defaultPromptStart: Start of the prompt 48 | defaultPromptEnd: End of the prompt 49 | The Subtitle settings are: 50 | font: font path, 51 | fontsize: font size, 52 | color: Hexadecimal color, 53 | stroke_color: color of the stroke, 54 | stroke_width: Number of pixels of the stroke 55 | subtitles_position: Position of the subtitles 56 | """ 57 | # Return the global settings 58 | return { 59 | "scriptSettings": scriptSettings, 60 | "fontSettings": fontSettings 61 | } 62 | 63 | # Update the global settings 64 | def update_settings(new_settings: dict, settingType="FONT"): 65 | """ 66 | Update the global settings 67 | The script settings are: 68 | defaultPromptStart: Start of the prompt 69 | defaultPromptEnd: End of the prompt 70 | The Subtitle settings are: 71 | font: font path, 72 | fontsize: font size, 73 | color: Hexadecimal color, 74 | stroke_color: color of the stroke, 75 | stroke_width: Number of pixels of the stroke 76 | subtitles_position: Position of the subtitles 77 | 78 | Args: 79 | new_settings (dict): The new settings to update 80 | settingType (str, optional): The type of setting to update. Defaults to "FONT" OR "SCRIPT". 81 | """ 82 | # Update the global 83 | if settingType == "FONT": 84 | fontSettings.update(new_settings) 85 | elif settingType == "SCRIPT": 86 | scriptSettings.update(new_settings) -------------------------------------------------------------------------------- /UI/stores/AppStore.ts: -------------------------------------------------------------------------------- 1 | import { defineStore } from "pinia"; 2 | import type { DeepPartial } from "unocss"; 3 | import { 4 | type HeaderSetting, 5 | type MenuSetting, 6 | type ProjectSetting, 7 | type TransitionSetting, 8 | RouterTransitionConstants, 9 | } from "~/types/Project/Settings"; 10 | 11 | const APP_STORE_ID = "MONEY_PRINTER"; 12 | const DEFAULT_PROJECT_SETTING = { 13 | shouldShowSettingButton: true, 14 | locale: "en", 15 | shouldShowFullContent: false, 16 | shouldShowLogo: true, 17 | shouldShowFooter: true, 18 | headerSetting: { 19 | shouldShow: true, 20 | shouldShowFullScreen: true, 21 | shouldShowSearch: true, 22 | shouldShowNotice: true, 23 | shouldShowSettingDrawer: false, 24 | }, 25 | menuSetting: { 26 | collapsed: false, 27 | }, 28 | transitionSetting: { 29 | shouldEnable: true, 30 | routerBasicTransition: RouterTransitionConstants.FADE, 31 | shouldOpenPageLoading: true, 32 | shouldOpenNProgress: true, 33 | }, 34 | shouldOpenKeepAlive: true, 35 | lockTime: 0, 36 | shouldShowBreadCrumb: true, 37 | shouldShowBreadCrumbIcon: true, 38 | shouldUseErrorHandle: false, 39 | shouldUseOpenBackTop: true, 40 | canEmbedIFramePage: true, 41 | shouldCloseMessageOnSwitch: true, 42 | shouldRemoveAllHttpPending: false, 43 | }; 44 | interface AppState { 45 | // project config 46 | projectSetting: ProjectSetting; 47 | // Page loading status 48 | pageLoading: boolean; 49 | } 50 | 51 | let pageLoadingTimeout: ReturnType; 52 | export const useAppStore = defineStore({ 53 | id: APP_STORE_ID, 54 | state: (): AppState => ({ 55 | projectSetting: DEFAULT_PROJECT_SETTING, 56 | pageLoading: true, 57 | }), 58 | getters: { 59 | getPageLoading(state): boolean { 60 | return state.pageLoading; 61 | }, 62 | 63 | getProjectSetting(state): ProjectSetting { 64 | return state.projectSetting || ({} as ProjectSetting); 65 | }, 66 | 67 | getMenuSetting(): MenuSetting { 68 | return 
this.getProjectSetting.menuSetting; 69 | }, 70 | 71 | getHeaderSetting(): HeaderSetting { 72 | return this.getProjectSetting.headerSetting; 73 | }, 74 | 75 | getTransitionSetting(): TransitionSetting { 76 | return this.getProjectSetting.transitionSetting; 77 | }, 78 | }, 79 | actions: { 80 | setPageLoading(loading: boolean): void { 81 | this.pageLoading = loading; 82 | }, 83 | 84 | setProjectSetting(config: DeepPartial): void { 85 | //Merge the current config with the default config 86 | this.projectSetting = { 87 | ...this.projectSetting, 88 | ...config, 89 | } as ProjectSetting; 90 | }, 91 | 92 | setMenuSetting(menuSetting: Partial): void { 93 | this.setProjectSetting({ menuSetting }); 94 | }, 95 | 96 | setHeaderSetting(headerSetting: Partial): void { 97 | this.setProjectSetting({ headerSetting }); 98 | }, 99 | 100 | setTransitionSetting(transitionSetting: Partial): void { 101 | this.setProjectSetting({ transitionSetting }); 102 | }, 103 | 104 | setPageLoadingAction(loading: boolean) { 105 | clearTimeout(pageLoadingTimeout); 106 | if (loading) { 107 | // Prevent flicker by delaying the setPageLoading call 108 | pageLoadingTimeout = setTimeout(() => { 109 | this.setPageLoading(loading); 110 | }, 50); 111 | } else { 112 | this.setPageLoading(loading); 113 | } 114 | }, 115 | 116 | resetAPPState() { 117 | this.setProjectSetting(DEFAULT_PROJECT_SETTING); 118 | }, 119 | }, 120 | }); 121 | -------------------------------------------------------------------------------- /UI/types/Project/Settings.ts: -------------------------------------------------------------------------------- 1 | export enum RouterTransitionConstants { 2 | /** 3 | * A transition that zooms in and fades out the previous route, then zooms out and fades in the new route. 4 | */ 5 | ZOOM_FADE = "zoom-fade", 6 | 7 | /** 8 | * A transition that zooms out and fades out the previous route, then fades in the new route. 9 | */ 10 | ZOOM_OUT = "zoom-out", 11 | 12 | /** 13 | * A transition that fades out the previous route to the side, then fades in the new route from the opposite side. 14 | */ 15 | FADE_SLIDE = "fade-slide", 16 | 17 | /** 18 | * A simple fade transition. 19 | */ 20 | FADE = "fade", 21 | 22 | /** 23 | * A transition that fades out the previous route to the bottom, then fades in the new route from the bottom. 24 | */ 25 | FADE_BOTTOM = "fade-bottom", 26 | 27 | /** 28 | * A transition that scales down and fades out the previous route, then scales up and fades in the new route. 
29 | */ 30 | FADE_SCALE = "fade-scale", 31 | } 32 | 33 | export interface TransitionSetting { 34 | // Whether to open the page switching animation 35 | shouldEnable: boolean; 36 | // Route basic switching animation 37 | routerBasicTransition: RouterTransitionConstants; 38 | // Whether to open page switching loading 39 | shouldOpenPageLoading: boolean; 40 | // Whether to open the top progress bar 41 | shouldOpenNProgress: boolean; 42 | } 43 | 44 | export interface HeaderSetting { 45 | // Whether to display the website header 46 | shouldShow: boolean; 47 | // Whether to display the full screen button 48 | shouldShowFullScreen: boolean; 49 | // Whether to display the search 50 | shouldShowSearch: boolean; 51 | // Whether to display the notice 52 | shouldShowNotice: boolean; 53 | // Whether to display the setting drawer 54 | shouldShowSettingDrawer: boolean; 55 | } 56 | export interface MenuSetting { 57 | collapsed: boolean; 58 | } 59 | export interface ProjectSetting { 60 | // Whether to display the setting button 61 | shouldShowSettingButton: boolean; 62 | // The locale 63 | locale: string; 64 | // Whether to display the dark mode toggle button 65 | // Whether to display the main interface in full screen, without menu and top bar 66 | shouldShowFullContent: boolean; 67 | // Whether to display the logo 68 | shouldShowLogo: boolean; 69 | // Whether to display the global footer 70 | shouldShowFooter: boolean; 71 | // The header setting 72 | headerSetting: HeaderSetting; 73 | // The menu setting 74 | menuSetting: MenuSetting; 75 | // The animation configuration 76 | transitionSetting: TransitionSetting; 77 | // Whether to enable keep-alive for page layout 78 | shouldOpenKeepAlive: boolean; 79 | // The lock screen time 80 | lockTime: number; 81 | // Whether to display the breadcrumb 82 | shouldShowBreadCrumb: boolean; 83 | // Whether to display the breadcrumb icon 84 | shouldShowBreadCrumbIcon: boolean; 85 | // Whether to use the error-handler-plugin 86 | shouldUseErrorHandle: boolean; 87 | // Whether to enable the back to top function 88 | shouldUseOpenBackTop: boolean; 89 | // Whether to embed iframe pages 90 | canEmbedIFramePage: boolean; 91 | // Whether to delete unclosed messages and notify when switching pages 92 | shouldCloseMessageOnSwitch: boolean; 93 | // Whether to cancel sent but unresponsive http requests when switching pages 94 | shouldRemoveAllHttpPending: boolean; 95 | } 96 | 97 | export enum SettingButtonPositionConstants { 98 | // Automatically adjust according to menu type 99 | AUTO = "auto", 100 | // Display in the top menu bar 101 | HEADER = "header", 102 | // Fixed display in the lower right corner 103 | FIXED = "fixed", 104 | } 105 | -------------------------------------------------------------------------------- /UI/components/VideoSearch.vue: -------------------------------------------------------------------------------- 1 | 79 | 80 | 122 | 123 | -------------------------------------------------------------------------------- /Backend/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import json 4 | import random 5 | import logging 6 | import zipfile 7 | import requests 8 | 9 | from termcolor import colored 10 | 11 | # Configure logging 12 | logging.basicConfig(level=logging.INFO) 13 | logger = logging.getLogger(__name__) 14 | 15 | 16 | def clean_dir(path: str) -> None: 17 | """ 18 | Removes every file in a directory. 19 | 20 | Args: 21 | path (str): Path to directory. 
22 | 23 | Returns: 24 | None 25 | """ 26 | try: 27 | if not os.path.exists(path): 28 | os.mkdir(path) 29 | logger.info(f"Created directory: {path}") 30 | 31 | for file in os.listdir(path): 32 | file_path = os.path.join(path, file) 33 | os.remove(file_path) 34 | logger.info(f"Removed file: {file_path}") 35 | 36 | logger.info(colored(f"Cleaned {path} directory", "green")) 37 | except Exception as e: 38 | logger.error(f"Error occurred while cleaning directory {path}: {str(e)}") 39 | 40 | def fetch_songs(zip_url: str) -> None: 41 | """ 42 | Downloads songs into songs/ directory to use with generated videos. 43 | 44 | Args: 45 | zip_url (str): The URL to the zip file containing the songs. 46 | 47 | Returns: 48 | None 49 | """ 50 | try: 51 | logger.info(colored(f" => Fetching songs...", "magenta")) 52 | 53 | files_dir = "../Songs" 54 | if not os.path.exists(files_dir): 55 | os.mkdir(files_dir) 56 | logger.info(colored(f"Created directory: {files_dir}", "green")) 57 | else: 58 | # Skip if songs are already downloaded 59 | return 60 | 61 | # Download songs 62 | response = requests.get(zip_url) 63 | 64 | # Save the zip file 65 | with open("../Songs/songs.zip", "wb") as file: 66 | file.write(response.content) 67 | 68 | # Unzip the file 69 | with zipfile.ZipFile("../Songs/songs.zip", "r") as file: 70 | file.extractall("../Songs") 71 | 72 | # Remove the zip file 73 | os.remove("../Songs/songs.zip") 74 | 75 | logger.info(colored(" => Downloaded Songs to ../Songs.", "green")) 76 | 77 | except Exception as e: 78 | logger.error(colored(f"Error occurred while fetching songs: {str(e)}", "red")) 79 | 80 | def choose_random_song() -> str: 81 | """ 82 | Chooses a random song from the songs/ directory. 83 | 84 | Returns: 85 | str: The path to the chosen song. 86 | """ 87 | try: 88 | songs = os.listdir("../static/assets/music") 89 | song = random.choice(songs) 90 | logger.info(colored(f"Chose song: {song}", "green")) 91 | return f"../static/assets/music/{song}" 92 | except Exception as e: 93 | logger.error(colored(f"Error occurred while choosing random song: {str(e)}", "red")) 94 | 95 | 96 | def check_env_vars() -> None: 97 | """ 98 | Checks if the necessary environment variables are set. 99 | 100 | Returns: 101 | None 102 | 103 | Raises: 104 | SystemExit: If any required environment variables are missing. 105 | """ 106 | try: 107 | required_vars = ["PEXELS_API_KEY", "TIKTOK_SESSION_ID", "IMAGEMAGICK_BINARY"] 108 | missing_vars = [var for var in required_vars if os.getenv(var) is None or (len(os.getenv(var)) == 0)] 109 | 110 | if missing_vars: 111 | missing_vars_str = ", ".join(missing_vars) 112 | logger.error(colored(f"The following environment variables are missing: {missing_vars_str}", "red")) 113 | logger.error(colored("Please consult 'EnvironmentVariables.md' for instructions on how to set them.", "yellow")) 114 | sys.exit(1) # Aborts the program 115 | except Exception as e: 116 | logger.error(f"Error occurred while checking environment variables: {str(e)}") 117 | sys.exit(1) # Aborts the program if an unexpected error occurs 118 | 119 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## ShortsGenerator 2 | ![ShortGenerator](/logo.jpeg) 3 | 4 | Automate the creation of YouTube Shorts locally with a couple of simple steps. 5 | 6 | 1. Give a video subject 7 | 1. Add extra prompt information if needed 8 | 2. Review the script 9 | 1. Add custom search keywords 10 | 2. 
Select a specific voice to use or set a global default voice for all generations 11 | 3. Generate the video 12 | 4. Review the video - Regenerate video 13 | 5. Add music to the video 14 | 6. View all generated videos 15 | 16 | 7. ***Profit!*** 17 | 18 | 19 | ## Overview 20 | 21 | > **🎥** Watch the video on 22 | [YouTube](https://youtu.be/s7wZ7OxjMxA) or click on the image. 23 | [![Short Generator](/logo.jpeg)](https://youtu.be/s7wZ7OxjMxA "Short generator, video generator") 24 | 25 | ![Generate](/static/assets/images/Screen1.png) 26 | ![Generate 2](/static/assets/images/Screenshot2.png?raw=true) 27 | ![Generate 3](/static/assets/images/Screenshot3.png?raw=true) 28 | - [x] Generate the script first 29 | - [x] Let users review the script before audio and video generation 30 | - [x] Let users view all the generated videos in a single place 31 | - [x] Let users view the generated video in the browser 32 | - [x] Let users select the audio music to add to the video 33 | 34 | - [ ] Update the view to have a better user experience 35 | - [x] Let users preview the generated video in the same view and let users iterate on the video 36 | - [ ] Let users download the generated video 37 | - [ ] Let users upload videos to be used in video creation 38 | - [ ] Let users upload audio to be used in video creation 39 | - [x] Let users have general configuration 40 | - [ ] Let users add multiple video links to download 41 | - [ ] Let users select the font and upload fonts 42 | - [x] Let users select the color for the text 43 | 44 | ### Features 🚀 plans: 45 | - [ ] Let users schedule video uploads to [YouTube, Facebook Business, LinkedIn] 46 | - [ ] Let users create videos from the calendar and schedule them to be uploaded 47 | 48 | 49 | ## Installation 📥 50 | 51 | 1. Clone the repository 52 | 53 | ```bash 54 | git clone https://github.com/leamsigc/ShortsGenerator.git 55 | cd ShortsGenerator 56 | Copy the `.env.example` file to `.env` and fill in the required values 57 | ``` 58 | 2. Please install Docker if you haven't already done so 59 | 60 | 3. Build the containers: 61 | ```bash 62 | docker-compose build 63 | ``` 64 | 65 | 4. Run the containers: 66 | ```bash 67 | docker-compose up -d 68 | ``` 69 | 5. Open `http://localhost:5000` in your browser 70 | 71 | See [`.env.example`](.env.example) for the required environment variables. 72 | 73 | If you need help, open [EnvironmentVariables.md](EnvironmentVariables.md) for more information. 74 | 75 | 76 | 77 | ## Music 🎵 78 | 79 | To use your own music, upload it to the `static/assets/music` folder. 80 | 81 | ## Fonts 🅰 82 | 83 | Add your fonts to the `static/assets/fonts` and change the font name in the global settings. 84 | 85 | 86 | ## Next Development FE: 87 | 88 | Before running the front end create the following folders: 89 | 90 | 1. `static` 91 | 2. `static/generated_videos` -> All videos generated that have music will be here 92 | 3. `static/Songs` -> Put the mp4 songs that you want to use here 93 | 94 | Start the front end: 95 | 1. `cd UI` 96 | 2. `npm install` 97 | 3. `npm run dev` 98 | 99 | The alternative front end will be on port 3000 100 | 101 | The frontend depends on the backend. 102 | You can run the Docker container or you can run the backend locally 103 | 104 | 105 | ## Donate 🎁 106 | 107 | If you like and enjoy `ShortsGenerator`, and would like to donate, you can do that by clicking on the button on the right-hand side of the repository. 
❤️ 108 | You will have your name (and/or logo) added to this repository as a supporter as a sign of appreciation. 109 | 110 | ## Contributing 🤝 111 | 112 | Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change. 113 | 114 | ## Star History 🌟 115 | 116 | [![Star History Chart](https://api.star-history.com/svg?repos=leamsigc/ShortsGenerator&type=Date)](https://star-history.com/#leamsigc/ShortsGenerator&Date) 117 | 118 | ## License 📝 119 | 120 | See [`LICENSE`](LICENSE) file for more information. 121 | -------------------------------------------------------------------------------- /UI/stores/TabsStore.ts: -------------------------------------------------------------------------------- 1 | import { defineStore } from "pinia"; 2 | import type { 3 | RouteLocationNormalized, 4 | RouteRecordName, 5 | RouteRecordRaw, 6 | } from "vue-router"; 7 | 8 | const APP_TABS_STORE_ID = "APP_TABS_STORE"; 9 | export const LAYOUT = () => import("~/layouts/default.vue"); 10 | export const EXCEPTION_COMPONENT = () => import("~/components/ErrorView.vue"); 11 | export const PAGE_NOT_FOUND_ROUTE: RouteRecordRaw = { 12 | path: "/:path(.*)*", 13 | name: "PageNotFound", 14 | component: LAYOUT, 15 | meta: { 16 | title: "ErrorPage", 17 | shouldHideInMenu: true, 18 | shouldHideBreadcrumb: true, 19 | }, 20 | children: [ 21 | { 22 | path: "/:path(.*)*", 23 | name: "PageNotFound", 24 | component: EXCEPTION_COMPONENT, 25 | meta: { 26 | title: "ErrorPage", 27 | shouldHideInMenu: true, 28 | shouldHideBreadcrumb: true, 29 | }, 30 | }, 31 | ], 32 | }; 33 | export const REDIRECT_ROUTE: RouteRecordRaw = { 34 | path: "/redirect", 35 | component: LAYOUT, 36 | name: "RedirectTo", 37 | meta: { 38 | title: "Redirect", 39 | shouldHideBreadcrumb: true, 40 | shouldHideInMenu: true, 41 | }, 42 | children: [ 43 | { 44 | path: "/redirect/:path(.*)", 45 | name: "Redirect", 46 | component: () => import("~/components/RedirectView.vue"), 47 | meta: { 48 | title: "Redirect", 49 | shouldHideBreadcrumb: true, 50 | }, 51 | }, 52 | ], 53 | }; 54 | 55 | export enum PageConstants { 56 | // basic videos path 57 | BASE_LOGIN = "/videos", 58 | // basic home path 59 | BASE_HOME = "/dashboard", 60 | // error page path 61 | ERROR_PAGE = "/exception", 62 | } 63 | interface AppTabsState { 64 | tabs: Tab[]; 65 | pinnedTabs: Tab[]; 66 | maxVisibleTabs: number; 67 | } 68 | export interface Tab { 69 | name: RouteRecordName; 70 | fullPath: string; 71 | title: string; 72 | } 73 | export const useTabsStore = defineStore({ 74 | id: APP_TABS_STORE_ID, 75 | state: (): AppTabsState => ({ 76 | tabs: [{ fullPath: "/", name: "Home", title: "Home" }], 77 | pinnedTabs: [], 78 | maxVisibleTabs: 3, 79 | }), 80 | getters: { 81 | getTabsList(state): Tab[] { 82 | return state.tabs; 83 | }, 84 | getLimitTabsList(state): Tab[] { 85 | if (isGreaterOrEqual2xl.value) { 86 | state.maxVisibleTabs = 3; 87 | } else { 88 | state.maxVisibleTabs = 1; 89 | } 90 | return useTakeRight( 91 | state.tabs 92 | .filter( 93 | (tab) => 94 | state.pinnedTabs.findIndex((p) => p.fullPath === tab.fullPath) === 95 | -1 96 | ) 97 | .reverse(), 98 | state.maxVisibleTabs 99 | ); 100 | }, 101 | getPinnedTabsList(state): Tab[] { 102 | return state.pinnedTabs; 103 | }, 104 | }, 105 | actions: { 106 | addTab(route: RouteLocationNormalized) { 107 | const { path, name, meta } = route; 108 | if ( 109 | !name || 110 | path === PageConstants.ERROR_PAGE || 111 | path === PageConstants.BASE_LOGIN || 112 | ["Redirect", "PageNotFound"].includes(name as string) 113 | 
) { 114 | return; 115 | } 116 | const title = 117 | (meta?.title as string) || name.toString().split("-").at(-1); 118 | if (title) { 119 | const newTab: Tab = { name, fullPath: route.fullPath, title }; 120 | this.tabs = useUniqBy([newTab, ...this.tabs], "fullPath"); 121 | } 122 | }, 123 | close(isPinned: boolean, tab: Tab) { 124 | const targetTabs = isPinned ? this.pinnedTabs : this.tabs; 125 | this.tabs = targetTabs.filter( 126 | (currentTab) => currentTab.fullPath !== tab.fullPath 127 | ); 128 | }, 129 | closeTab(tab: Tab) { 130 | this.close(false, tab); 131 | }, 132 | closePinnedTab(tab: Tab) { 133 | this.close(true, tab); 134 | }, 135 | pinnedTab(tab: Tab) { 136 | const isPresent = this.pinnedTabs.some( 137 | (pinnedTab) => pinnedTab.fullPath === tab.fullPath 138 | ); 139 | if (!isPresent) { 140 | this.pinnedTabs = [tab, ...this.pinnedTabs]; 141 | } 142 | return true; 143 | }, 144 | resetTabsState() { 145 | this.tabs = []; 146 | this.pinnedTabs = []; 147 | }, 148 | }, 149 | }); 150 | -------------------------------------------------------------------------------- /UI/utils/mitt.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * copy to https://github.com/developit/mitt 3 | * Expand clear method 4 | */ 5 | export type EventType = string | symbol; 6 | 7 | // An event handler can take an optional event argument 8 | // and should not return a value 9 | export type Handler = (event: T) => void; 10 | export type WildcardHandler> = ( 11 | type: keyof T, 12 | event: T[keyof T] 13 | ) => void; 14 | 15 | // An array of all currently registered event handlers for a type 16 | export type EventHandlerList = Array>; 17 | export type WildCardEventHandlerList> = Array< 18 | WildcardHandler 19 | >; 20 | 21 | // A map of event types and their corresponding event handlers. 22 | export type EventHandlerMap> = Map< 23 | keyof Events | "*", 24 | EventHandlerList | WildCardEventHandlerList 25 | >; 26 | 27 | export interface Emitter> { 28 | all: EventHandlerMap; 29 | 30 | on(type: Key, handler: Handler): void; 31 | on(type: "*", handler: WildcardHandler): void; 32 | 33 | off( 34 | type: Key, 35 | handler?: Handler 36 | ): void; 37 | off(type: "*", handler: WildcardHandler): void; 38 | 39 | emit(type: Key, event: Events[Key]): void; 40 | emit( 41 | type: undefined extends Events[Key] ? Key : never 42 | ): void; 43 | } 44 | 45 | /** 46 | * Mitt: Tiny (~200b) functional event emitter / pubsub. 47 | * @name mitt 48 | * @returns {Mitt} 49 | */ 50 | export default function mitt>( 51 | all?: EventHandlerMap 52 | ): Emitter { 53 | type GenericEventHandler = 54 | | Handler 55 | | WildcardHandler; 56 | all = all || new Map(); 57 | 58 | return { 59 | /** 60 | * A Map of event names to registered handler functions. 61 | */ 62 | all, 63 | 64 | /** 65 | * Register an event handler for the given type. 66 | * @param {string|symbol} type Type of event to listen for, or `'*'` for all events 67 | * @param {Function} handler Function to call in response to given event 68 | * @memberOf mitt 69 | */ 70 | on(type: Key, handler: GenericEventHandler) { 71 | const handlers: Array | undefined = all!.get(type); 72 | if (handlers) { 73 | handlers.push(handler); 74 | } else { 75 | all!.set(type, [handler] as EventHandlerList); 76 | } 77 | }, 78 | 79 | /** 80 | * Remove an event handler for the given type. 81 | * If `handler` is omitted, all handlers of the given type are removed. 
82 | * @param {string|symbol} type Type of event to unregister `handler` from (`'*'` to remove a wildcard handler) 83 | * @param {Function} [handler] Handler function to remove 84 | * @memberOf mitt 85 | */ 86 | off(type: Key, handler?: GenericEventHandler) { 87 | const handlers: Array | undefined = all!.get(type); 88 | if (handlers) { 89 | if (handler) { 90 | handlers.splice(handlers.indexOf(handler) >>> 0, 1); 91 | } else { 92 | all!.set(type, []); 93 | } 94 | } 95 | }, 96 | 97 | /** 98 | * Invoke all handlers for the given type. 99 | * If present, `'*'` handlers are invoked after type-matched handlers. 100 | * 101 | * Note: Manually firing '*' handlers is not supported. 102 | * 103 | * @param {string|symbol} type The event type to invoke 104 | * @param {Any} [evt] Any value (object is recommended and powerful), passed to each handler 105 | * @memberOf mitt 106 | */ 107 | emit(type: Key, evt?: Events[Key]) { 108 | let handlers = all!.get(type); 109 | if (handlers) { 110 | (handlers as EventHandlerList) 111 | .slice() 112 | .map((handler) => { 113 | handler(evt!); 114 | }); 115 | } 116 | 117 | handlers = all!.get("*"); 118 | if (handlers) { 119 | (handlers as WildCardEventHandlerList) 120 | .slice() 121 | .map((handler) => { 122 | handler(type, evt!); 123 | }); 124 | } 125 | }, 126 | }; 127 | } 128 | -------------------------------------------------------------------------------- /Frontend/app.js: -------------------------------------------------------------------------------- 1 | const videoSubject = document.querySelector("#videoSubject"); 2 | const aiModel = document.querySelector("#aiModel"); 3 | const voice = document.querySelector("#voice"); 4 | const zipUrl = document.querySelector("#zipUrl"); 5 | const paragraphNumber = document.querySelector("#paragraphNumber"); 6 | const youtubeToggle = document.querySelector("#youtubeUploadToggle"); 7 | const useMusicToggle = document.querySelector("#useMusicToggle"); 8 | const customPrompt = document.querySelector("#customPrompt"); 9 | const generateButton = document.querySelector("#generateButton"); 10 | const cancelButton = document.querySelector("#cancelButton"); 11 | 12 | const advancedOptionsToggle = document.querySelector("#advancedOptionsToggle"); 13 | 14 | advancedOptionsToggle.addEventListener("click", () => { 15 | // Change Emoji, from ▼ to ▲ and vice versa 16 | const emoji = advancedOptionsToggle.textContent; 17 | advancedOptionsToggle.textContent = emoji.includes("▼") 18 | ? "Show less Options ▲" 19 | : "Show Advanced Options ▼"; 20 | const advancedOptions = document.querySelector("#advancedOptions"); 21 | advancedOptions.classList.toggle("hidden"); 22 | }); 23 | 24 | 25 | const cancelGeneration = () => { 26 | console.log("Canceling generation..."); 27 | // Send request to /cancel 28 | fetch("http://localhost:8080/api/cancel", { 29 | method: "POST", 30 | headers: { 31 | "Content-Type": "application/json", 32 | Accept: "application/json", 33 | }, 34 | }) 35 | .then((response) => response.json()) 36 | .then((data) => { 37 | alert(data.message); 38 | console.log(data); 39 | }) 40 | .catch((error) => { 41 | alert("An error occurred. 
Please try again later."); 42 | console.log(error); 43 | }); 44 | 45 | // Hide cancel button 46 | cancelButton.classList.add("hidden"); 47 | 48 | // Enable generate button 49 | generateButton.disabled = false; 50 | generateButton.classList.remove("hidden"); 51 | }; 52 | 53 | const generateVideo = () => { 54 | console.log("Generating video..."); 55 | // Disable button and change text 56 | generateButton.disabled = true; 57 | generateButton.classList.add("hidden"); 58 | 59 | // Show cancel button 60 | cancelButton.classList.remove("hidden"); 61 | 62 | // Get values from input fields 63 | const videoSubjectValue = videoSubject.value; 64 | const aiModelValue = aiModel.value; 65 | const voiceValue = voice.value; 66 | const paragraphNumberValue = paragraphNumber.value; 67 | const youtubeUpload = youtubeToggle.checked; 68 | const useMusicToggleState = useMusicToggle.checked; 69 | const threads = document.querySelector("#threads").value; 70 | const zipUrlValue = zipUrl.value; 71 | const customPromptValue = customPrompt.value; 72 | const subtitlesPosition = document.querySelector("#subtitlesPosition").value; 73 | 74 | const url = "http://localhost:8080/api/generate"; 75 | 76 | // Construct data to be sent to the server 77 | const data = { 78 | videoSubject: videoSubjectValue, 79 | aiModel: aiModelValue, 80 | voice: voiceValue, 81 | paragraphNumber: paragraphNumberValue, 82 | automateYoutubeUpload: youtubeUpload, 83 | useMusic: useMusicToggleState, 84 | zipUrl: zipUrlValue, 85 | threads: threads, 86 | subtitlesPosition: subtitlesPosition, 87 | customPrompt: customPromptValue, 88 | }; 89 | 90 | // Send the actual request to the server 91 | fetch(url, { 92 | method: "POST", 93 | body: JSON.stringify(data), 94 | headers: { 95 | "Content-Type": "application/json", 96 | Accept: "application/json", 97 | }, 98 | }) 99 | .then((response) => response.json()) 100 | .then((data) => { 101 | console.log(data); 102 | alert(data.message); 103 | // Hide cancel button after generation is complete 104 | generateButton.disabled = false; 105 | generateButton.classList.remove("hidden"); 106 | cancelButton.classList.add("hidden"); 107 | }) 108 | .catch((error) => { 109 | alert("An error occurred. Please try again later."); 110 | console.log(error); 111 | }); 112 | }; 113 | 114 | generateButton.addEventListener("click", generateVideo); 115 | cancelButton.addEventListener("click", cancelGeneration); 116 | 117 | videoSubject.addEventListener("keyup", (event) => { 118 | if (event.key === "Enter") { 119 | generateVideo(); 120 | } 121 | }); 122 | 123 | // Load the data from localStorage on page load 124 | document.addEventListener("DOMContentLoaded", (event) => { 125 | const voiceSelect = document.getElementById("voice"); 126 | const storedVoiceValue = localStorage.getItem("voiceValue"); 127 | 128 | if (storedVoiceValue) { 129 | voiceSelect.value = storedVoiceValue; 130 | } 131 | }); 132 | 133 | // When the voice select field changes, store the new value in localStorage. 
134 | document.getElementById("voice").addEventListener("change", (event) => { 135 | localStorage.setItem("voiceValue", event.target.value); 136 | }); 137 | -------------------------------------------------------------------------------- /UI/components/LayoutTabs.vue: -------------------------------------------------------------------------------- 1 | 64 | 65 | 125 | 126 | 182 | -------------------------------------------------------------------------------- /UI/pages/settings.vue: -------------------------------------------------------------------------------- 1 | 110 | 111 | 195 | 196 | -------------------------------------------------------------------------------- /Backend/gpt.py: -------------------------------------------------------------------------------- 1 | import re 2 | import json 3 | import g4f 4 | import openai 5 | from typing import Tuple, List 6 | from termcolor import colored 7 | from dotenv import load_dotenv 8 | import os 9 | import google.generativeai as genai 10 | 11 | # Load environment variables 12 | load_dotenv("../.env") 13 | 14 | # Set environment variables 15 | OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') 16 | openai.api_key = OPENAI_API_KEY 17 | GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY') 18 | genai.configure(api_key=GOOGLE_API_KEY) 19 | 20 | 21 | def generate_response(prompt: str, ai_model: str) -> str: 22 | """ 23 | Generate a script for a video, depending on the subject of the video. 24 | 25 | Args: 26 | video_subject (str): The subject of the video. 27 | ai_model (str): The AI model to use for generation. 28 | 29 | 30 | Returns: 31 | 32 | str: The response from the AI model. 33 | 34 | """ 35 | 36 | if ai_model == 'g4f': 37 | 38 | response = g4f.ChatCompletion.create( 39 | 40 | model=g4f.models.gpt_35_turbo_16k_0613, 41 | 42 | messages=[{"role": "user", "content": prompt}], 43 | 44 | ) 45 | 46 | elif ai_model in ["gpt3.5-turbo", "gpt4"]: 47 | 48 | model_name = "gpt-3.5-turbo" if ai_model == "gpt3.5-turbo" else "gpt-4-1106-preview" 49 | 50 | response = openai.chat.completions.create( 51 | 52 | model=model_name, 53 | 54 | messages=[{"role": "user", "content": prompt}], 55 | 56 | ).choices[0].message.content 57 | elif ai_model == 'gemmini': 58 | model = genai.GenerativeModel('gemini-pro') 59 | response_model = model.generate_content(prompt) 60 | response = response_model.text 61 | 62 | else: 63 | raise ValueError("Invalid AI model selected.") 64 | 65 | return response 66 | 67 | 68 | 69 | def get_search_terms(video_subject: str, amount: int, script: str, ai_model: str) -> List[str]: 70 | """ 71 | Generate a JSON-Array of search terms for stock videos, 72 | depending on the subject of a video. 73 | 74 | Args: 75 | video_subject (str): The subject of the video. 76 | amount (int): The amount of search terms to generate. 77 | script (str): The script of the video. 78 | ai_model (str): The AI model to use for generation. 79 | 80 | Returns: 81 | List[str]: The search terms for the video subject. 82 | """ 83 | 84 | # Build prompt 85 | prompt = f""" 86 | # Role: Video Search Terms Generator 87 | ## Goals: 88 | Generate {amount} search terms for stock videos, depending on the subject of a video. 89 | 90 | ## Constrains: 91 | 1. the search terms are to be returned as a json-array of strings. 92 | 2. each search term should consist of 1-3 words, always add the main subject of the video. 93 | 3. you must only return the json-array of strings. you must not return anything else. you must not return the script. 94 | 4. 
the search terms must be related to the subject of the video. 95 | 5. reply with english search terms only. 96 | 97 | ## Output Example: 98 | ["search term 1", "search term 2", "search term 3","search term 4","search term 5"] 99 | 100 | ## Context: 101 | ### Video Subject 102 | {video_subject} 103 | 104 | ### Video Script 105 | {script} 106 | 107 | Please note that you must use English for generating video search terms; Chinese is not accepted. 108 | """.strip() 109 | 110 | 111 | # Let user know 112 | print(colored(f"Generating {amount} search terms for {video_subject}...", "cyan")) 113 | 114 | # Generate search terms 115 | response = generate_response(prompt, ai_model) 116 | 117 | # Let user know 118 | print(colored(f"Response: {response}", "cyan")) 119 | # Parse response into a list of search terms 120 | search_terms = [] 121 | 122 | try: 123 | search_terms = json.loads(response) 124 | if not isinstance(search_terms, list) or not all(isinstance(term, str) for term in search_terms): 125 | raise ValueError("Response is not a list of strings.") 126 | 127 | except (json.JSONDecodeError, ValueError): 128 | print(colored("[*] GPT returned an unformatted response. Attempting to clean...", "yellow")) 129 | 130 | # Attempt to extract list-like string and convert to list 131 | match = re.search(r'\["(?:[^"\\]|\\.)*"(?:,\s*"[^"\\]*")*\]', response) 132 | if match: 133 | try: 134 | search_terms = json.loads(match.group()) 135 | except json.JSONDecodeError: 136 | print(colored("[-] Could not parse response.", "red")) 137 | return [] 138 | 139 | 140 | 141 | # Let user know 142 | print(colored(f"\nGenerated {len(search_terms)} search terms: {', '.join(search_terms)}", "cyan")) 143 | 144 | # Return search terms 145 | return search_terms 146 | 147 | 148 | def generate_metadata(video_subject: str, script: str, ai_model: str) -> Tuple[str, str, List[str]]: 149 | """ 150 | Generate metadata for a YouTube video, including the title, description, and keywords. 151 | 152 | Args: 153 | video_subject (str): The subject of the video. 154 | script (str): The script of the video. 155 | ai_model (str): The AI model to use for generation. 156 | 157 | Returns: 158 | Tuple[str, str, List[str]]: The title, description, and keywords for the video. 159 | """ 160 | 161 | # Build prompt for title 162 | title_prompt = f""" 163 | Generate a catchy and SEO-friendly title for a YouTube shorts video about {video_subject}. 164 | """ 165 | 166 | # Generate title 167 | title = generate_response(title_prompt, ai_model).strip() 168 | 169 | # Build prompt for description 170 | description_prompt = f""" 171 | Write a brief and engaging description for a YouTube shorts video about {video_subject}. 
172 | The video is based on the following script: 173 | {script} 174 | """ 175 | 176 | # Generate description 177 | description = generate_response(description_prompt, ai_model).strip() 178 | 179 | # Generate keywords 180 | keywords = get_search_terms(video_subject, 6, script, ai_model) 181 | 182 | return title, description, keywords 183 | -------------------------------------------------------------------------------- /Backend/tiktokvoice.py: -------------------------------------------------------------------------------- 1 | # author: GiorDior aka Giorgio 2 | # date: 12.06.2023 3 | # topic: TikTok-Voice-TTS 4 | # version: 1.0 5 | # credits: https://github.com/oscie57/tiktok-voice 6 | 7 | # --- MODIFIED VERSION --- # 8 | 9 | import base64 10 | import requests 11 | import threading 12 | 13 | from typing import List 14 | from termcolor import colored 15 | from playsound import playsound 16 | 17 | 18 | VOICES = [ 19 | # DISNEY VOICES 20 | "en_us_ghostface", # Ghost Face 21 | "en_us_chewbacca", # Chewbacca 22 | "en_us_c3po", # C3PO 23 | "en_us_stitch", # Stitch 24 | "en_us_stormtrooper", # Stormtrooper 25 | "en_us_rocket", # Rocket 26 | # ENGLISH VOICES 27 | "en_au_001", # English AU - Female 28 | "en_au_002", # English AU - Male 29 | "en_uk_001", # English UK - Male 1 30 | "en_uk_003", # English UK - Male 2 31 | "en_us_001", # English US - Female (Int. 1) 32 | "en_us_002", # English US - Female (Int. 2) 33 | "en_us_006", # English US - Male 1 34 | "en_us_007", # English US - Male 2 35 | "en_us_009", # English US - Male 3 36 | "en_us_010", # English US - Male 4 37 | # EUROPE VOICES 38 | "fr_001", # French - Male 1 39 | "fr_002", # French - Male 2 40 | "de_001", # German - Female 41 | "de_002", # German - Male 42 | "es_002", # Spanish - Male 43 | # AMERICA VOICES 44 | "es_mx_002", # Spanish MX - Male 45 | "br_001", # Portuguese BR - Female 1 46 | "br_003", # Portuguese BR - Female 2 47 | "br_004", # Portuguese BR - Female 3 48 | "br_005", # Portuguese BR - Male 49 | # ASIA VOICES 50 | "id_001", # Indonesian - Female 51 | "jp_001", # Japanese - Female 1 52 | "jp_003", # Japanese - Female 2 53 | "jp_005", # Japanese - Female 3 54 | "jp_006", # Japanese - Male 55 | "kr_002", # Korean - Male 1 56 | "kr_003", # Korean - Female 57 | "kr_004", # Korean - Male 2 58 | # SINGING VOICES 59 | "en_female_f08_salut_damour", # Alto 60 | "en_male_m03_lobby", # Tenor 61 | "en_female_f08_warmy_breeze", # Warmy Breeze 62 | "en_male_m03_sunshine_soon", # Sunshine Soon 63 | # OTHER 64 | "en_male_narration", # narrator 65 | "en_male_funny", # wacky 66 | "en_female_emotional", # peaceful 67 | ] 68 | 69 | ENDPOINTS = [ 70 | "https://tiktok-tts.weilnet.workers.dev/api/generation", 71 | "https://tiktoktts.com/api/tiktok-tts", 72 | ] 73 | current_endpoint = 0 74 | # in one conversion, the text can have a maximum length of 300 characters 75 | TEXT_BYTE_LIMIT = 300 76 | 77 | 78 | # create a list by splitting a string, every element has n chars 79 | def split_string(string: str, chunk_size: int) -> List[str]: 80 | words = string.split() 81 | result = [] 82 | current_chunk = "" 83 | for word in words: 84 | if ( 85 | len(current_chunk) + len(word) + 1 <= chunk_size 86 | ): # Check if adding the word exceeds the chunk size 87 | current_chunk += f" {word}" 88 | else: 89 | if current_chunk: # Append the current chunk if not empty 90 | result.append(current_chunk.strip()) 91 | current_chunk = word 92 | if current_chunk: # Append the last chunk if not empty 93 | result.append(current_chunk.strip()) 94 | return result 95 | 96 | 97 
| # checking if the website that provides the service is available 98 | def get_api_response() -> requests.Response: 99 | url = f'{ENDPOINTS[current_endpoint].split("/a")[0]}' 100 | response = requests.get(url) 101 | return response 102 | 103 | 104 | # saving the audio file 105 | def save_audio_file(base64_data: str, filename: str = "output.mp3") -> None: 106 | audio_bytes = base64.b64decode(base64_data) 107 | with open(filename, "wb") as file: 108 | file.write(audio_bytes) 109 | 110 | 111 | # send POST request to get the audio data 112 | def generate_audio(text: str, voice: str) -> bytes: 113 | url = f"{ENDPOINTS[current_endpoint]}" 114 | headers = {"Content-Type": "application/json"} 115 | data = {"text": text, "voice": voice} 116 | response = requests.post(url, headers=headers, json=data) 117 | return response.content 118 | 119 | 120 | # creates an text to speech audio file 121 | def tts( 122 | text: str, 123 | voice: str = "none", 124 | filename: str = "output.mp3", 125 | play_sound: bool = False, 126 | ) -> None: 127 | # checking if the website is available 128 | global current_endpoint 129 | 130 | if get_api_response().status_code == 200: 131 | print(colored("[+] TikTok TTS Service available!", "green")) 132 | else: 133 | current_endpoint = (current_endpoint + 1) % 2 134 | if get_api_response().status_code == 200: 135 | print(colored("[+] TTS Service available!", "green")) 136 | else: 137 | print(colored("[-] TTS Service not available and probably temporarily rate limited, try again later..." , "red")) 138 | return 139 | 140 | # checking if arguments are valid 141 | if voice == "none": 142 | print(colored("[-] Please specify a voice", "red")) 143 | return 144 | 145 | if voice not in VOICES: 146 | print(colored("[-] Voice not available", "red")) 147 | return 148 | 149 | if not text: 150 | print(colored("[-] Please specify a text", "red")) 151 | return 152 | 153 | # creating the audio file 154 | try: 155 | if len(text) < TEXT_BYTE_LIMIT: 156 | audio = generate_audio((text), voice) 157 | if current_endpoint == 0: 158 | audio_base64_data = str(audio).split('"')[5] 159 | else: 160 | audio_base64_data = str(audio).split('"')[3].split(",")[1] 161 | 162 | if audio_base64_data == "error": 163 | print(colored("[-] This voice is unavailable right now", "red")) 164 | return 165 | 166 | else: 167 | # Split longer text into smaller parts 168 | text_parts = split_string(text, 299) 169 | audio_base64_data = [None] * len(text_parts) 170 | 171 | # Define a thread function to generate audio for each text part 172 | def generate_audio_thread(text_part, index): 173 | audio = generate_audio(text_part, voice) 174 | if current_endpoint == 0: 175 | base64_data = str(audio).split('"')[5] 176 | else: 177 | base64_data = str(audio).split('"')[3].split(",")[1] 178 | 179 | if audio_base64_data == "error": 180 | print(colored("[-] This voice is unavailable right now", "red")) 181 | return "error" 182 | 183 | audio_base64_data[index] = base64_data 184 | 185 | threads = [] 186 | for index, text_part in enumerate(text_parts): 187 | # Create and start a new thread for each text part 188 | thread = threading.Thread( 189 | target=generate_audio_thread, args=(text_part, index) 190 | ) 191 | thread.start() 192 | threads.append(thread) 193 | 194 | # Wait for all threads to complete 195 | for thread in threads: 196 | thread.join() 197 | 198 | # Concatenate the base64 data in the correct order 199 | audio_base64_data = "".join(audio_base64_data) 200 | 201 | save_audio_file(audio_base64_data, filename) 202 | 
print(colored(f"[+] Audio file saved successfully as '{filename}'", "green")) 203 | if play_sound: 204 | playsound(filename) 205 | 206 | except Exception as e: 207 | print(colored(f"[-] An error occurred during TTS: {e}", "red")) 208 | 209 | # Rerun the all the voices 210 | def available_voices() -> list: 211 | return VOICES -------------------------------------------------------------------------------- /Backend/youtube.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import time 4 | import random 5 | import httplib2 6 | 7 | from termcolor import colored 8 | from oauth2client.file import Storage 9 | from apiclient.discovery import build 10 | from apiclient.errors import HttpError 11 | from apiclient.http import MediaFileUpload 12 | from oauth2client.tools import argparser, run_flow 13 | from oauth2client.client import flow_from_clientsecrets 14 | 15 | # Explicitly tell the underlying HTTP transport library not to retry, since 16 | # we are handling retry logic ourselves. 17 | httplib2.RETRIES = 1 18 | 19 | # Maximum number of times to retry before giving up. 20 | MAX_RETRIES = 10 21 | 22 | # Always retry when these exceptions are raised. 23 | RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib2.ServerNotFoundError) 24 | 25 | # Always retry when an apiclient.errors.HttpError with one of these status 26 | # codes is raised. 27 | RETRIABLE_STATUS_CODES = [500, 502, 503, 504] 28 | 29 | # The CLIENT_SECRETS_FILE variable specifies the name of a file that contains 30 | # the OAuth 2.0 information for this application, including its client_id and 31 | # client_secret. 32 | CLIENT_SECRETS_FILE = "./client_secret.json" 33 | 34 | # This OAuth 2.0 access scope allows an application to upload files to the 35 | # authenticated user's YouTube channel, but doesn't allow other types of access. 36 | # YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload" 37 | SCOPES = ['https://www.googleapis.com/auth/youtube.upload', 38 | 'https://www.googleapis.com/auth/youtube', 39 | 'https://www.googleapis.com/auth/youtubepartner'] 40 | YOUTUBE_API_SERVICE_NAME = "youtube" 41 | YOUTUBE_API_VERSION = "v3" 42 | 43 | # This variable defines a message to display if the CLIENT_SECRETS_FILE is 44 | # missing. 45 | MISSING_CLIENT_SECRETS_MESSAGE = f""" 46 | WARNING: Please configure OAuth 2.0 47 | 48 | To make this sample run you will need to populate the client_secrets.json file 49 | found at: 50 | 51 | {os.path.abspath(os.path.join(os.path.dirname(__file__), CLIENT_SECRETS_FILE))} 52 | 53 | with information from the API Console 54 | https://console.cloud.google.com/ 55 | 56 | For more information about the client_secrets.json file format, please visit: 57 | https://developers.google.com/api-client-library/python/guide/aaa_client_secrets 58 | """ 59 | 60 | VALID_PRIVACY_STATUSES = ("public", "private", "unlisted") 61 | 62 | 63 | def get_authenticated_service(): 64 | """ 65 | This method retrieves the YouTube service. 66 | 67 | Returns: 68 | any: The authenticated YouTube service. 
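    Example (a minimal sketch, not part of the original flow; it assumes a valid
    client_secret.json is present in the Backend directory and that the OAuth flow
    can complete):

        youtube = get_authenticated_service()
        channels = youtube.channels().list(mine=True, part="id").execute()
        print(channels["items"][0]["id"])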
69 | """ 70 | flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, 71 | scope=SCOPES, 72 | message=MISSING_CLIENT_SECRETS_MESSAGE) 73 | 74 | storage = Storage(f"{sys.argv[0]}-oauth2.json") 75 | credentials = storage.get() 76 | 77 | if credentials is None or credentials.invalid: 78 | flags = argparser.parse_args() 79 | credentials = run_flow(flow, storage, flags) 80 | 81 | return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, 82 | http=credentials.authorize(httplib2.Http())) 83 | 84 | def initialize_upload(youtube: any, options: dict): 85 | """ 86 | This method uploads a video to YouTube. 87 | 88 | Args: 89 | youtube (any): The authenticated YouTube service. 90 | options (dict): The options to upload the video with. 91 | 92 | Returns: 93 | response: The response from the upload process. 94 | """ 95 | 96 | tags = None 97 | if options['keywords']: 98 | tags = options['keywords'].split(",") 99 | 100 | body = { 101 | 'snippet': { 102 | 'title': options['title'], 103 | 'description': options['description'], 104 | 'tags': tags, 105 | 'categoryId': options['category'] 106 | }, 107 | 'status': { 108 | 'privacyStatus': options['privacyStatus'], 109 | 'madeForKids': False, # Video is not made for kids 110 | 'selfDeclaredMadeForKids': False # You declare that the video is not made for kids 111 | } 112 | } 113 | 114 | # Call the API's videos.insert method to create and upload the video. 115 | insert_request = youtube.videos().insert( 116 | part=",".join(body.keys()), 117 | body=body, 118 | media_body=MediaFileUpload(options['file'], chunksize=-1, resumable=True) 119 | ) 120 | 121 | return resumable_upload(insert_request) 122 | 123 | def resumable_upload(insert_request: MediaFileUpload): 124 | """ 125 | This method implements an exponential backoff strategy to resume a 126 | failed upload. 127 | 128 | Args: 129 | insert_request (MediaFileUpload): The request to insert the video. 130 | 131 | Returns: 132 | response: The response from the upload process. 
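    Backoff sketch (this mirrors the loop below rather than adding new behaviour):
    after each retriable failure the call sleeps

        sleep_seconds = random.random() * (2 ** retry)   # retry = 1, 2, ..., MAX_RETRIES

    so the waits are capped at 2, 4, 8, ... seconds and the upload is abandoned
    after MAX_RETRIES (10) failed attempts.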
133 | """ 134 | response = None 135 | error = None 136 | retry = 0 137 | while response is None: 138 | try: 139 | print(colored(" => Uploading file...", "magenta")) 140 | status, response = insert_request.next_chunk() 141 | if 'id' in response: 142 | print(f"Video id '{response['id']}' was successfully uploaded.") 143 | return response 144 | except HttpError as e: 145 | if e.resp.status in RETRIABLE_STATUS_CODES: 146 | error = f"A retriable HTTP error {e.resp.status} occurred:\n{e.content}" 147 | else: 148 | raise 149 | except RETRIABLE_EXCEPTIONS as e: 150 | error = f"A retriable error occurred: {e}" 151 | 152 | if error is not None: 153 | print(colored(error, "red")) 154 | retry += 1 155 | if retry > MAX_RETRIES: 156 | raise Exception("No longer attempting to retry.") 157 | 158 | max_sleep = 2 ** retry 159 | sleep_seconds = random.random() * max_sleep 160 | print(colored(f" => Sleeping {sleep_seconds} seconds and then retrying...", "blue")) 161 | time.sleep(sleep_seconds) 162 | 163 | def upload_video(video_path, title, description, category, keywords, privacy_status): 164 | try: 165 | # Get the authenticated YouTube service 166 | youtube = get_authenticated_service() 167 | 168 | # Retrieve and print the channel ID for the authenticated user 169 | channels_response = youtube.channels().list(mine=True, part='id').execute() 170 | for channel in channels_response['items']: 171 | print(colored(f" => Channel ID: {channel['id']}", "blue")) 172 | 173 | # Initialize the upload process 174 | video_response = initialize_upload(youtube, { 175 | 'file': video_path, # The path to the video file 176 | 'title': title, 177 | 'description': description, 178 | 'category': category, 179 | 'keywords': keywords, 180 | 'privacyStatus': privacy_status 181 | }) 182 | return video_response # Return the response from the upload process 183 | except HttpError as e: 184 | print(colored(f"[-] An HTTP error {e.resp.status} occurred:\n{e.content}", "red")) 185 | if e.resp.status in [401, 403]: 186 | # Here you could refresh the credentials and retry the upload 187 | youtube = get_authenticated_service() # This will prompt for re-authentication if necessary 188 | video_response = initialize_upload(youtube, { 189 | 'file': video_path, 190 | 'title': title, 191 | 'description': description, 192 | 'category': category, 193 | 'keywords': keywords, 194 | 'privacyStatus': privacy_status 195 | }) 196 | return video_response 197 | else: 198 | raise e 199 | -------------------------------------------------------------------------------- /Frontend/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | MoneyPrinter 7 | 11 | 12 | 16 | 17 | 18 | 19 |

MoneyPrinter

20 |

21 | This Application is intended to automate the creation and uploads of 22 | YouTube Shorts. 23 |

24 | 25 |
26 |
27 | 28 | 35 | 38 | 180 | 186 | 192 |
193 |
194 | 195 | 209 | 210 | 211 | 212 | 213 | -------------------------------------------------------------------------------- /Backend/video.py: -------------------------------------------------------------------------------- 1 | import os 2 | import uuid 3 | 4 | import requests 5 | import srt_equalizer 6 | import assemblyai as aai 7 | from uuid import uuid4 8 | 9 | 10 | from settings import * 11 | from typing import List 12 | from moviepy.editor import * 13 | from termcolor import colored 14 | from dotenv import load_dotenv 15 | from datetime import timedelta 16 | from moviepy.video.fx.all import crop 17 | from moviepy.video.tools.subtitles import SubtitlesClip 18 | 19 | load_dotenv("../.env") 20 | 21 | ASSEMBLY_AI_API_KEY = os.getenv("ASSEMBLY_AI_API_KEY") 22 | 23 | 24 | def save_video(video_url: str, directory: str = "../static/assets/temp") -> str: 25 | """ 26 | Saves a video from a given URL and returns the path to the video. 27 | 28 | Args: 29 | video_url (str): The URL of the video to save. 30 | directory (str): The path of the temporary directory to save the video to 31 | 32 | Returns: 33 | str: The path to the saved video. 34 | """ 35 | video_id = uuid.uuid4() 36 | video_path = f"{directory}/{video_id}.mp4" 37 | with open(video_path, "wb") as f: 38 | f.write(requests.get(video_url).content) 39 | 40 | return video_path 41 | 42 | 43 | def __generate_subtitles_assemblyai(audio_path: str, voice: str) -> str: 44 | """ 45 | Generates subtitles from a given audio file and returns the path to the subtitles. 46 | 47 | Args: 48 | audio_path (str): The path to the audio file to generate subtitles from. 49 | 50 | Returns: 51 | str: The generated subtitles 52 | """ 53 | 54 | language_mapping = { 55 | "br": "pt", 56 | "id": "en", #AssemblyAI doesn't have Indonesian 57 | "jp": "ja", 58 | "kr": "ko", 59 | } 60 | 61 | if voice in language_mapping: 62 | lang_code = language_mapping[voice] 63 | else: 64 | lang_code = voice 65 | 66 | aai.settings.api_key = ASSEMBLY_AI_API_KEY 67 | config = aai.TranscriptionConfig(language_code=lang_code) 68 | transcriber = aai.Transcriber(config=config) 69 | transcript = transcriber.transcribe(audio_path) 70 | subtitles = transcript.export_subtitles_srt() 71 | 72 | return subtitles 73 | 74 | 75 | def __generate_subtitles_locally(sentences: List[str], audio_clips: List[AudioFileClip]) -> str: 76 | """ 77 | Generates subtitles from a given audio file and returns the path to the subtitles. 
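    Each SRT entry is built from the running total of the audio clip durations; for
    example (times are illustrative), a first sentence whose clip lasts 3.5 seconds
    produces:

        1
        0:00:00,0 --> 0:00:03,5
        First sentence text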
78 | 79 | Args: 80 | sentences (List[str]): all the sentences said out loud in the audio clips 81 | audio_clips (List[AudioFileClip]): all the individual audio clips which will make up the final audio track 82 | Returns: 83 | str: The generated subtitles 84 | """ 85 | 86 | def convert_to_srt_time_format(total_seconds): 87 | # Convert total seconds to the SRT time format: HH:MM:SS,mmm 88 | if total_seconds == 0: 89 | return "0:00:00,0" 90 | return str(timedelta(seconds=total_seconds)).rstrip('0').replace('.', ',') 91 | 92 | start_time = 0 93 | subtitles = [] 94 | 95 | for i, (sentence, audio_clip) in enumerate(zip(sentences, audio_clips), start=1): 96 | duration = audio_clip.duration 97 | end_time = start_time + duration 98 | 99 | # Format: subtitle index, start time --> end time, sentence 100 | subtitle_entry = f"{i}\n{convert_to_srt_time_format(start_time)} --> {convert_to_srt_time_format(end_time)}\n{sentence}\n" 101 | subtitles.append(subtitle_entry) 102 | 103 | start_time += duration # Update start time for the next subtitle 104 | 105 | return "\n".join(subtitles) 106 | 107 | 108 | def generate_subtitles(audio_path: str, sentences: List[str], audio_clips: List[AudioFileClip], voice: str) -> str: 109 | """ 110 | Generates subtitles from a given audio file and returns the path to the subtitles. 111 | 112 | Args: 113 | audio_path (str): The path to the audio file to generate subtitles from. 114 | sentences (List[str]): all the sentences said out loud in the audio clips 115 | audio_clips (List[AudioFileClip]): all the individual audio clips which will make up the final audio track 116 | 117 | Returns: 118 | str: The path to the generated subtitles. 119 | """ 120 | 121 | def equalize_subtitles(srt_path: str, max_chars: int = 10) -> None: 122 | # Equalize subtitles 123 | srt_equalizer.equalize_srt_file(srt_path, srt_path, max_chars) 124 | 125 | # Save subtitles 126 | subtitles_path = f"../static/assets/subtitles/{uuid.uuid4()}.srt" 127 | 128 | if ASSEMBLY_AI_API_KEY is not None and ASSEMBLY_AI_API_KEY != "": 129 | print(colored("[+] Creating subtitles using AssemblyAI", "blue")) 130 | subtitles = __generate_subtitles_assemblyai(audio_path, voice) 131 | else: 132 | print(colored("[+] Creating subtitles locally", "blue")) 133 | subtitles = __generate_subtitles_locally(sentences, audio_clips) 134 | 135 | with open(subtitles_path, "w") as file: 136 | file.write(subtitles) 137 | 138 | # Equalize subtitles 139 | equalize_subtitles(subtitles_path) 140 | 141 | print(colored("[+] Subtitles generated.", "green")) 142 | 143 | return subtitles_path 144 | 145 | 146 | def combine_videos(video_paths: List[str], max_duration: int, max_clip_duration: int, threads: int) -> str: 147 | """ 148 | Combines a list of videos into one video and returns the path to the combined video. 149 | 150 | Args: 151 | video_paths (List): A list of paths to the videos to combine. 152 | max_duration (int): The maximum duration of the combined video. 153 | max_clip_duration (int): The maximum duration of each clip. 154 | threads (int): The number of threads to use for the video processing. 155 | 156 | Returns: 157 | str: The path to the combined video. 
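    Example (a minimal sketch; the clip paths below are hypothetical temp files):

        combined = combine_videos(
            ["../static/assets/temp/clip1.mp4", "../static/assets/temp/clip2.mp4"],
            max_duration=30, max_clip_duration=10, threads=2)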
158 | """ 159 | video_id = uuid.uuid4() 160 | combined_video_path = f"../static/assets/temp/{video_id}-combined.mp4" 161 | 162 | # Required duration of each clip 163 | req_dur = max_duration / len(video_paths) 164 | 165 | print(colored("[+] Combining videos...", "blue")) 166 | print(colored(f"[+] Each clip will be maximum {req_dur} seconds long.", "blue")) 167 | 168 | clips = [] 169 | tot_dur = 0 170 | # Add downloaded clips over and over until the duration of the audio (max_duration) has been reached 171 | while tot_dur < max_duration: 172 | for video_path in video_paths: 173 | clip = VideoFileClip(video_path) 174 | clip = clip.without_audio() 175 | # Check if clip is longer than the remaining audio 176 | if (max_duration - tot_dur) < clip.duration: 177 | clip = clip.subclip(0, (max_duration - tot_dur)) 178 | # Only shorten clips if the calculated clip length (req_dur) is shorter than the actual clip to prevent still image 179 | elif req_dur < clip.duration: 180 | clip = clip.subclip(0, req_dur) 181 | # clip = clip.set_fps(30) 182 | 183 | # Not all videos are same size, 184 | # so we need to resize them 185 | if round((clip.w/clip.h), 4) < 0.5625: 186 | clip = crop(clip, width=clip.w, height=round(clip.w/0.5625), \ 187 | x_center=clip.w / 2, \ 188 | y_center=clip.h / 2) 189 | else: 190 | clip = crop(clip, width=round(0.5625*clip.h), height=clip.h, \ 191 | x_center=clip.w / 2, \ 192 | y_center=clip.h / 2) 193 | clip = clip.resize((1080, 1920)) 194 | 195 | if clip.duration > max_clip_duration: 196 | clip = clip.subclip(0, max_clip_duration) 197 | 198 | clips.append(clip) 199 | tot_dur += clip.duration 200 | 201 | print(colored("[+] Videos combined.", "green")) 202 | # Debug what is in clips 203 | print(clips) 204 | final_clip = concatenate_videoclips(clips) 205 | final_clip = final_clip.set_fps(30) 206 | print(colored("[+] Set clip.", "green")) 207 | final_clip.write_videofile(combined_video_path, threads=3) 208 | 209 | print(colored("[+] Final video created.", "green")) 210 | return combined_video_path 211 | 212 | 213 | def generate_video(combined_video_path: str, tts_path: str, subtitles_path: str, threads: int, subtitles_position: str) -> str: 214 | """ 215 | This function creates the final video, with subtitles and audio. 216 | 217 | Args: 218 | combined_video_path (str): The path to the combined video. 219 | tts_path (str): The path to the text-to-speech audio. 220 | subtitles_path (str): The path to the subtitles. 221 | threads (int): The number of threads to use for the video processing. 222 | subtitles_position (str): The position of the subtitles. 223 | 224 | Returns: 225 | str: The path to the final video. 
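    Example (a minimal sketch; every path below is hypothetical):

        final = generate_video(
            combined_video_path="../static/assets/temp/abc-combined.mp4",
            tts_path="../static/assets/temp/abc.mp3",
            subtitles_path="../static/assets/subtitles/abc.srt",
            threads=2,
            subtitles_position="center,bottom")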
226 | """ 227 | 228 | # PRINT STATE 229 | print(colored("[+] Starting video generation...", "green")) 230 | 231 | # Get the Settings 232 | globalSettings = get_settings() 233 | # Make a generator that returns a TextClip when called with consecutive 234 | generator = lambda txt: TextClip( 235 | txt, 236 | font=globalSettings["fontSettings"]["font"], 237 | fontsize=globalSettings["fontSettings"]["fontsize"], 238 | color=globalSettings["fontSettings"]["color"], 239 | stroke_color=globalSettings["fontSettings"]["stroke_color"], 240 | stroke_width=globalSettings["fontSettings"]["stroke_width"], 241 | ) 242 | 243 | # Split the subtitles position into horizontal and vertical 244 | horizontal_subtitles_position, vertical_subtitles_position = globalSettings["fontSettings"]["subtitles_position"].split(",") 245 | 246 | # if subtitle position is not the same as the setting and is not empty we override 247 | if subtitles_position != globalSettings["fontSettings"]["subtitles_position"] and subtitles_position != "": 248 | horizontal_subtitles_position, vertical_subtitles_position = subtitles_position.split(",") 249 | 250 | # Burn the subtitles into the video 251 | subtitles = SubtitlesClip(subtitles_path, generator) 252 | result = CompositeVideoClip([ 253 | VideoFileClip(combined_video_path), 254 | subtitles.set_pos((horizontal_subtitles_position, vertical_subtitles_position)) 255 | ]) 256 | 257 | print(colored("[+] Adding audio...", "green")) 258 | # Add the audio 259 | audio = AudioFileClip(tts_path) 260 | result = result.set_audio(audio) 261 | print(colored("[+] Audio Done...", "green")) 262 | 263 | print(colored("[+] Writing video...", "green")) 264 | video_name = f"static/assets/{uuid4()}-final.mp4" 265 | result.write_videofile(f"../{video_name}", threads=3) 266 | 267 | return video_name 268 | -------------------------------------------------------------------------------- /UI/components/SearchDialog.vue: -------------------------------------------------------------------------------- 1 | 254 | 255 | 360 | 361 | 448 | -------------------------------------------------------------------------------- /Backend/classes/Shorts.py: -------------------------------------------------------------------------------- 1 | import os 2 | from utils import * 3 | 4 | from settings import * 5 | from gpt import * 6 | from search import * 7 | from termcolor import colored 8 | from flask import jsonify,json 9 | from video import * 10 | from tiktokvoice import * 11 | from uuid import uuid4 12 | from apiclient.errors import HttpError 13 | from moviepy.config import change_settings 14 | 15 | class Shorts: 16 | """ 17 | Class for creating VideoShorts. 18 | 19 | Steps to create a Video Short: 20 | 1. Generate a script [DONE] 21 | 2. Generate metadata (Title, Description, Tags) [DONE] 22 | 3. Get subtitles [DONE] 23 | 4. Get Videos related to the search term [DONE] 24 | 5. Convert Text-to-Speech [DONE] 25 | 6. Combine Videos [DONE] 26 | 7. Combine Videos with the Text-to-Speech [DONE] 27 | 7. Combine Videos with the Text-to-Speech [DONE] 28 | """ 29 | def __init__(self,video_subject: str, paragraph_number: int, ai_model: str,customPrompt: str="", extra_prompt: str = ""): 30 | """ 31 | Constructor for YouTube Class. 32 | 33 | Args: 34 | video_subject (str): The subject of the video. 35 | paragraph_number (int): The number of paragraphs to generate. 36 | ai_model (str): The AI model to use for generation. 37 | customPrompt (str): The custom prompt to use for generation. 
38 | extra_prompt (str): The extra prompt to use for generation. 39 | 40 | Returns: 41 | None 42 | """ 43 | global GENERATING 44 | GENERATING = True 45 | 46 | 47 | change_settings({"IMAGEMAGICK_BINARY": os.getenv("IMAGEMAGICK_BINARY")}) 48 | 49 | 50 | self.video_subject = video_subject 51 | self.paragraph_number = paragraph_number 52 | self.ai_model = ai_model 53 | self.customPrompt = customPrompt 54 | self.extra_prompt = extra_prompt 55 | self.globalSettings = get_settings() 56 | 57 | 58 | # Generate a script 59 | self.final_script = "" 60 | self.search_terms = [] 61 | self.AMOUNT_OF_STOCK_VIDEOS= 5 62 | 63 | # Video from pexels 64 | self.video_urls = [] 65 | self.video_paths = [] 66 | self.videos_quantity_search = 15 67 | self.min_duration_search = 5 68 | # Voice related variables 69 | self.voice = "en_us_001" 70 | self.voice_prefix = self.voice[:2] 71 | 72 | # Audio and subtitles 73 | self.tts_path = None 74 | self.subtitles_path = None 75 | 76 | # Final video 77 | self.final_video_path = None 78 | 79 | # Video metadata 80 | self.video_title = None 81 | self.video_description = None 82 | self.video_tags = None 83 | 84 | # Subtitle 85 | self.subtitles_position="" 86 | self.final_music_video_path="" 87 | 88 | @property 89 | def get_final_video_path(self): 90 | return self.final_video_path 91 | @property 92 | def get_final_music_video_path(self): 93 | return self.final_music_video_path 94 | 95 | @property 96 | def get_final_script(self): 97 | return self.final_script 98 | 99 | @property 100 | def get_tts_path(self): 101 | return self.tts_path 102 | 103 | @property 104 | def get_subtitles_path(self): 105 | return self.subtitles_path 106 | 107 | @property 108 | def get_video_paths(self): 109 | return self.video_paths 110 | 111 | def GenerateScript(self): 112 | """ 113 | Generate a script for a video, depending on the subject of the video, the number of paragraphs, and the AI model. 114 | 115 | Args: 116 | video_subject (str): The subject of the video. 117 | paragraph_number (int): The number of paragraphs to generate. 118 | ai_model (str): The AI model to use for generation. 119 | Returns: 120 | 121 | str: The script for the video. 
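        Example (illustrative only; the subject is a placeholder and "g4f" is just one
        of the models accepted by generate_response):

            shorts = Shorts("5 interesting facts about honey bees", paragraph_number=1, ai_model="g4f")
            script = shorts.GenerateScript()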
122 | """ 123 | 124 | if self.customPrompt and self.customPrompt != "": 125 | prompt = self.customPrompt 126 | else: 127 | prompt = self.globalSettings["scriptSettings"]["defaultPromptStart"] 128 | 129 | prompt += f""" 130 | # Initialization: 131 | - video subject: {self.video_subject} 132 | - number of paragraphs: {self.paragraph_number} 133 | {self.extra_prompt} 134 | 135 | """ 136 | # Add the global prompt end 137 | prompt += self.globalSettings["scriptSettings"]["defaultPromptEnd"] 138 | 139 | # Generate script 140 | response = generate_response(prompt, self.ai_model) 141 | 142 | print(colored(response, "cyan")) 143 | 144 | # Return the generated script 145 | if response: 146 | # Clean the script 147 | # Remove asterisks, hashes 148 | response = response.replace("*", "") 149 | response = response.replace("#", "") 150 | 151 | # Remove markdown syntax 152 | response = re.sub(r"\[.*\]", "", response) 153 | response = re.sub(r"\(.*\)", "", response) 154 | 155 | # Split the script into paragraphs 156 | paragraphs = response.split("\n\n") 157 | 158 | # Select the specified number of paragraphs 159 | selected_paragraphs = paragraphs[:self.paragraph_number] 160 | 161 | # Join the selected paragraphs into a single string 162 | final_script = "\n\n".join(selected_paragraphs) 163 | 164 | # Print to console the number of paragraphs used 165 | print(colored(f"Number of paragraphs used: {len(selected_paragraphs)}", "green")) 166 | 167 | self.final_script = final_script 168 | 169 | return final_script 170 | else: 171 | print(colored("[-] GPT returned an empty response.", "red")) 172 | return None 173 | 174 | def GenerateSearchTerms(self): 175 | self.search_terms = get_search_terms(self.video_subject, self.AMOUNT_OF_STOCK_VIDEOS, self.final_script, self.ai_model) 176 | 177 | return self.search_terms 178 | 179 | #Download the videos base on the search terms from pexel api 180 | def DownloadVideos(self, selectedVideoUrls): 181 | global GENERATING 182 | 183 | # Search for videos 184 | # Check if the selectedVideoUrls is empty 185 | if selectedVideoUrls and len(selectedVideoUrls) > 0: 186 | print(colored(f"Selected videos: {selectedVideoUrls}", "green")) 187 | # filter the selectedVideoUrls is a Array of objects with videoUrl object that has a link key with a value we use the value of the link key 188 | self.video_urls = [video_url["videoUrl"]["link"] for video_url in selectedVideoUrls] 189 | # log the selectedVideoUrls 190 | print(colored(f"Selected video urls: {self.video_urls}", "green")) 191 | else: 192 | for search_term in self.search_terms: 193 | global GENERATING 194 | if not GENERATING: 195 | return jsonify( 196 | { 197 | "status": "error", 198 | "message": "Video generation was cancelled.", 199 | "data": [], 200 | } 201 | ) 202 | found_urls = search_for_stock_videos( 203 | search_term, os.getenv("PEXELS_API_KEY"), self.videos_quantity_search, self.min_duration_search 204 | ) 205 | # Check for duplicates 206 | for url in found_urls: 207 | if url not in self.video_urls: 208 | self.video_urls.append(url) 209 | break 210 | 211 | # Check if video_urls is empty 212 | if not self.video_urls: 213 | print(colored("[-] No videos found to download.", "red")) 214 | return jsonify( 215 | { 216 | "status": "error", 217 | "message": "No videos found to download.", 218 | "data": [], 219 | } 220 | ) 221 | 222 | # Download the videos 223 | video_paths = [] 224 | # Let user know 225 | print(colored(f"[+] Downloading {len(self.video_urls)} videos...", "blue")) 226 | # Save the videos 227 | for video_url in 
self.video_urls: 228 | if not GENERATING: 229 | return jsonify( 230 | { 231 | "status": "error", 232 | "message": "Video generation was cancelled.", 233 | "data": [], 234 | } 235 | ) 236 | try: 237 | saved_video_path = save_video(video_url) 238 | video_paths.append(saved_video_path) 239 | except Exception: 240 | print(colored(f"[-] Could not download video: {video_url}", "red")) 241 | 242 | # Let user know 243 | print(colored("[+] Videos downloaded!", "green")) 244 | self.video_paths = video_paths 245 | 246 | 247 | def GenerateMetadata(self): 248 | self.video_title, self.video_description, self.video_tags = generate_metadata(self.video_subject, self.final_script, self.ai_model) 249 | 250 | # Write the metadata in a json file with the video title as the filename 251 | self.WriteMetadataToFile(self.video_title, self.video_description, self.video_tags) 252 | 253 | def GenerateVoice(self,voice): 254 | print(colored(f"[X] Generating voice: {voice} ", "green")) 255 | global GENERATING 256 | self.voice = voice 257 | self.voice_prefix = self.voice[:2] 258 | 259 | # Split script into sentences 260 | sentences = self.final_script.split(". ") 261 | 262 | # Remove empty strings 263 | sentences = list(filter(lambda x: x != "", sentences)) 264 | paths = [] 265 | 266 | # Generate TTS for every sentence 267 | for sentence in sentences: 268 | if not GENERATING: 269 | return jsonify( 270 | { 271 | "status": "error", 272 | "message": "Video generation was cancelled.", 273 | "data": [], 274 | } 275 | ) 276 | fileId = uuid4() 277 | current_tts_path = f"../static/assets/temp/{fileId}.mp3" 278 | tts(sentence, self.voice, filename=current_tts_path) 279 | 280 | # Add the audio clip to the list 281 | print(colored(f"[X] Save Audio ", "green")) 282 | audio_clip = AudioFileClip(f"../static/assets/temp/{fileId}.mp3") 283 | paths.append(audio_clip) 284 | 285 | # Combine all TTS files using moviepy 286 | 287 | print(colored(f"[X] Start saving the audio ", "green")) 288 | final_audio = concatenate_audioclips(paths) 289 | self.tts_path = f"../static/assets/temp/{uuid4()}.mp3" 290 | final_audio.write_audiofile(self.tts_path) 291 | 292 | # Generate the subtitles 293 | try: 294 | self.subtitles_path = generate_subtitles(audio_path=self.tts_path, sentences=sentences, audio_clips=paths, voice=self.voice_prefix) 295 | except Exception as e: 296 | print(colored(f"[-] Error generating subtitles: {e}", "red")) 297 | self.subtitles_path = None 298 | 299 | def CombineVideos(self): 300 | temp_audio = AudioFileClip(self.tts_path) 301 | n_threads = 2 302 | combined_video_path = combine_videos(self.video_paths, temp_audio.duration, 10, n_threads or 2) 303 | 304 | print(colored(f"[-] Next step: {combined_video_path}", "green")) 305 | # Put everything together 306 | try: 307 | self.final_video_path = generate_video(combined_video_path, self.tts_path, self.subtitles_path, n_threads or 2, self.subtitles_position) 308 | except Exception as e: 309 | print(colored(f"[-] Error generating final video: {e}", "red")) 310 | self.final_video_path = None 311 | 312 | def WriteMetadataToFile(video_title, video_description, video_tags): 313 | metadata = { 314 | "title": video_title, 315 | "description": video_description, 316 | "tags": video_tags 317 | } 318 | # Remplace spaces with underscores 319 | fileName = video_title.replace(" ", "_") 320 | 321 | with open(f"../../static/assets/temp/{fileName}.json", "w") as file: 322 | json.dump(metadata, file) 323 | 324 | def AddMusic(self, use_music,custom_song_path=""): 325 | video_clip = 
VideoFileClip(f"../{self.final_video_path}") 326 | 327 | self.final_music_video_path = f"{uuid4()}-music.mp4" 328 | n_threads = 2 329 | if use_music: 330 | # if no song path choose random song 331 | song_path = f"../static/assets/music/{custom_song_path}" 332 | if not custom_song_path: 333 | song_path = choose_random_song() 334 | 335 | 336 | # Add song to video at 30% volume using moviepy 337 | original_duration = video_clip.duration 338 | original_audio = video_clip.audio 339 | song_clip = AudioFileClip(song_path).set_fps(44100) 340 | 341 | # Set the volume of the song to 10% of the original volume 342 | song_clip = song_clip.volumex(0.1).set_fps(44100) 343 | 344 | # Add the song to the video 345 | comp_audio = CompositeAudioClip([original_audio, song_clip]) 346 | video_clip = video_clip.set_audio(comp_audio) 347 | video_clip = video_clip.set_fps(30) 348 | video_clip = video_clip.set_duration(original_duration) 349 | 350 | video_clip.write_videofile(f"../static/generated_videos/{self.final_music_video_path}", threads=n_threads or 1) 351 | else: 352 | video_clip.write_videofile(f"../static/generated_videos/{self.final_music_video_path}", threads=n_threads or 1) 353 | 354 | def Stop(self): 355 | global GENERATING 356 | # Stop FFMPEG processes 357 | if os.name == "nt": 358 | # Windows 359 | os.system("taskkill /f /im ffmpeg.exe") 360 | else: 361 | # Other OS 362 | os.system("pkill -f ffmpeg") 363 | 364 | GENERATING = False -------------------------------------------------------------------------------- /Backend/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from utils import * 3 | from dotenv import load_dotenv 4 | 5 | # Load environment variables 6 | load_dotenv("../.env") 7 | # Check if all required environment variables are set 8 | # This must happen before importing video which uses API keys without checking 9 | check_env_vars() 10 | 11 | from gpt import * 12 | from video import * 13 | from search import * 14 | from classes.Shorts import * 15 | from uuid import uuid4 16 | from tiktokvoice import * 17 | from flask_cors import CORS 18 | from termcolor import colored 19 | from youtube import upload_video 20 | from apiclient.errors import HttpError 21 | from flask import Flask, request, jsonify 22 | from moviepy.config import change_settings 23 | 24 | 25 | 26 | # Set environment variables 27 | SESSION_ID = os.getenv("TIKTOK_SESSION_ID") 28 | openai_api_key = os.getenv('OPENAI_API_KEY') 29 | change_settings({"IMAGEMAGICK_BINARY": os.getenv("IMAGEMAGICK_BINARY")}) 30 | 31 | 32 | # Initialize Flask 33 | app = Flask(__name__, static_folder="../static", static_url_path="/static") 34 | CORS(app) 35 | 36 | # Constants 37 | HOST = "0.0.0.0" 38 | PORT = 8080 39 | AMOUNT_OF_STOCK_VIDEOS = 5 40 | GENERATING = False 41 | 42 | # Create a method to create all the required folders 43 | def create_folders(): 44 | # Create static folder if it doesn't exist 45 | if not os.path.exists("../static"): 46 | os.makedirs("../static") 47 | # Create static/Songs and static/generated_videos folder if it doesn't exist 48 | if not os.path.exists("../static/assets"): 49 | os.makedirs("../static/assets") 50 | if not os.path.exists("../static/generated_videos"): 51 | os.makedirs("../static/generated_videos") 52 | 53 | 54 | # Create folders 55 | create_folders() 56 | 57 | 58 | # Generation Endpoint 59 | @app.route("/api/generate", methods=["POST"]) 60 | def generate(): 61 | try: 62 | # Set global variable 63 | global GENERATING 64 | GENERATING = True 65 | 66 | # 
Clean the temp and subtitles folders
67 | clean_dir("../static/assets/temp/")
68 | clean_dir("../static/assets/subtitles/")
69 | 
70 | 
71 | # Parse JSON
72 | data = request.get_json()
73 | paragraph_number = int(data.get('paragraphNumber', 1)) # Default to 1 if not provided
74 | ai_model = data.get('aiModel') # Get the AI model selected by the user
75 | n_threads = data.get('threads') # Amount of threads to use for video generation
76 | subtitles_position = data.get('subtitlesPosition') # Position of the subtitles in the video
77 | 
78 | # Get 'useMusic' from the request data and default to False if not provided
79 | use_music = data.get('useMusic', False)
80 | 
81 | # Get 'automateYoutubeUpload' from the request data and default to False if not provided
82 | automate_youtube_upload = data.get('automateYoutubeUpload', False)
83 | # Print some information about the video to be generated
84 | print(colored("[Video to be generated]", "blue"))
85 | print(colored(" Subject: " + data["videoSubject"], "blue"))
86 | print(colored(" AI Model: " + ai_model, "blue")) # Print the AI model being used
87 | print(colored(" Custom Prompt: " + data["customPrompt"], "blue")) # Print the custom prompt, if any
88 | 
89 | 
90 | 
91 | if not GENERATING:
92 | return jsonify(
93 | {
94 | "status": "error",
95 | "message": "Video generation was cancelled.",
96 | "data": [],
97 | }
98 | )
99 | 
100 | voice = data["voice"]
101 | voice_prefix = voice[:2]
102 | 
103 | 
104 | if not voice:
105 | print(colored("[!] No voice was selected. Defaulting to \"en_us_001\"", "yellow"))
106 | voice = "en_us_001"
107 | voice_prefix = voice[:2]
108 | 
109 | 
110 | videoClass = Shorts(data["videoSubject"], paragraph_number, ai_model, data["customPrompt"])
111 | # Generate a script
112 | videoClass.GenerateScript()
113 | # Generate search terms
114 | videoClass.GenerateSearchTerms()
115 | 
116 | videoClass.DownloadVideos([]) # No pre-selected URLs here, so search Pexels with the generated search terms
117 | 
118 | if not GENERATING:
119 | return jsonify(
120 | {
121 | "status": "error",
122 | "message": "Video generation was cancelled.",
123 | "data": [],
124 | }
125 | )
126 | 
127 | videoClass.GenerateVoice(voice)
128 | # Concatenate videos
129 | videoClass.CombineVideos()
130 | 
131 | videoClass.GenerateMetadata()
132 | 
133 | if automate_youtube_upload:
134 | # Start Youtube Uploader
135 | # Check if the CLIENT_SECRETS_FILE exists
136 | client_secrets_file = os.path.abspath("./client_secret.json")
137 | SKIP_YT_UPLOAD = False
138 | if not os.path.exists(client_secrets_file):
139 | SKIP_YT_UPLOAD = True
140 | print(colored("[-] Client secrets file missing. YouTube upload will be skipped.", "yellow"))
141 | print(colored("[-] Please download client_secret.json from the Google Cloud Console and place it inside the /Backend directory.", "red"))
142 | 
143 | # Only proceed with YouTube upload if the toggle is True and client_secret.json exists. 
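# Illustrative note (not part of the original source): the upload branch below uses the
# metadata produced by videoClass.GenerateMetadata() above and assumes a valid OAuth
# client file at Backend/client_secret.json, created in the Google Cloud Console with the
# YouTube Data API enabled. A request payload that exercises this path might look like the
# following sketch; the field names come from the parsing code at the top of this endpoint,
# while the values are placeholders:
#
#   POST /api/generate
#   {
#     "videoSubject": "Three facts about the ocean",
#     "paragraphNumber": 1,
#     "aiModel": "<ai model name>",
#     "customPrompt": "",
#     "voice": "en_us_001",
#     "subtitlesPosition": "center,bottom",
#     "threads": 2,
#     "useMusic": true,
#     "automateYoutubeUpload": true
#   }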
144 | if not SKIP_YT_UPLOAD:
145 | # Choose the appropriate category ID for your videos
146 | video_category_id = "28" # Science & Technology
147 | privacyStatus = "private" # "public", "private", "unlisted"
148 | video_metadata = { # Use the metadata produced by videoClass.GenerateMetadata()
149 | 'video_path': os.path.abspath(f"../{videoClass.final_video_path}"),
150 | 'title': videoClass.video_title,
151 | 'description': videoClass.video_description,
152 | 'category': video_category_id,
153 | 'keywords': ",".join(videoClass.video_tags),
154 | 'privacyStatus': privacyStatus,
155 | }
156 | 
157 | # Upload the video to YouTube
158 | try:
159 | # Unpack the video_metadata dictionary into individual arguments
160 | video_response = upload_video(
161 | video_path=video_metadata['video_path'],
162 | title=video_metadata['title'],
163 | description=video_metadata['description'],
164 | category=video_metadata['category'],
165 | keywords=video_metadata['keywords'],
166 | privacy_status=video_metadata['privacyStatus']
167 | )
168 | print(f"Uploaded video ID: {video_response.get('id')}")
169 | except HttpError as e:
170 | print(f"An HTTP error {e.resp.status} occurred:\n{e.content}")
171 | 
172 | 
173 | videoClass.AddMusic(use_music)
174 | # Let user know
175 | print(colored(f"[+] Video generated: {videoClass.get_final_video_path}!", "green"))
176 | videoClass.Stop()
177 | 
178 | # Return JSON
179 | return jsonify(
180 | {
181 | "status": "success",
182 | "message": "Video generated! See the static/generated_videos folder for the result.",
183 | "data": videoClass.get_final_video_path,
184 | }
185 | )
186 | except Exception as err:
187 | print(colored(f"[-] Error: {str(err)}", "red"))
188 | return jsonify(
189 | {
190 | "status": "error",
191 | "message": f"Video generation failed: {str(err)}",
192 | "data": [],
193 | }
194 | )
195 | 
196 | 
197 | @app.route("/api/cancel", methods=["POST"])
198 | def cancel():
199 | print(colored("[!] 
Received cancellation request...", "yellow"))
200 | 
201 | global GENERATING
202 | GENERATING = False
203 | 
204 | return jsonify({"status": "success", "message": "Cancelled video generation."})
205 | 
206 | # Route to generate the script and return the video script
207 | @app.route("/api/script", methods=["POST"])
208 | def generate_script_only():
209 | global GENERATING # Set the module-level generating flag
210 | GENERATING = True
211 | 
212 | clean_dir("../static/assets/subtitles/")
213 | print(colored("[+] Received script request...", "green"))
214 | 
215 | data = request.get_json()
216 | video_subject = data["videoSubject"]
217 | extra_prompt = data["extraPrompt"]
218 | ai_model = data["aiModel"]
219 | 
220 | videoClass = Shorts(video_subject, 1, ai_model, "", extra_prompt=extra_prompt)
221 | script = videoClass.GenerateScript()
222 | 
223 | 
224 | 
225 | search_terms = videoClass.GenerateSearchTerms()
226 | 
227 | # Show the search terms
228 | print(colored(f"Search terms: {', '.join(search_terms)}", "cyan"))
229 | 
230 | return jsonify(
231 | {
232 | "status": "success",
233 | "message": "Script generated!",
234 | "data": {
235 | "script": script,
236 | "search": search_terms
237 | },
238 | }
239 | )
240 | 
241 | # Download the videos and split the script
242 | @app.route("/api/search-and-download", methods=["POST"])
243 | def search_and_download():
244 | # Set generating to true
245 | global GENERATING
246 | GENERATING = True
247 | # Clean
248 | clean_dir("../static/assets/temp")
249 | clean_dir("../static/assets/subtitles")
250 | 
251 | 
252 | print(colored("[+] Received search and download request...", "green"))
253 | 
254 | data = request.get_json()
255 | search_terms = data["search"]
256 | script = data["script"]
257 | ai_model = data["aiModel"]
258 | voice = data["voice"]
259 | selectedVideoUrls = data.get("selectedVideoUrls", [])
260 | 
261 | # Extra options:
262 | custom_video = data.get("videoUrls", [])
263 | custom_voice = data.get("voiceUrl", "")
264 | # Set the default subtitles_position to center, bottom
265 | subtitles_position = data.get("subtitlesPosition", "center,bottom")
266 | n_threads = data.get('threads', 4)
267 | 
268 | if not voice:
269 | print(colored("[!] No voice was selected. 
Defaulting to \"en_us_001\"", "yellow")) 270 | voice = "en_us_001" 271 | # Search for a video of the given search term 272 | videoClass = Shorts("", 1, ai_model, '') 273 | videoClass.search_terms = search_terms 274 | videoClass.final_script = script 275 | videoClass.subtitles_position = subtitles_position 276 | 277 | videoClass.DownloadVideos(selectedVideoUrls) 278 | 279 | videoClass.GenerateVoice(voice) 280 | 281 | videoClass.CombineVideos() 282 | 283 | videoClass.Stop() 284 | 285 | return jsonify( 286 | { 287 | "status": "success", 288 | "message": "Search and download complete!", 289 | "data": { 290 | "finalAudio": videoClass.get_tts_path , 291 | "subtitles": videoClass.get_subtitles_path, 292 | "finalVideo": videoClass.get_final_video_path 293 | } 294 | } 295 | ) 296 | 297 | # Add audio to the video 298 | @app.route("/api/addAudio", methods=["POST"]) 299 | def addAudio(): 300 | GENERATING = True 301 | data = request.get_json() 302 | final_video_path = data["finalVideo"] 303 | song_path = data["songPath"] 304 | ai_model = data["aiModel"] 305 | 306 | videoClass = Shorts("", 1, ai_model, '') 307 | videoClass.final_video_path = final_video_path 308 | 309 | videoClass.AddMusic(True,song_path) 310 | 311 | videoClass.Stop() 312 | return jsonify( 313 | { 314 | "status": "success", 315 | "message": "Search and download complete!", 316 | "data": { 317 | "finalVideo": "static/generated_videos/" + videoClass.get_final_music_video_path 318 | } 319 | } 320 | ) 321 | 322 | 323 | # Get all available songs 324 | @app.route("/api/getSongs", methods=["GET"]) 325 | def get_songs(): 326 | songs = os.listdir("../static/assets/music") 327 | return jsonify({ 328 | "status": "success", 329 | "message": "Songs retrieved successfully!", 330 | "data": { 331 | "songs": songs 332 | } 333 | }) 334 | 335 | # Get all available videos 336 | @app.route("/api/getVideos", methods=["GET"]) 337 | def get_videos(): 338 | # Get all videos mp4 only 339 | videos = os.listdir("../static/generated_videos") 340 | videos = [video for video in videos if video.endswith(".mp4")] 341 | return jsonify( 342 | { 343 | "status": "success", 344 | "message": "Videos retrieved successfully!", 345 | "data": { 346 | "videos": videos 347 | } 348 | } 349 | ) 350 | 351 | # Get all available subtitles 352 | @app.route("/api/getSubtitles", methods=["GET"]) 353 | def get_subtitles(): 354 | subtitles = os.listdir("../static/assets/subtitles") 355 | return jsonify( 356 | { 357 | "status": "success", 358 | "message": "Songs retrieved successfully!", 359 | "data": { 360 | "subtitles": subtitles 361 | } 362 | } 363 | ) 364 | 365 | 366 | #Get all available models and voices 367 | @app.route("/api/models", methods=["GET"]) 368 | def get_models(): 369 | return jsonify( 370 | { 371 | "status": "success", 372 | "message": "Songs retrieved successfully!", 373 | "data": { 374 | "voices": available_voices() 375 | } 376 | } 377 | ) 378 | 379 | 380 | @app.route("/api/assets", methods=["GET"]) 381 | def get_assets(): 382 | video_assets = os.listdir("../static/assets/temp") 383 | videos = [video for video in videos if video.endswith(".mp4")] 384 | return jsonify( 385 | { 386 | "status": "success", 387 | "message": "Assets retrieved successfully!", 388 | "data": { 389 | "videos": videos 390 | } 391 | } 392 | ) 393 | 394 | 395 | 396 | @app.route("/api/settings", methods=["GET"]) 397 | def get_global_settings(): 398 | 399 | global_settings = get_settings() 400 | return jsonify( 401 | { 402 | "status": "success", 403 | "message": "System settings retrieved 
successfully!", 404 | "data": global_settings 405 | } 406 | ) 407 | 408 | if __name__ == "__main__": 409 | 410 | # Run Flask App 411 | app.run(debug=True, host=HOST, port=PORT) 412 | -------------------------------------------------------------------------------- /UI/pages/generate/index.vue: -------------------------------------------------------------------------------- 1 | 135 | 136 | 422 | 423 | --------------------------------------------------------------------------------