├── .database
│   └── template_asset_db.json
├── .github
│   ├── CHANGE_LOG.md
│   ├── CODEOWNERS
│   ├── CODE_OF_CONDUCT.md
│   ├── CONTRIBUTING.md
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yaml
│   │   ├── feature_request.yaml
│   │   └── question.yaml
│   ├── SECURITY.md
│   ├── config.yml
│   ├── issue_label_bot.yaml
│   ├── pull_request_template.md
│   ├── settings.yml
│   └── workflows
│       └── generate_release-changelog.yaml
├── .gitignore
├── CHANGES.txt
├── Dockerfile
├── LICENSE
├── README-Docker.md
├── README.md
├── assets
│   └── img
│       └── logo.png
├── docs
│   ├── .gitignore
│   ├── README.md
│   ├── babel.config.js
│   ├── docs
│   │   ├── api-key-manager.mdx
│   │   ├── asset-database.mdx
│   │   ├── content-translation-engine.mdx
│   │   ├── content-video-engine.mdx
│   │   ├── facts-short-engine.mdx
│   │   ├── getting-started.mdx
│   │   └── how-to-install.mdx
│   ├── docusaurus.config.js
│   ├── package.json
│   ├── plugins
│   │   ├── my-loaders
│   │   │   └── index.js
│   │   └── tailwind-loader
│   │       └── index.js
│   ├── sidebars.js
│   ├── src
│   │   ├── components
│   │   │   └── Home.js
│   │   ├── css
│   │   │   ├── custom.css
│   │   │   └── fragments.css
│   │   └── pages
│   │       └── index.js
│   ├── static
│   │   └── img
│   │       ├── assets
│   │       │   ├── configuration.svg
│   │       │   ├── implementation.svg
│   │       │   └── scraping.svg
│   │       ├── favicon.ico
│   │       └── logo.png
│   ├── tailwind.config.js
│   └── yarn.lock
├── fonts
│   ├── LuckiestGuy-Regular.ttf
│   ├── ObelixProB-cyr.ttf
│   └── Roboto-Bold.ttf
├── gui
│   ├── __pycache__
│   │   ├── content_automation_ui.cpython-39.pyc.1849492106672
│   │   └── short_automation_ui.cpython-39.pyc.1870860912944
│   ├── asset_components.py
│   ├── content_automation_ui.py
│   ├── gui_gradio.py
│   ├── ui_abstract_base.py
│   ├── ui_abstract_component.py
│   ├── ui_components_html.py
│   ├── ui_tab_asset_library.py
│   ├── ui_tab_config.py
│   ├── ui_tab_short_automation.py
│   ├── ui_tab_video_automation.py
│   └── ui_tab_video_translation.py
├── installation-notes.md
├── public
│   ├── subscribe-animation.mp4
│   └── white_reddit_template.png
├── requirements.txt
├── runShortGPT.py
├── runShortGPTColab.py
├── setup.py
├── shortGPT
│   ├── __init__.py
│   ├── api_utils
│   │   ├── README.md
│   │   ├── __init__.py
│   │   ├── eleven_api.py
│   │   ├── image_api.py
│   │   └── pexels_api.py
│   ├── audio
│   │   ├── README.md
│   │   ├── __init__.py
│   │   ├── audio_duration.py
│   │   ├── audio_utils.py
│   │   ├── edge_voice_module.py
│   │   ├── eleven_voice_module.py
│   │   └── voice_module.py
│   ├── config
│   │   ├── README.md
│   │   ├── __init__.py
│   │   ├── api_db.py
│   │   ├── asset_db.py
│   │   ├── config.py
│   │   ├── languages.py
│   │   └── path_utils.py
│   ├── database
│   │   ├── README.md
│   │   ├── __init__.py
│   │   ├── content_data_manager.py
│   │   ├── content_database.py
│   │   └── db_document.py
│   ├── editing_framework
│   │   ├── README.md
│   │   ├── __init__.py
│   │   ├── core_editing_engine.py
│   │   ├── editing_engine.py
│   │   ├── editing_steps
│   │   │   ├── __init__.py
│   │   │   ├── add_background_video.json
│   │   │   ├── add_background_voiceover.json
│   │   │   ├── add_voiceover.json
│   │   │   ├── background_music.json
│   │   │   ├── crop_1920x1080_to_short.json
│   │   │   ├── extract_audio.json
│   │   │   ├── insert_audio.json
│   │   │   ├── make_caption.json
│   │   │   ├── make_caption_arabic.json
│   │   │   ├── make_caption_arabic_landscape.json
│   │   │   ├── make_caption_landscape.json
│   │   │   ├── show_reddit_image.json
│   │   │   ├── show_top_image.json
│   │   │   ├── show_watermark.json
│   │   │   └── subscribe_animation.json
│   │   ├── flows
│   │   │   ├── __init__.py
│   │   │   └── build_reddit_image.json
│   │   └── rendering_logger.py
│   ├── editing_utils
│   │   ├── README.md
│   │   ├── __init__.py
│   │   ├── captions.py
│   │   ├── editing_images.py
│   │   └── handle_videos.py
│   ├── engine
│   │   ├── README.md
│   │   ├── __init__.py
│   │   ├── abstract_content_engine.py
│   │   ├── content_short_engine.py
│   │   ├── content_translation_engine.py
│   │   ├── content_video_engine.py
│   │   ├── facts_short_engine.py
│   │   ├── multi_language_translation_engine.py
│   │   └── reddit_short_engine.py
│   ├── gpt
│   │   ├── README.md
│   │   ├── __init__.py
│   │   ├── facts_gpt.py
│   │   ├── gpt_chat_video.py
│   │   ├── gpt_editing.py
│   │   ├── gpt_translate.py
│   │   ├── gpt_utils.py
│   │   ├── gpt_voice.py
│   │   ├── gpt_yt.py
│   │   └── reddit_gpt.py
│   ├── prompt_templates
│   │   ├── __init__.py
│   │   ├── chat_video_edit_script.yaml
│   │   ├── chat_video_script.yaml
│   │   ├── editing_generate_images.yaml
│   │   ├── editing_generate_videos.yaml
│   │   ├── facts_generator.yaml
│   │   ├── facts_subjects_generation.yaml
│   │   ├── reddit_extract_question.yaml
│   │   ├── reddit_filter_realistic.yaml
│   │   ├── reddit_generate_question.yaml
│   │   ├── reddit_generate_script.yaml
│   │   ├── reddit_story_filter.yaml
│   │   ├── reddit_username.yaml
│   │   ├── translate_content.yaml
│   │   ├── voice_identify_gender.yaml
│   │   └── yt_title_description.yaml
│   ├── tracking
│   │   ├── README.md
│   │   ├── __init__.py
│   │   ├── api_tracking.py
│   │   └── cost_analytics.py
│   └── utils
│       ├── cli.py
│       └── requirements.py
└── videos
    ├── .gitignore
    └── archive
        └── .gitignore
/.database/template_asset_db.json:
--------------------------------------------------------------------------------
1 | {
2 | "asset_collection": {
3 | "1": {
4 | "_id": "local_assets",
5 | "white_reddit_template": {
6 | "path": "public/white_reddit_template.png",
7 | "type": "image",
8 | "ts": "2023-07-03 19:41:55",
9 | "required": true
10 | },
11 | "subscribe-animation": {
12 | "path": "public/subscribe-animation.mp4",
13 | "type": "video",
14 | "ts": "2023-07-03 21:37:53",
15 | "required": true
16 | }
17 | },
18 | "2": {
19 | "_id": "remote_assets",
20 | "Music joakim karud dreams": {
21 | "type": "background music",
22 | "url": "https://www.youtube.com/watch?v=p56gqDhUYbU",
23 | "ts": "2023-07-05 04:35:03"
24 | },
25 | "Music dj quads": {
26 | "type": "background music",
27 | "url": "https://www.youtube.com/watch?v=uUu1NcSHg2E",
28 | "ts": "2023-07-05 05:03:44"
29 | },
30 | "Car race gameplay": {
31 | "type": "background video",
32 | "url": "https://www.youtube.com/watch?v=gBsJA8tCeyc",
33 | "ts": "2023-07-04 23:07:44"
34 | },
35 | "Minecraft jumping circuit": {
36 | "url": "https://www.youtube.com/watch?v=Pt5_GSKIWQM",
37 | "type": "background video",
38 | "ts": "2023-07-07 04:13:36"
39 | },
40 | "Ski gameplay": {
41 | "url": "https://www.youtube.com/watch?v=8ao1NAOVKTU",
42 | "type": "background video",
43 | "ts": "2023-07-07 04:54:16"
44 | }
45 | }
46 | }
47 | }
--------------------------------------------------------------------------------
/.github/CHANGE_LOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | All notable changes to this project will be documented in this file.
4 |
5 | ## [Unreleased]
6 |
7 |
11 |
12 | Upcoming changes.
13 |
14 | ### Added
15 |
16 | ### Changed
17 |
18 | ### Removed
19 |
20 | ## [0.0.1] - YYYY-MM-DD
21 |
22 | Initial Release.
23 |
24 | ### Added
25 |
26 | - What was added.
27 |
28 |
29 |
33 | [Unreleased]: /
34 | [0.0.1]: /v0.0.1
35 |
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # These owners will be the default owners for everything in
2 | # the repo. Unless a later match takes precedence,
3 | # @USER will be requested for
4 | # review when someone opens a pull request.
5 | # If you want to add more owners, list them after the default owner below (see the commented example at the end of this file).
6 | * @RayVentura
7 |
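8 | # Example of a path-scoped rule that would take precedence over the default
9 | # rule above (commented out; @DocsOwner is a placeholder, not a real user):
10 | # /docs/ @DocsOwner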
--------------------------------------------------------------------------------
/.github/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, religion, or sexual identity
10 | and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | * Demonstrating empathy and kindness toward other people
21 | * Being respectful of differing opinions, viewpoints, and experiences
22 | * Giving and gracefully accepting constructive feedback
23 | * Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | * Focusing on what is best not just for us as individuals, but for the
26 | overall community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | * The use of sexualized language or imagery, and sexual attention or
31 | advances of any kind
32 | * Trolling, insulting or derogatory comments, and personal or political attacks
33 | * Public or private harassment
34 | * Publishing others' private information, such as a physical or email
35 | address, without their explicit permission
36 | * Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement.
63 | All complaints will be reviewed and investigated promptly and fairly.
64 |
65 | All community leaders are obligated to respect the privacy and security of the
66 | reporter of any incident.
67 |
68 | ## Enforcement Guidelines
69 |
70 | Community leaders will follow these Community Impact Guidelines in determining
71 | the consequences for any action they deem in violation of this Code of Conduct:
72 |
73 | ### 1. Correction
74 |
75 | **Community Impact**: Use of inappropriate language or other behavior deemed
76 | unprofessional or unwelcome in the community.
77 |
78 | **Consequence**: A private, written warning from community leaders, providing
79 | clarity around the nature of the violation and an explanation of why the
80 | behavior was inappropriate. A public apology may be requested.
81 |
82 | ### 2. Warning
83 |
84 | **Community Impact**: A violation through a single incident or series
85 | of actions.
86 |
87 | **Consequence**: A warning with consequences for continued behavior. No
88 | interaction with the people involved, including unsolicited interaction with
89 | those enforcing the Code of Conduct, for a specified period of time. This
90 | includes avoiding interactions in community spaces as well as external channels
91 | like social media. Violating these terms may lead to a temporary or
92 | permanent ban.
93 |
94 | ### 3. Temporary Ban
95 |
96 | **Community Impact**: A serious violation of community standards, including
97 | sustained inappropriate behavior.
98 |
99 | **Consequence**: A temporary ban from any sort of interaction or public
100 | communication with the community for a specified period of time. No public or
101 | private interaction with the people involved, including unsolicited interaction
102 | with those enforcing the Code of Conduct, is allowed during this period.
103 | Violating these terms may lead to a permanent ban.
104 |
105 | ### 4. Permanent Ban
106 |
107 | **Community Impact**: Demonstrating a pattern of violation of community
108 | standards, including sustained inappropriate behavior, harassment of an
109 | individual, or aggression toward or disparagement of classes of individuals.
110 |
111 | **Consequence**: A permanent ban from any sort of public interaction within
112 | the community.
113 |
114 | ## Attribution
115 |
116 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
117 | version 2.0, available at
118 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
119 |
120 | Community Impact Guidelines were inspired by [Mozilla's code of conduct
121 | enforcement ladder](https://github.com/mozilla/diversity).
122 |
123 | [homepage]: https://www.contributor-covenant.org
124 |
125 | For answers to common questions about this code of conduct, see the FAQ at
126 | https://www.contributor-covenant.org/faq. Translations are available at
127 | https://www.contributor-covenant.org/translations.
128 |
--------------------------------------------------------------------------------
/.github/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | 🌟💻📚
2 |
3 | ## Contributing
4 |
5 | There are many exciting ways to contribute to ShortGPT, our AI automated content creation framework. 👏
6 |
7 | See below for everything you can do and the processes to follow for each contribution method. Note that no matter how you contribute, your participation is governed by our ✨[Code of Conduct](CODE_OF_CONDUCT.md)✨.
8 |
9 | ## 🛠️ Make changes to the code or docs
10 |
11 | - 🍴 Fork the project,
12 | - 💡 make your changes,
13 | - 🔀 and send a pull request! 🙌
14 |
15 | Make sure you read and follow the instructions in the [pull request template](pull_request_template.md). And note that all participation in this project (including code submissions) is governed by our ✨[Code of Conduct](CODE_OF_CONDUCT.md)✨.
16 |
17 | ## 🐞📝 Submit bug reports or feature requests
18 |
19 | Just use the GitHub issue tracker to submit your bug reports and feature requests. We appreciate your feedback! 🐛🔧
20 |
21 | Let's make ShortGPT even better together! 🚀❤️
22 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: rayventura
4 | patreon: # Replace with a single Patreon username
5 | open_collective: # Replace with a single Open Collective username
6 | ko_fi: rayventura
7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
9 | liberapay: # Replace with a single Liberapay username
10 | issuehunt: # Replace with a single IssueHunt username
11 | otechie: # Replace with a single Otechie username
12 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
13 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
14 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.yaml:
--------------------------------------------------------------------------------
1 | name: 🐛 Bug Report
2 | description: File a bug report
3 | title: '🐛 [Bug]: '
4 | labels: ['bug']
5 |
6 | body:
7 | - type: markdown
8 | attributes:
9 | value: |
10 | Thanks for taking the time to fill out this bug report!
11 |
12 | - type: textarea
13 | id: what-happened
14 | attributes:
15 | label: What happened?
16 | description: Describe the issue here.
17 | placeholder: Tell us what you see!
18 | validations:
19 | required: true
20 |
21 | - type: dropdown
22 | id: browsers
23 | attributes:
24 | label: What type of browser are you seeing the problem on?
25 | multiple: true
26 | options:
27 | - Firefox
28 | - Chrome
29 | - Safari
30 | - Microsoft Edge
31 | validations:
32 | required: true
33 |
34 | - type: dropdown
35 | id: operating-systems
36 | attributes:
37 | label: What type of Operating System are you seeing the problem on?
38 | multiple: true
39 | options:
40 | - Linux
41 | - Windows
42 | - Mac
43 | - Google Colab
44 | - Other
45 | validations:
46 | required: true
47 |
48 | - type: input
49 | id: python-version
50 | attributes:
51 | label: Python Version
52 | description: What version of Python are you using?
53 | placeholder: e.g. Python 3.9.0
54 | validations:
55 | required: true
56 |
57 | - type: input
58 | id: application-version
59 | attributes:
60 | label: Application Version
61 | description: What version of the application are you using?
62 | placeholder: e.g. v1.2.3
63 | validations:
64 | required: true
65 |
66 | - type: textarea
67 | id: expected-behavior
68 | attributes:
69 | label: Expected Behavior
70 | description: What did you expect to happen?
71 | placeholder: What did you expect?
72 | validations:
73 | required: true
74 |
75 | - type: textarea
76 | id: error-message
77 | attributes:
78 | label: Error Message
79 | description: What error message did you receive?
80 | placeholder:
81 | render: shell
82 | validations:
83 | required: false
84 |
85 | - type: textarea
86 | id: logs
87 | attributes:
88 | label: Code to reproduce this issue.
89 | description: Please copy and paste any relevant code to reproduce this issue.
90 | render: shell
91 |
92 | - type: textarea
93 | id: screenshots-assets
94 | attributes:
95 | label: Screenshots/Assets/Relevant links
96 | description: If applicable, add screenshots, assets or any relevant links that can help understand the issue.
97 | placeholder: Provide any relevant material here
98 | validations:
99 | required: false
100 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.yaml:
--------------------------------------------------------------------------------
1 | name: ✨ Feature request
2 | description: Suggest a feature / idea for this project
3 | title: '✨ [Feature Request / Suggestion]: '
4 | labels: ['feature']
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | We appreciate your feedback on how to improve this project. Please be sure to include as many details & any relevant resources as possible!
10 |
11 | - type: textarea
12 | id: Suggestion
13 | attributes:
14 | label: Suggestion / Feature Request
15 | description: Describe the feature(s) you would like to see added.
16 | placeholder: Tell us your suggestion
17 | validations:
18 | required: true
19 |
20 | - type: textarea
21 | id: why-usage
22 | attributes:
23 | label: Why would this be useful?
24 | description: Describe why this feature would be useful.
25 | placeholder: Tell us why this would be useful to have this feature
26 | validations:
27 | required: false
28 |
29 | - type: textarea
30 | id: screenshots-assets
31 | attributes:
32 | label: Screenshots/Assets/Relevant links
33 | description: If applicable, add screenshots, assets or any relevant links that can help understand the issue.
34 | placeholder: Provide any relevant material here
35 | validations:
36 | required: false
37 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/question.yaml:
--------------------------------------------------------------------------------
1 | name: ❓ Question
2 | description: Ask a question about this project
3 | title: '❓ [Question]: '
4 | labels: ['question']
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | We appreciate your interest in this project. Please be sure to include as much detail & context about your question as possible!
10 |
11 | - type: textarea
12 | id: Question
13 | attributes:
14 | label: Your Question
15 | description: Describe your question in detail.
16 | validations:
17 | required: true
18 |
--------------------------------------------------------------------------------
/.github/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Supported Versions
4 |
5 | | Version | Supported |
6 | | ------- | ------------------ |
7 | | 0.0.x | :x: |
8 |
9 | ## 🔒️ Reporting a Vulnerability
10 |
11 | If you have identified a security vulnerability in our system or product, please contact `RayVentura` with your findings. We strongly recommend using our `PGP key` to prevent this information from falling into the wrong hands.
12 |
13 | ### Disclosure Policy
14 |
15 | Upon receipt of a security report the following steps will be taken:
16 |
17 | - Acknowledge your report within 48 hours, and provide a more detailed update within a further 48 hours.
18 | - Confirm the problem and determine the affected versions.
19 | - Keep you informed of the progress towards resolving the problem and notify you when the vulnerability has been fixed.
20 | - Audit code to find any potential similar problems.
21 | - Prepare fixes for all releases still under maintenance. These fixes will be released as fast as possible.
22 | - Handle your report with strict confidentiality, and not pass on your personal details to third parties without your permission.
23 |
24 | Whilst the issue is under investigation:
25 |
26 | - **Do** provide as much information as possible.
27 | - **Do not** exploit the vulnerability or problem you have discovered.
28 | - **Do not** reveal the problem to others until it has been resolved.
29 |
--------------------------------------------------------------------------------
/.github/config.yml:
--------------------------------------------------------------------------------
1 | # Configuration for new-issue-welcome - https://github.com/behaviorbot/new-issue-welcome
2 |
3 | # Comment to be posted to on first time issues
4 | newIssueWelcomeComment: >
5 | Thanks for opening your first issue! Reports like these help improve the project!
6 |
7 | # Configuration for new-pr-welcome - https://github.com/behaviorbot/new-pr-welcome
8 |
9 | # Comment to be posted to on PRs from first time contributors in your repository
10 | newPRWelcomeComment: >
11 | Thanks for opening this pull request!
12 |
13 | # Configuration for first-pr-merge - https://github.com/behaviorbot/first-pr-merge
14 |
15 | # Comment to be posted to on pull requests merged by a first time user
16 | firstPRMergeComment: >
17 | Congrats on merging your first pull request!
18 |
19 | # The keyword to find for Todo Bot issue
20 | todo:
21 | keyword: '@todo'
22 |
--------------------------------------------------------------------------------
/.github/issue_label_bot.yaml:
--------------------------------------------------------------------------------
1 | label-alias:
2 | bug: 'Type: Bug'
3 | feature_request: 'Type: Feature'
4 | question: 'Type: Question'
5 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ## Proposed changes
2 |
3 | Describe the big picture of your changes here to communicate to the maintainers why we should accept this pull request. If it fixes a bug or resolves a feature request, be sure to link to that issue. 👀🔧
4 |
5 | ## Types of changes
6 |
7 | What types of changes does your code introduce to this project?
8 | _Put an `x` in the boxes that apply_ 😄🚀
9 |
10 | - [ ] Bugfix (non-breaking change which fixes an issue) 🐛
11 | - [ ] New feature (non-breaking change which adds functionality) ✨
12 | - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) 💥
13 | - [ ] Documentation Update (if none of the other choices apply) 📖
14 |
15 | ## Checklist
16 |
17 | _Put an `x` in the boxes that apply. You can also fill these out after creating the PR. If you're unsure about any of them, don't hesitate to ask. We're here to help! This is simply a reminder of what we are going to look for before merging your code._ ✅
18 |
19 | - [ ] I have read the CONTRIBUTING.md 📚
20 | - [ ] I have added tests that prove my fix is effective or that my feature works ✅✔️
21 | - [ ] I have added necessary documentation (if appropriate) 📝
22 |
23 | ## Further comments
24 |
25 | If this is a relatively large or complex change, kick off the discussion by explaining why you chose the solution you did and what alternatives you considered, etc... 💡❓
26 |
27 |
28 | ## References and related issues (e.g. #1234)
29 |
30 | N/A 📌
31 |
--------------------------------------------------------------------------------
/.github/workflows/generate_release-changelog.yaml:
--------------------------------------------------------------------------------
1 | name: Create Release
2 |
3 | on:
4 | push:
5 | tags:
6 | - 'v*' # Push events to matching v*, e.g. v1.0, v20.15.10
7 |
8 | jobs:
9 | build:
10 | name: Create Release
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Checkout code
14 | uses: actions/checkout@v2
15 | with:
16 | fetch-depth: 0
17 | - name: Changelog
18 | uses: Bullrich/generate-release-changelog@master
19 | id: Changelog
20 | env:
21 | REPO: ${{ github.repository }}
22 | - name: Create Release
23 | id: create_release
24 | uses: actions/create-release@latest
25 | env:
26 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token
27 | with:
28 | tag_name: ${{ github.ref }}
29 | release_name: Release ${{ github.ref }}
30 | body: |
31 | ${{ steps.Changelog.outputs.changelog }}
32 | draft: false
33 | prerelease: false
34 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | !*.py
2 | !*.json
3 | !*.yaml
4 | !*.template
5 | *.pyc
6 | **/__pycache__/
7 | test.py
8 | public/*
9 | !public/white_reddit_template.png
10 | !public/subscribe-animation.mp4
11 | z_doc/*
12 | z_other/*
13 | videos/*
14 | .logs/
15 | .editing_assets/*
16 | .database/api_db.json
17 | .database/content_db.json
18 | .database/asset_db.json
19 | flagged/
20 | .vscode
21 | .env
22 | ShortGPT.egg-info
23 | dist
24 | build
25 | setup
26 | test.ipynb
27 | .venv/
28 | MANIFEST.in
29 | schema.json
30 | video.mp4
31 | Untitled-1.ipynb
--------------------------------------------------------------------------------
/CHANGES.txt:
--------------------------------------------------------------------------------
1 | # CHANGES
2 |
3 | ## Version 0.1.31
4 | - Fixing issue in AssetDatabase, where it was copying a nonexistent asset template file
5 | ## Version 0.1.3
6 | - Requiring a YouTube URL as the subscribe animation URL in the EditingStep.ADD_SUBSCRIBE_ANIMATION step.
7 | - Adding a default subscribe animation YouTube link shipped in the AssetDatabase
8 | - Making path imports relative for gpt prompts and editing blocks and flows.
9 | ## Version 0.1.2
10 | - Improving logs in content engines
11 | ## Version 0.1.1
12 | - Adding AssetType in AssetDatabase
13 | - Adding ApiProvider in api_db
14 | - Fixing pip library missing the editing_framework and prompt_templates modules
15 | ## Version 0.1.0
16 | - Fixing the AssetDatabase when it's empty
17 | ## Version 0.0.2
18 | - Implemented the content_translation_engine; a multilingual video dubbing content engine. The source can be found at shortGPT/engine/content_translation_engine.py.
19 | - Implemented the new EdgeTTS voice module; it can be found at shortGPT/audio/edge_voice_module.py.
20 | - Added documentation which can be found under docs/.
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use an official Python runtime as the parent image
2 | FROM python:3.10-slim-bullseye
3 | RUN apt-get update && apt-get install -y ffmpeg
4 |
5 | # Set the working directory in the container to /app
6 | WORKDIR /app
7 |
8 | # Install any Python packages specified in requirements.txt
9 | # Copy requirements file
10 | COPY requirements.txt .
11 |
12 | # Install dependencies
13 | RUN pip install -r requirements.txt
14 |
15 | # Copy the local package directory content into the container at /app
16 | COPY . /app
17 |
18 | EXPOSE 31415
19 |
20 | # Define any environment variables
21 | # ENV KEY Value
22 |
23 | # Print environment variables (for debugging purposes, you can remove this line if not needed)
24 | RUN ["printenv"]
25 |
26 | # Run Python script when the container launches
27 | CMD ["python", "-u", "./runShortGPT.py"]
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Ray Ventura
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README-Docker.md:
--------------------------------------------------------------------------------
1 | # To run ShortGPT with Docker:
2 |
3 |
4 | First make a .env file with the API keys like this:
5 |
6 | ```bash
7 | GEMINI_API_KEY=put_your_gemini_api_key_here
8 | OPENAI_API_KEY=sk-_put_your_openai_api_key_here
9 | ELEVENLABS_API_KEY=put_your_eleven_labs_api_key_here
10 | PEXELS_API_KEY=put_your_pexels_api_key_here
11 | ```
12 |
13 |
14 | To build and run the Docker image:
15 | ```bash
16 | docker build -t short_gpt_docker:latest .
17 | docker run -p 31415:31415 --env-file .env short_gpt_docker:latest
18 | ```
19 | Export Docker image:
20 | ```bash
21 | docker save short_gpt_docker > short_gpt_docker.tar
22 | ```
23 |
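24 | The exported tarball can be loaded back into Docker on another machine:
25 | ```bash
26 | docker load < short_gpt_docker.tar
27 | ```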
--------------------------------------------------------------------------------
/assets/img/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayVentura/ShortGPT/3df4e0f7a422bf7386565d498bf4521a2544c614/assets/img/logo.png
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | # Dependencies
2 | /node_modules
3 |
4 | # Production
5 | /build
6 |
7 | # Generated files
8 | .docusaurus
9 | .cache-loader
10 |
11 | # Misc
12 | .DS_Store
13 | .env.local
14 | .env.development.local
15 | .env.test.local
16 | .env.production.local
17 |
18 | npm-debug.log*
19 | yarn-debug.log*
20 | yarn-error.log*
21 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # ShortGPT Documentation
2 | # Installation
3 |
4 | 1. `yarn install` in this directory (the `docs/` folder, which contains `package.json`).
5 | 1. In this directory, do `yarn start`.
6 | 1. A browser window will open up, pointing to the docs.
7 |
8 | # Deployment
9 |
10 | Vercel handles the deployment of this website.
11 |
--------------------------------------------------------------------------------
/docs/babel.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
3 | };
4 |
--------------------------------------------------------------------------------
/docs/docs/api-key-manager.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: ApiKeyManager in ShortGPT
3 | sidebar_label: ApiKeyManager
4 | ---
5 |
6 | # ApiKeyManager in ShortGPT
7 |
8 | ApiKeyManager is a class in the ShortGPT framework that manages the API keys for different providers. It interacts with the database to get and set API keys.
9 |
10 | ## Importing ApiKeyManager
11 |
12 | ```python
13 | from shortGPT.config.api_db import ApiKeyManager, ApiProvider
14 | ```
15 |
16 | ## Using ApiKeyManager
17 |
18 | ApiKeyManager provides two main methods: `get_api_key` and `set_api_key`.
19 |
20 | ### set_api_key
21 |
22 | This method is used to set the API key for a specific provider in the database. It takes two arguments: the key (provider name) and the value (API key).
23 |
24 | ```python
25 | ApiKeyManager.set_api_key(ApiProvider.OPENAI, "your_openai_key")
26 | ApiKeyManager.set_api_key(ApiProvider.ELEVEN_LABS, "your_eleven_labs_key")
27 | ```
28 |
29 | In the above example, we are setting the API keys for OPENAI and ELEVEN_LABS.
30 |
31 | ### get_api_key
32 |
33 | This method is used to retrieve the API key for a specific provider from the database. It takes one argument: the key (provider name).
34 |
35 | ```python
36 | openai_key = ApiKeyManager.get_api_key(ApiProvider.OPENAI)
37 | eleven_labs_key = ApiKeyManager.get_api_key(ApiProvider.ELEVEN_LABS)
38 | ```
39 | In the above example, we are retrieving the API keys for OPENAI and ELEVEN_LABS.
40 |
41 | ## Note
42 |
43 | The `key` argument in both methods can either be a string or an instance of the `ApiProvider` enum. If it is an instance of `ApiProvider`, the `value` attribute of the enum instance will be used as the key.
44 |
45 | ```python
46 | ApiKeyManager.set_api_key("OPENAI_API_KEY", "your_openai_key")
47 | ApiKeyManager.set_api_key("ELEVENLABS_API_KEY", "your_eleven_labs_key")
48 |
49 | openai_key = ApiKeyManager.get_api_key("OPENAI_API_KEY")
50 | eleven_labs_key = ApiKeyManager.get_api_key("ELEVENLABS_API_KEY")
51 | ```
52 | In the above example, we are setting and retrieving the API keys using string keys instead of `ApiProvider` instances.
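53 |
54 | A handy pattern is to fail fast when a required key is missing before launching an engine. A minimal sketch (assuming `get_api_key` returns an empty value for keys that were never set):
55 |
56 | ```python
57 | from shortGPT.config.api_db import ApiKeyManager, ApiProvider
58 |
59 | # Assumption: unset keys come back falsy (empty string or None)
60 | openai_key = ApiKeyManager.get_api_key(ApiProvider.OPENAI)
61 | if not openai_key:
62 |     raise RuntimeError("Set the OPENAI key with ApiKeyManager.set_api_key before running an engine.")
63 | ```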
--------------------------------------------------------------------------------
/docs/docs/asset-database.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: AssetDatabase in ShortGPT
3 | sidebar_label: AssetDatabase
4 | ---
5 |
6 | # AssetDatabase in ShortGPT
7 |
8 | The `AssetDatabase` in ShortGPT is a powerful tool that allows you to manage both local and remote assets. This guide will provide you with examples of how to use the `AssetDatabase`.
9 |
10 | ## Importing AssetDatabase and AssetType
11 |
12 | ```python
13 | from shortGPT.config.asset_db import AssetDatabase, AssetType
14 | ```
15 |
16 | ## Adding Assets
17 |
18 | You can add both remote and local assets to the `AssetDatabase`.
19 |
20 | ### Adding Remote Assets
21 |
22 | ```python
23 | AssetDatabase.add_remote_asset("minecraft background cube", AssetType.BACKGROUND_VIDEO, "https://www.youtube.com/watch?v=Pt5_GSKIWQM")
24 | AssetDatabase.add_remote_asset('chill music', AssetType.BACKGROUND_MUSIC, "https://www.youtube.com/watch?v=uUu1NcSHg2E")
25 | ```
26 |
27 | ### Adding Local Assets
28 |
29 | ```python
30 | AssetDatabase.add_local_asset('my_music', AssetType.AUDIO, "./my_music.wav")
31 | ```
32 |
33 | ## Asset Types
34 |
35 | The `AssetType` enum is used to specify the type of asset being added to the `AssetDatabase`. The available asset types are:
36 |
37 | - VIDEO
38 | - AUDIO
39 | - IMAGE
40 | - BACKGROUND_MUSIC
41 | - BACKGROUND_VIDEO
42 | - OTHER
43 |
44 | ## Getting Asset Information
45 |
46 | You can retrieve information about an asset using the following methods:
47 |
48 | ### Get Asset Duration
49 |
50 | This method returns the duration in seconds of a video or audio asset. If the asset is neither video nor audio, it returns `None`.
51 |
52 | ```python
53 | AssetDatabase.get_asset_duration('minecraft background cube')
54 | ```
55 |
56 | ### Get Asset Link
57 |
58 | This method returns a source URL, or the path of the resource. If the asset is a YouTube video or audio, it uses `yt-dlp` to extract a download URL or a direct video/audio link.
59 |
60 | ```python
61 | AssetDatabase.get_asset_link('minecraft background cube')
62 | ```
63 |
64 | ## Synchronizing Local Assets
65 |
66 | The `sync_local_assets` method synchronizes the database with the local assets found in the `public/` folder. If no local assets are found, it does nothing.
67 |
68 | ```python
69 | AssetDatabase.sync_local_assets()
70 | ```
71 |
72 | ## Removing Assets
73 |
74 | You can remove an asset from the database by providing its name to the `remove_asset` method.
75 |
76 | ```python
77 | AssetDatabase.remove_asset('name')
78 | ```
79 |
80 | ## Getting Database State
81 |
82 | You can get the state of the asset database as a pandas dataframe using the `get_df` method.
83 |
84 | ```python
85 | AssetDatabase.get_df()
86 | ```
87 |
88 | This method returns a dataframe that includes the name, type, link, source, and timestamp of each asset in the database.
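89 |
90 | For instance, you can inspect the database at a glance (a small sketch; since `get_df` returns a regular pandas dataframe, any pandas operation works on it):
91 |
92 | ```python
93 | df = AssetDatabase.get_df()
94 | print(df.head())  # first few assets: name, type, link, source, ts
95 | print(df[df['type'] == 'background music'])  # filter by asset type (assumes a 'type' column as described above)
96 | ```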
--------------------------------------------------------------------------------
/docs/docs/content-translation-engine.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: ContentTranslationEngine
3 | sidebar_label: ContentTranslationEngine
4 | ---
5 |
6 | The `ContentTranslationEngine` in ShortGPT is a powerful tool that automates the process of translating video content. This guide will provide you with an overview of how to use the `ContentTranslationEngine`.
7 |
8 | ## Importing ContentTranslationEngine
9 |
10 | ```python
11 | from shortGPT.engine.content_translation_engine import ContentTranslationEngine
12 | ```
13 |
14 | ## Initializing ContentTranslationEngine
15 |
16 | The `ContentTranslationEngine` requires a `VoiceModule`, a source URL (either a local video file path or a YouTube link), a target language, and an optional flag indicating whether to use captions for translation.
17 |
18 | ```python
19 | content_engine = ContentTranslationEngine(voice_module, src_url, target_language, use_captions=False)
20 | ```
21 |
22 | ## Example
23 |
24 | ```python
25 | from shortGPT.config.api_db import ApiKeyManager, ApiProvider
26 | from shortGPT.engine.content_translation_engine import ContentTranslationEngine
27 | from shortGPT.config.languages import Language
28 | from shortGPT.audio.edge_voice_module import EdgeTTSVoiceModule, EDGE_TTS_VOICENAME_MAPPING
29 |
30 | # Set API Keys
31 | ApiKeyManager.set_api_key(ApiProvider.OPENAI, "your_openai_key")
32 | ApiKeyManager.set_api_key(ApiProvider.ELEVEN_LABS, "your_eleven_labs_key")
33 |
34 | # Configure the Voice Module
35 | voice_name = EDGE_TTS_VOICENAME_MAPPING[Language.SPANISH]['male']
36 | voice_module = EdgeTTSVoiceModule(voice_name)
37 |
38 | # Configure Content Engine
39 | src_url = "https://www.youtube.com/watch?v=QQz5hj8y1TE"
40 | target_language = Language.SPANISH
41 | use_captions = False
42 | content_engine = ContentTranslationEngine(voice_module, src_url, target_language, use_captions)
43 |
44 | # Generate Content
45 | for step_num, step_logs in content_engine.makeContent():
46 | print(f" {step_logs}")
47 |
48 | # Get Video Output Path
49 | print(content_engine.get_video_output_path())
50 | ```
51 |
52 | ## How ContentTranslationEngine Works
53 |
54 | The `ContentTranslationEngine` works by executing a series of steps defined in the `stepDict` dictionary. Each step is a method that performs a specific task in the video translation process. Here's what each step does:
55 |
56 | 1. `_transcribe_audio`: Transcribes the audio from the source video.
57 | 2. `_translate_content`: Translates the transcribed content from the source language to the target language.
58 | 3. `_generate_translated_audio`: Generates translated audio using the translated content and the specified `VoiceModule`.
59 | 4. `_edit_and_render_video`: Edits and renders the translated video.
60 | 5. `_add_metadata`: Adds metadata to the translated video.
61 |
62 | ## Providing a Source URL
63 |
64 | The `ContentTranslationEngine` requires a source URL, which can be either a local video file path or a YouTube link (a regular YouTube video or a YouTube Short). The engine uses this source URL to retrieve the audio and video content for translation.
65 |
66 | ## Using Captions for Translation
67 |
68 | Set the `use_captions` flag to `True` to render text captions on the generated video, timed to the translated voiceover.
69 |
70 |
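71 | Because the engine accepts any `Language` as a target, one source can be batch-dubbed into several languages. A brief sketch reusing the documented API (voice names are looked up per language, exactly as in the example above):
72 |
73 | ```python
74 | from shortGPT.engine.content_translation_engine import ContentTranslationEngine
75 | from shortGPT.config.languages import Language
76 | from shortGPT.audio.edge_voice_module import EdgeTTSVoiceModule, EDGE_TTS_VOICENAME_MAPPING
77 |
78 | src_url = "https://www.youtube.com/watch?v=QQz5hj8y1TE"
79 | for lang in (Language.SPANISH, Language.GERMAN):
80 |     # Pick a free EdgeTTS voice for each target language
81 |     voice_module = EdgeTTSVoiceModule(EDGE_TTS_VOICENAME_MAPPING[lang]['male'])
82 |     engine = ContentTranslationEngine(voice_module, src_url, lang, use_captions=False)
83 |     for step_num, step_logs in engine.makeContent():
84 |         print(f" {step_logs}")
85 |     print(lang, "->", engine.get_video_output_path())
86 | ```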
--------------------------------------------------------------------------------
/docs/docs/content-video-engine.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: ContentVideoEngine
3 | sidebar_label: ContentVideoEngine
4 | ---
5 |
6 | The `ContentVideoEngine` in ShortGPT is a powerful tool that encapsulates all the automation required to create a video. This guide will provide you with an overview of how to use the `ContentVideoEngine`.
7 |
8 | ## Importing ContentVideoEngine
9 |
10 | ```python
11 | from shortGPT.engine.content_video_engine import ContentVideoEngine
12 | ```
13 |
14 | ## Initializing ContentVideoEngine
15 |
16 | The `ContentVideoEngine` requires a `VoiceModule` and a script, and optionally accepts a background music name, a watermark (a string with the name of your channel / brand), a flag indicating whether the video should be in vertical format, and a language.
17 |
18 | ```python
19 | content_engine = ContentVideoEngine(voice_module, script, background_music_name="", watermark=None, isVerticalFormat=False, language=Language.ENGLISH)
20 | ```
21 | ## Example
22 |
23 | ```python
24 | from shortGPT.config.api_db import ApiKeyManager, ApiProvider
25 | from shortGPT.config.asset_db import AssetDatabase, AssetType
26 | from shortGPT.engine.content_video_engine import ContentVideoEngine
27 | from shortGPT.config.languages import Language
28 | from shortGPT.audio.edge_voice_module import EdgeTTSVoiceModule, EDGE_TTS_VOICENAME_MAPPING
29 |
30 | # Set API Keys
31 | ApiKeyManager.set_api_key(ApiProvider.OPENAI, "your_openai_key")
32 | ApiKeyManager.set_api_key(ApiProvider.PEXELS, "your_pexels_key")
33 |
34 | # Add Assets
35 | AssetDatabase.add_remote_asset('chill music', AssetType.BACKGROUND_MUSIC, "https://www.youtube.com/watch?v=uUu1NcSHg2E")
36 |
37 | # Configure the Voice Module
38 | voice_name = EDGE_TTS_VOICENAME_MAPPING[Language.SPANISH]['male']
39 | voice_module = EdgeTTSVoiceModule(voice_name)
40 |
41 | # Prepare the script
42 | script = "La inteligencia artificial (IA) está revolucionando nuestro mundo de manera sorprendente. Los robots y asistentes virtuales nos ayudan en nuestras tareas diarias y simplifican nuestra vida. En la medicina, la IA permite diagnósticos más precisos y avances en tratamientos. En la industria automotriz, los vehículos autónomos están cambiando la forma en que nos desplazamos. Sin embargo, surgen interrogantes sobre el impacto en el empleo y la ética de su uso. A pesar de los desafíos, la IA promete un futuro emocionante y lleno de posibilidades. ¿Estamos preparados para abrazar este avance tecnológico?"
43 |
44 | # Configure Content Engine
45 | content_engine = ContentVideoEngine(voice_module, script, background_music_name='chill music', language=Language.SPANISH)
46 |
47 | # Generate Content
48 | for step_num, step_logs in content_engine.makeContent():
49 | print(f" {step_logs}")
50 |
51 | # Get Video Output Path
52 | print(content_engine.get_video_output_path())
53 | ```
54 |
55 | In this example, we first set the API keys for OpenAI and Pexels. We then add a remote asset for background music. We configure the voice module to use EdgeTTS for voice synthesis. We prepare a script for the video and then configure the `ContentVideoEngine` with the voice module, script, and background music. We then generate the content and print the output path of the video.
56 | ## How ContentVideoEngine Works
57 |
58 | The `ContentVideoEngine` works by executing a series of steps defined in the `stepDict` dictionary. Each step is a method that performs a specific task in the video creation process. Here's what each step does:
59 |
60 | 1. `_generateTempAudio`: Generates a temporary audio file from the provided script using the specified `VoiceModule`.
61 | 2. `_speedUpAudio`: Speeds up the generated audio file to match the pace of a typical video.
62 | 3. `_timeCaptions`: Generates timed captions for the video based on the script.
63 | 4. `_generateVideoSearchTerms`: Generates search terms to find relevant videos on Pexels based on the script.
64 | 5. `_generateVideoUrls`: Retrieves video URLs from Pexels using the generated search terms.
65 | 6. `_chooseBackgroundMusic`: Chooses background music for the video.
66 | 7. `_prepareBackgroundAssets`: Prepares the background assets for the video.
67 | 8. `_prepareCustomAssets`: Prepares any custom assets for the video.
68 | 9. `_editAndRenderShort`: Edits and renders the video.
69 | 10. `_addMetadata`: Adds metadata to the video.
70 |
71 | ## Using Pexels API
72 |
73 | The `ContentVideoEngine` sources video assets from the Pexels API. To use it, you need to provide your Pexels API key. The engine uses this key to retrieve relevant videos based on the search terms generated from the script.
74 |
75 | ## Providing a Script
76 |
77 | The `ContentVideoEngine` requires a script to generate the video. The script is used to generate the audio, captions, and search terms for sourcing videos from Pexels. The script should be a string containing the narration for the video.
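78 |
79 | For a vertical Short with a channel watermark, the documented flags combine like this (a short sketch reusing `voice_module` and `script` from the example above; "MyChannel" is a placeholder):
80 |
81 | ```python
82 | content_engine = ContentVideoEngine(voice_module, script,
83 |                                     background_music_name='chill music',
84 |                                     watermark="MyChannel",   # placeholder brand name
85 |                                     isVerticalFormat=True,   # produce a 9:16 vertical video
86 |                                     language=Language.SPANISH)
87 | ```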
--------------------------------------------------------------------------------
/docs/docs/facts-short-engine.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: FactsShortEngine
3 | sidebar_label: FactsShortEngine
4 | ---
5 |
6 | The `FactsShortEngine` in ShortGPT is a content engine specifically designed for generating short videos that present interesting facts. This guide will provide you with an overview of how to use the `FactsShortEngine`.
7 |
8 | ## Importing FactsShortEngine
9 |
10 | ```python
11 | from shortGPT.engine.facts_short_engine import FactsShortEngine
12 | ```
13 |
14 | ## Initializing FactsShortEngine
15 |
16 | The `FactsShortEngine` requires a `VoiceModule`, the type of facts you want to generate, a background video name, a background music name, the number of images to include in the video, a watermark (string with the name of your channel / brand), and a language.
17 |
18 | ```python
19 | content_engine = FactsShortEngine(voice_module, facts_type, background_video_name, background_music_name, num_images=None, watermark=None, language=Language.ENGLISH)
20 | ```
21 |
22 | ## Example
23 |
24 | ```python
25 | from shortGPT.config.api_db import ApiKeyManager, ApiProvider
26 | from shortGPT.config.asset_db import AssetDatabase, AssetType
27 | from shortGPT.engine.facts_short_engine import FactsShortEngine
28 | from shortGPT.config.languages import Language
29 | from shortGPT.audio.edge_voice_module import EdgeTTSVoiceModule, EDGE_TTS_VOICENAME_MAPPING
30 |
31 | # Set API Keys
32 | ApiKeyManager.set_api_key(ApiProvider.OPENAI, "your_openai_key")
33 |
34 | # Add Assets
35 | AssetDatabase.add_remote_asset("minecraft background cube", AssetType.BACKGROUND_VIDEO, "https://www.youtube.com/watch?v=Pt5_GSKIWQM")
36 | AssetDatabase.add_remote_asset('chill music', AssetType.BACKGROUND_MUSIC, "https://www.youtube.com/watch?v=uUu1NcSHg2E")
37 |
38 | # Configure the Voice Module
39 | voice_name = EDGE_TTS_VOICENAME_MAPPING[Language.GERMAN]['male']
40 | voice_module = EdgeTTSVoiceModule(voice_name)
41 |
42 | # Configure Content Engine
43 | facts_video_topic = "Interesting scientific facts from the 19th century"
44 | content_engine = FactsShortEngine(voice_module=voice_module,
45 | facts_type=facts_video_topic,
46 | background_video_name="minecraft background cube", # <--- use the same name you saved in the AssetDatabase
47 | background_music_name='chill music', # <--- use the same name you saved in the AssetDatabase
48 | num_images=5, # If you don't want images in your video, put 0 or None
49 | language=Language.GERMAN)
50 |
51 | # Generate Content
52 | for step_num, step_logs in content_engine.makeContent():
53 | print(f" {step_logs}")
54 |
55 | # Get Video Output Path
56 | print(content_engine.get_video_output_path())
57 | ```
58 |
59 | In this example, we first set the API keys for OpenAI. We then add remote assets for the background video and background music. We configure the voice module to use EdgeTTS for voice synthesis. We configure the `FactsShortEngine` with the voice module, facts type, background video name, background music name, number of images, and language. We then generate the content and print the output path of the video.
60 |
61 | ## How FactsShortEngine Works
62 |
63 | The `FactsShortEngine` works by executing a series of steps defined in the `stepDict` dictionary. Each step is a method that performs a specific task in the video creation process. Here's what each step does:
64 |
65 | 1. `_generateScript`: Generates the script for the facts short using the provided `facts_type`.
66 | 2. `_generateTempAudio`: Generates a temporary audio file from the generated script using the specified `VoiceModule`.
67 | 3. `_speedUpAudio`: Speeds up the generated audio file to match the pace of a typical video.
68 | 4. `_timeCaptions`: Generates timed captions for the video based on the script.
69 | 5. `_generateImageSearchTerms`: Generates search terms to find relevant images using the Bing search engine based on the script.
70 | 6. `_generateImageUrls`: Retrieves image URLs from Bing using the generated search terms.
71 | 7. `_chooseBackgroundMusic`: Chooses background music for the video.
72 | 8. `_chooseBackgroundVideo`: Chooses a background video for the video.
73 | 9. `_prepareBackgroundAssets`: Prepares the background assets for the video.
74 | 10. `_prepareCustomAssets`: Prepares any custom assets for the video.
75 | 11. `_editAndRenderShort`: Edits and renders the video.
76 | 12. `_addYoutubeMetadata`: Adds metadata to the video.
77 |
78 |
79 | ## Providing a Facts Type
80 |
81 | The `FactsShortEngine` requires a facts type to generate the script. The facts type should be a string indicating the specific category or topic of facts you want to include in the video.
82 |
83 |
84 | That's it! You have now successfully generated a facts short video using the FactsShortEngine in the ShortGPT framework.
--------------------------------------------------------------------------------
/docs/docs/getting-started.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: ShortGPT Hello World Example
3 | sidebar_label: ShortGPT Hello World Example
4 | ---
5 | # ShortGPT Hello World Example
6 |
7 | This guide provides a basic example of how to use the shortGPT framework. ShortGPT encapsulates the entire process of content automation into `content engines`. In this example, we'll show you how to instantiate the FactsShortEngine, which will automate the production of the "Interesting Facts" niche of Shorts.
8 |
9 | ## Prerequisites
10 |
11 | Before you start, make sure you have [followed the installation steps](./how-to-install) and have your API keys ready.
12 |
13 | ## Code
14 |
15 | ```python
16 | from shortGPT.config.api_db import ApiKeyManager, ApiProvider
17 | from shortGPT.config.asset_db import AssetDatabase, AssetType
18 | from shortGPT.engine.facts_short_engine import FactsShortEngine
19 | from shortGPT.audio.eleven_voice_module import ElevenLabsVoiceModule
20 | from shortGPT.config.languages import Language
21 | from shortGPT.audio.edge_voice_module import EdgeTTSVoiceModule, EDGE_TTS_VOICENAME_MAPPING
22 |
23 | # Set API Keys
24 | ApiKeyManager.set_api_key(ApiProvider.OPENAI, "your_openai_key")
25 | ApiKeyManager.set_api_key(ApiProvider.ELEVEN_LABS, "your_eleven_labs_key")
26 |
27 | # Add Assets
28 | AssetDatabase.add_remote_asset("minecraft background cube", AssetType.BACKGROUND_VIDEO, "https://www.youtube.com/watch?v=Pt5_GSKIWQM")
29 | AssetDatabase.add_remote_asset('chill music', AssetType.BACKGROUND_MUSIC, "https://www.youtube.com/watch?v=uUu1NcSHg2E")
30 | AssetDatabase.add_local_asset('my_music', AssetType.AUDIO, "./my_music.wav")
31 |
32 | USE_ELEVEN_LABS = False
33 | # Configure the ElevenLabs Voice Module
34 | if USE_ELEVEN_LABS:
35 | eleven_labs_key = ApiKeyManager.get_api_key(ApiProvider.ELEVEN_LABS)
36 | voice_module = ElevenLabsVoiceModule(api_key = eleven_labs_key, voiceName="Chris")
37 | else:
38 | ## You can also use the EdgeTTS for Free voice synthesis
39 | voice_name = EDGE_TTS_VOICENAME_MAPPING[Language.GERMAN]['male']
40 | voice_module = EdgeTTSVoiceModule(voice_name)
41 |
42 | # Configure Content Engine
43 | facts_video_topic = "Interesting scientific facts from the 19th century"
44 | content_engine = FactsShortEngine(voiceModule=voice_module,
45 | facts_type=facts_video_topic,
46 | background_video_name="minecraft background cube", # <--- use the same name you saved in the AssetDatabase
47 | background_music_name='chill music', # <--- use the same name you saved in the AssetDatabase
48 | num_images=5, # If you don't want images in your video, put 0 or None
49 | language=Language.GERMAN)
50 |
51 | # Generate Content
52 | for step_num, step_logs in content_engine.makeContent():
53 | print(f" {step_logs}")
54 |
55 | # Get Video Output Path
56 | print(content_engine.get_video_output_path())
57 | ```
58 |
59 | That's it! You have now successfully generated your first content using the shortGPT framework.
60 |
--------------------------------------------------------------------------------
/docs/docs/how-to-install.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Step-by-Step Guide to Installing ShortGPT
3 | sidebar_label: Installation Guide
4 | ---
5 | import Tabs from '@theme/Tabs';
6 | import TabItem from '@theme/TabItem';
7 |
8 | # Launching Your ShortGPT Experience
9 |
10 | This guide will walk you through the process of setting up your machine to run the **ShortGPT** library. The setup requires one external component: FFmpeg. Follow the steps below to get it installed.
11 |
12 | ## Before You Begin
13 |
14 | Make sure you have the following installed on your machine:
15 |
16 | - Python 3.x
17 | - Pip (Python package installer)
18 |
19 | ## Installation Process
20 |
21 | Here are the steps to install FFmpeg and the ShortGPT library. Follow the set of instructions that matches your operating system (Windows, macOS, or Linux).
22 |
23 |
24 |
25 |
27 |
28 | ### Step 1 (Windows): Install FFmpeg (Essential for ShortGPT)
29 |
30 | FFmpeg is a key component of ShortGPT. Download the FFmpeg binaries from the link below:
31 |
32 | > **[👉 Download FFmpeg Here (click on
33 | FFmpeg_Full.msi ) 👈](https://github.com/icedterminal/ffmpeg-installer/releases/tag/6.0.0.20230306)**
34 |
35 | The installer includes ffmpeg and ffprobe and will add them to your PATH. Follow the installation instructions as guided.
36 |
37 | ### Step 2 (Windows): Install the ShortGPT Library
38 |
39 | - Open a terminal or command prompt.
40 | - Execute the following command:
41 |
42 | ```bash
43 | pip install --upgrade shortgpt
44 | ```
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 | ### Step 1 (macOS): Install FFmpeg (Essential for ShortGPT)
55 |
56 | Run the command below in your command line:
57 |
58 | ```bash
59 | brew install ffmpeg
60 | ```
61 |
62 |
63 | ### Step 2 (macOS): Install the ShortGPT Library
64 |
65 | - Open a terminal or command prompt.
66 | - Execute the following command:
67 |
68 | ```bash
69 | pip install --upgrade shortgpt
70 | ```
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 | ### Step 1 (Linux): Install FFmpeg
80 |
81 | Execute the following command:
82 |
83 | ```bash
84 | sudo apt-get install ffmpeg
85 | ```
86 |
87 |
88 | ### Step 2 (Linux): Install the ShortGPT Library
89 |
90 | - Open a terminal or command prompt.
91 | - Execute the following command:
92 |
93 | ```bash
94 | pip install --upgrade shortgpt
95 | ```
96 |
97 |
98 |
99 |
100 |
101 |
102 | And there you have it! Your machine is now ready to run ShortGPT. Dive into the world of automated video content creation with ShortGPT!
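103 |
104 | To double-check the setup, both of these commands should print version/package information:
105 |
106 | ```bash
107 | ffmpeg -version
108 | pip show shortgpt
109 | ```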
--------------------------------------------------------------------------------
/docs/docusaurus.config.js:
--------------------------------------------------------------------------------
1 | /* eslint-disable @typescript-eslint/no-var-requires */
2 | const darkCodeTheme = require('prism-react-renderer/themes/dracula');
3 | const lightCodeTheme = require('prism-react-renderer/themes/github');
4 |
5 | // With JSDoc @type annotations, IDEs can provide config autocompletion
6 | /** @type {import('@docusaurus/types').DocusaurusConfig} */
7 | (
8 | module.exports = {
9 | title: 'ShortGPT',
10 | tagline:
11 | 'Open-Source Framework for AI content automation',
12 | url: 'https://dev.shortgpt.ai',
13 | baseUrl: '/',
14 | favicon: 'img/favicon.ico',
15 | organizationName: 'RayVentura',
16 | projectName: 'ShortGPT',
17 | onBrokenLinks: 'throw',
18 | onBrokenMarkdownLinks: 'throw',
19 | presets: [
20 | [
21 | '@docusaurus/preset-classic',
22 | /** @type {import('@docusaurus/preset-classic').Options} */
23 | ({
24 | docs: {
25 | path: 'docs',
26 | sidebarPath: 'sidebars.js',
27 | editUrl:
28 | 'https://github.com/RayVentura/ShortGPT/edit/stable/docs/',
29 | versions: {
30 | current: {
31 | label: 'current',
32 | },
33 | },
34 | lastVersion: 'current',
35 | showLastUpdateAuthor: true,
36 | showLastUpdateTime: true,
37 | },
38 | theme: {
39 | customCss: require.resolve('./src/css/custom.css'),
40 | },
41 | }),
42 | ],
43 | ],
44 | plugins: ['tailwind-loader'],
45 | themeConfig:
46 | /** @type {import('@docusaurus/preset-classic').ThemeConfig} */
47 | ({
48 |
49 | navbar: {
50 | hideOnScroll: true,
51 | logo: {
52 | alt: 'ShortGPT',
53 | src: 'img/logo.png',
54 | },
55 | items: [
56 | // left
57 | {
58 | label: 'Docs',
59 | to: 'docs/how-to-install',
60 | position: 'right',
61 | },
62 | // right
63 | {
64 | type: 'docsVersionDropdown',
65 | position: 'right',
66 | },
67 | {
68 | href: 'https://github.com/RayVentura/ShortGPT',
69 | position: 'right',
70 | className: 'header-github-link',
71 | },
72 | ],
73 | },
74 | colorMode: {
75 | defaultMode: 'light',
76 | disableSwitch: false,
77 | respectPrefersColorScheme: true,
78 | },
79 | announcementBar: {
80 | content:
81 | '⭐️ If you like ShortGPT, give it a star on GitHub! ⭐️',
82 | },
83 | footer: {
84 | links: [
85 | {
86 | title: 'Docs',
87 | items: [
88 | {
89 | label: 'Getting Started',
90 | to: 'docs/how-to-install',
91 | },
92 |
93 | ],
94 | },
95 | {
96 | title: 'ShortGPT',
97 | items: [
98 | {
99 | label: 'Issues',
100 | to: 'https://github.com/RayVentura/ShortGPT/issues',
101 | },
102 | ],
103 | },
104 | {
105 | title: 'Community',
106 | items: [
107 | {
108 | label: 'Discord',
109 | to: 'https://discord.com/invite/bRTacwYrfX',
110 | },
111 | ],
112 | },
113 | {
114 | title: 'Social',
115 | items: [
116 | {
117 | label: 'GitHub',
118 | to: 'https://github.com/RayVentura/ShortGPT',
119 | },
120 | {
121 | label: 'Twitter',
122 | to: 'https://twitter.com/RayVenturaHQ',
123 | },
124 | ],
125 | },
126 | ],
127 | copyright: `ShortGPT ${new Date().getFullYear()}`,
128 | },
129 | prism: {
130 | theme: lightCodeTheme,
131 | darkTheme: darkCodeTheme,
132 | },
133 | }),
134 | }
135 | );
136 |
--------------------------------------------------------------------------------
/docs/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "shortgpt-documentation",
3 | "version": "3.5.1",
4 | "private": true,
5 | "scripts": {
6 | "build:clean": "rm -rf dist build .docusaurus node_modules",
7 | "docusaurus": "docusaurus",
8 | "start": "docusaurus start",
9 | "build": "docusaurus build",
10 | "swizzle": "docusaurus swizzle",
11 | "deploy": "docusaurus deploy",
12 | "clear": "docusaurus clear",
13 | "serve": "docusaurus serve",
14 | "write-translations": "docusaurus write-translations",
15 | "write-heading-ids": "docusaurus write-heading-ids"
16 | },
17 | "dependencies": {
18 | "@algolia/ui-library": "9.10.2",
19 | "@docsearch/react": "3.5.1",
20 | "@docusaurus/core": "2.4.1",
21 | "@docusaurus/preset-classic": "2.4.1",
22 | "@mdx-js/react": "^1.6.22",
23 | "clsx": "^1.1.1",
24 | "file-loader": "6.2.0",
25 | "my-loaders": "file:plugins/my-loaders",
26 | "postcss": "8.4.25",
27 | "postcss-import": "15.0.0",
28 | "postcss-preset-env": "7.8.2",
29 | "prism-react-renderer": "1.2.1",
30 | "react": "^18.2.0",
31 | "react-dom": "^18.2.0",
32 | "tailwind-loader": "file:plugins/tailwind-loader",
33 | "url-loader": "4.1.1"
34 | },
35 | "devDependencies": {
36 | "postcss-loader": "6.2.1",
37 | "tailwindcss": "npm:@tailwindcss/postcss7-compat"
38 | },
39 | "browserslist": {
40 | "production": [
41 | ">0.5%",
42 | "not dead",
43 | "not op_mini all"
44 | ],
45 | "development": [
46 | "last 1 chrome version",
47 | "last 1 firefox version",
48 | "last 1 safari version"
49 | ]
50 | }
51 | }
--------------------------------------------------------------------------------
/docs/plugins/my-loaders/index.js:
--------------------------------------------------------------------------------
1 | module.exports = function () {
2 | return {
3 | name: 'loaders',
4 | configureWebpack() {
5 | return {
6 | module: {
7 | rules: [
8 | {
9 | test: /\.(gif|png|jpe?g|svg)$/i,
10 | exclude: /\.(mdx?)$/i,
11 | use: ['file-loader', { loader: 'image-webpack-loader' }],
12 | },
13 | ],
14 | },
15 | };
16 | },
17 | };
18 | };
19 |
--------------------------------------------------------------------------------
/docs/plugins/tailwind-loader/index.js:
--------------------------------------------------------------------------------
1 | /* eslint-disable @typescript-eslint/no-var-requires */
2 | module.exports = function () {
3 | return {
4 | name: 'postcss-tailwindcss-loader',
5 | configurePostCss(postcssOptions) {
6 | postcssOptions.plugins.push(
7 | require('postcss-import'),
8 | require('tailwindcss'),
9 | require('postcss-preset-env')({
10 | autoprefixer: {
11 | flexbox: 'no-2009',
12 | },
13 | stage: 4,
14 | })
15 | );
16 | return postcssOptions;
17 | },
18 | };
19 | };
20 |
--------------------------------------------------------------------------------
/docs/sidebars.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Creating a sidebar enables you to:
3 | * - create an ordered group of docs
4 | * - render a sidebar for each doc of that group
5 | * - provide next/previous navigation.
6 | *
7 | * The sidebars can be generated from the filesystem, or explicitly defined here.
8 | *
9 | * Create as many sidebars as you want.
10 | */
11 |
12 | module.exports = {
13 | docs: [
14 | {
15 | type: 'category',
16 | label: 'Introduction',
17 | collapsed: false,
18 | items: ['how-to-install', 'getting-started'],
19 | },
20 | {
21 | type: 'category',
22 | label: 'Content Engines',
23 | collapsed: false,
24 | items: ['content-video-engine', 'content-translation-engine', 'facts-short-engine'],
25 | },
26 | {
27 | type: 'category',
28 | label: 'API Key and Asset',
29 | collapsed: false,
30 | items: ['api-key-manager', 'asset-database'],
31 | },
32 | ],
33 | };
34 |
--------------------------------------------------------------------------------
/docs/src/pages/index.js:
--------------------------------------------------------------------------------
1 | import Layout from '@theme/Layout';
2 | import React from 'react';
3 |
4 | import Home from '../components/Home';
5 |
6 | function HomePage() {
7 | return (
8 |     <Layout
9 |       title="ShortGPT"
10 |       description="Open-Source Framework for AI content automation"
11 |     >
12 |       <Home />
13 |     </Layout>
14 |   );
15 | }
16 |
17 | export default HomePage;
18 |
--------------------------------------------------------------------------------
/docs/static/img/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayVentura/ShortGPT/3df4e0f7a422bf7386565d498bf4521a2544c614/docs/static/img/favicon.ico
--------------------------------------------------------------------------------
/docs/static/img/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayVentura/ShortGPT/3df4e0f7a422bf7386565d498bf4521a2544c614/docs/static/img/logo.png
--------------------------------------------------------------------------------
/docs/tailwind.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | purge: ['./src/**/*.html', './src/**/*.js', './src/**/*.tsx'],
3 | corePlugins: { preflight: false, container: false },
4 | important: '#tailwind',
5 | theme: {
6 | extend: {
7 | maxWidth: {
8 | xxs: '18rem',
9 | },
10 | },
11 | },
12 | };
13 |
--------------------------------------------------------------------------------
/fonts/LuckiestGuy-Regular.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayVentura/ShortGPT/3df4e0f7a422bf7386565d498bf4521a2544c614/fonts/LuckiestGuy-Regular.ttf
--------------------------------------------------------------------------------
/fonts/ObelixProB-cyr.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayVentura/ShortGPT/3df4e0f7a422bf7386565d498bf4521a2544c614/fonts/ObelixProB-cyr.ttf
--------------------------------------------------------------------------------
/fonts/Roboto-Bold.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayVentura/ShortGPT/3df4e0f7a422bf7386565d498bf4521a2544c614/fonts/Roboto-Bold.ttf
--------------------------------------------------------------------------------
/gui/__pycache__/content_automation_ui.cpython-39.pyc.1849492106672:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayVentura/ShortGPT/3df4e0f7a422bf7386565d498bf4521a2544c614/gui/__pycache__/content_automation_ui.cpython-39.pyc.1849492106672
--------------------------------------------------------------------------------
/gui/__pycache__/short_automation_ui.cpython-39.pyc.1870860912944:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayVentura/ShortGPT/3df4e0f7a422bf7386565d498bf4521a2544c614/gui/__pycache__/short_automation_ui.cpython-39.pyc.1870860912944
--------------------------------------------------------------------------------
/gui/asset_components.py:
--------------------------------------------------------------------------------
1 | import os
2 | import platform
3 | import random
4 | import subprocess
5 |
6 | import gradio as gr
7 |
8 | from shortGPT.api_utils.eleven_api import ElevenLabsAPI
9 | from shortGPT.config.api_db import ApiKeyManager
10 | from shortGPT.config.asset_db import AssetDatabase
11 |
12 |
13 | class AssetComponentsUtils:
14 | EDGE_TTS = "Free EdgeTTS (lower quality)"
15 |     ELEVEN_TTS = "ElevenLabs (Very High Quality)"
16 |
17 |
18 | instance_background_video_checkbox = None
19 | instance_background_music_checkbox = None
20 |     instance_voiceChoice: dict[str, gr.Radio] = {}
21 |     instance_voiceChoiceTranslation: dict[str, gr.Radio] = {}
22 |
23 | @classmethod
24 | def getBackgroundVideoChoices(cls):
25 | df = AssetDatabase.get_df()
26 |         choices = list(df.loc[df["type"] == "background video"]["name"])[:20]
27 | return choices
28 |
29 | @classmethod
30 | def getBackgroundMusicChoices(cls):
31 | df = AssetDatabase.get_df()
32 |         choices = list(df.loc[df["type"] == "background music"]["name"])[:20]
33 | return choices
34 |
35 | @classmethod
36 | def getElevenlabsVoices(cls):
37 | api_key = ApiKeyManager.get_api_key("ELEVENLABS_API_KEY")
38 | voices = list(reversed(ElevenLabsAPI(api_key).get_voices().keys()))
39 | return voices
40 |
41 | @classmethod
42 | def start_file(cls, path):
43 | if platform.system() == "Windows":
44 | os.startfile(path)
45 | elif platform.system() == "Darwin":
46 | subprocess.Popen(["open", path])
47 | else:
48 | subprocess.Popen(["xdg-open", path])
49 |
50 | @classmethod
51 | def background_video_checkbox(cls):
52 | if cls.instance_background_video_checkbox is None:
53 | choices = cls.getBackgroundVideoChoices()
54 | cls.instance_background_video_checkbox = gr.CheckboxGroup(
55 | choices=choices,
56 | interactive=True,
57 | label="Choose background video",
58 | value=random.choice(choices)
59 | )
60 | return cls.instance_background_video_checkbox
61 |
62 | @classmethod
63 | def background_music_checkbox(cls):
64 | if cls.instance_background_music_checkbox is None:
65 | choices = cls.getBackgroundMusicChoices()
66 | cls.instance_background_music_checkbox = gr.CheckboxGroup(
67 | choices=choices,
68 | interactive=True,
69 | label="Choose background music",
70 | value=random.choice(choices)
71 | )
72 | return cls.instance_background_music_checkbox
73 |
74 | @classmethod
75 | def voiceChoice(cls, provider: str = None):
76 |         if provider is None:
77 | provider = cls.ELEVEN_TTS
78 | if cls.instance_voiceChoice.get(provider, None) is None:
79 | if provider == cls.ELEVEN_TTS:
80 | cls.instance_voiceChoice[provider] = gr.Radio(
81 | cls.getElevenlabsVoices(),
82 | label="Elevenlabs voice",
83 | value="Chris",
84 | interactive=True,
85 | )
86 | return cls.instance_voiceChoice[provider]
87 |
88 | @classmethod
89 | def voiceChoiceTranslation(cls, provider: str = None):
90 |         if provider is None:
91 | provider = cls.ELEVEN_TTS
92 | if cls.instance_voiceChoiceTranslation.get(provider, None) is None:
93 | if provider == cls.ELEVEN_TTS:
94 | cls.instance_voiceChoiceTranslation[provider] = gr.Radio(
95 | cls.getElevenlabsVoices(),
96 | label="Elevenlabs voice",
97 | value="Chris",
98 | interactive=True,
99 | )
100 | return cls.instance_voiceChoiceTranslation[provider]
101 |
--------------------------------------------------------------------------------
/gui/content_automation_ui.py:
--------------------------------------------------------------------------------
1 | import gradio as gr
3 |
4 | from gui.ui_tab_short_automation import ShortAutomationUI
5 | from gui.ui_tab_video_automation import VideoAutomationUI
6 | from gui.ui_tab_video_translation import VideoTranslationUI
7 |
8 |
9 | class GradioContentAutomationUI:
10 | def __init__(self, shortGPTUI):
11 | self.shortGPTUI = shortGPTUI
12 | self.content_automation_ui = None
13 |
14 | def create_ui(self):
15 | '''Create Gradio interface'''
16 | with gr.Tab("Content Automation") as self.content_automation_ui:
17 | gr.Markdown("# 🏆 Content Automation 🚀")
18 | gr.Markdown("## Choose your desired automation task.")
19 | choice = gr.Radio(['🎬 Automate the creation of shorts', '🎞️ Automate a video with stock assets', '🌐 Automate multilingual video dubbing'], label="Choose an option")
20 | video_automation_ui = VideoAutomationUI(self.shortGPTUI).create_ui()
21 | short_automation_ui = ShortAutomationUI(self.shortGPTUI).create_ui()
22 | video_translation_ui = VideoTranslationUI(self.shortGPTUI).create_ui()
23 | def onChange(x):
24 |             showShorts = x == choice.choices[0][0]
25 |             showVideo = x == choice.choices[1][0]
26 |             showTranslation = x == choice.choices[2][0]
27 | return gr.update(visible=showShorts), gr.update(visible=showVideo), gr.update(visible=showTranslation)
28 | choice.change(onChange, [choice], [short_automation_ui,video_automation_ui, video_translation_ui])
29 | return self.content_automation_ui
30 |
--------------------------------------------------------------------------------
/gui/gui_gradio.py:
--------------------------------------------------------------------------------
1 | import gradio as gr
2 |
3 | from gui.content_automation_ui import GradioContentAutomationUI
4 | from gui.ui_abstract_base import AbstractBaseUI
5 | from gui.ui_components_html import GradioComponentsHTML
6 | from gui.ui_tab_asset_library import AssetLibrary
7 | from gui.ui_tab_config import ConfigUI
8 | from shortGPT.utils.cli import CLI
9 |
10 |
11 | class ShortGptUI(AbstractBaseUI):
12 | '''Class for the GUI. This class is responsible for creating the UI and launching the server.'''
13 |
14 | def __init__(self, colab=False):
15 | super().__init__(ui_name='gradio_shortgpt')
16 | self.colab = colab
17 | CLI.display_header()
18 |
19 | def create_interface(self):
20 | '''Create Gradio interface'''
21 | with gr.Blocks(theme=gr.themes.Default(spacing_size=gr.themes.sizes.spacing_sm), css="footer {visibility: hidden}", title="ShortGPT Demo") as shortGptUI:
22 | with gr.Row(variant='compact'):
23 | gr.HTML(GradioComponentsHTML.get_html_header())
24 |
25 | self.content_automation = GradioContentAutomationUI(shortGptUI).create_ui()
26 | self.asset_library_ui = AssetLibrary().create_ui()
27 | self.config_ui = ConfigUI().create_ui()
28 | return shortGptUI
29 |
30 | def launch(self):
31 | '''Launch the server'''
32 | shortGptUI = self.create_interface()
33 | if not getattr(self, 'colab', False):
34 |             print("\n\n********************* STARTING SHORTGPT **********************")
35 |             print("\nShortGPT is running here 👉 http://localhost:31415\n")
36 |             print("********************* STARTING SHORTGPT **********************\n\n")
37 | shortGptUI.queue().launch(server_port=31415, height=1000, allowed_paths=["public/","videos/","fonts/"], share=self.colab, server_name="0.0.0.0")
38 |
39 |
40 |
41 | import signal
42 | 
43 | 
44 | def signal_handler(sig, frame):
45 |     print("Closing Gradio server...")
46 |     gr.close_all()
47 |     exit(0)
48 | 
49 | 
50 | if __name__ == "__main__":
51 |     # Register the SIGINT handler before launch(), since launch() blocks the main thread.
52 |     signal.signal(signal.SIGINT, signal_handler)
53 |     app = ShortGptUI()
54 |     app.launch()
--------------------------------------------------------------------------------
/gui/ui_abstract_base.py:
--------------------------------------------------------------------------------
1 |
2 | import gradio as gr
3 |
4 |
5 | class AbstractBaseUI:
6 | '''Base class for the GUI. This class is responsible for creating the UI and launching the server.'''
7 | max_choices = 20
8 | ui_asset_dataframe = gr.Dataframe(interactive=False)
9 | LOGO_PATH = "http://localhost:31415/gradio_api/file=public/logo.png"
10 | LOGO_DIM = 64
11 |
12 | def __init__(self, ui_name='default'):
13 | self.ui_name = ui_name
14 | self.content_automation = None
15 | self.asset_library_ui = None
16 | self.config_ui = None
17 |
18 | def create_interface(self):
19 | raise NotImplementedError
20 |
--------------------------------------------------------------------------------
/gui/ui_abstract_component.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | class AbstractComponentUI:
4 | def create_ui(self):
5 | raise NotImplementedError
6 |
--------------------------------------------------------------------------------
/gui/ui_components_html.py:
--------------------------------------------------------------------------------
1 | class GradioComponentsHTML:
2 |
3 | @staticmethod
4 | def get_html_header() -> str:
5 | '''Create HTML for the header'''
6 | return '''
7 |
29 | '''
30 |
31 | @staticmethod
32 | def get_html_video_template(file_url_path, file_name, width="auto", height="auto"):
33 | """
34 | Generate an HTML code snippet for embedding and downloading a video.
35 |
36 | Parameters:
37 | file_url_path (str): The URL or path to the video file.
38 | file_name (str): The name of the video file.
39 | width (str, optional): The width of the video. Defaults to "auto".
40 | height (str, optional): The height of the video. Defaults to "auto".
41 |
42 | Returns:
43 | str: The generated HTML code snippet.
44 | """
45 | html = f'''
46 |
55 | '''
56 | return html
57 |
--------------------------------------------------------------------------------
/installation-notes.md:
--------------------------------------------------------------------------------
1 | **Thanks to Son Tran for the fixes on the installation guide.** Here are the recommended steps for installing ShortGPT:
2 |
3 |
4 | ### You now need Docker to run ShortGPT. If you can't run it with Docker, please use the Google Colab.
5 | # To run ShortGPT with Docker:
6 |
7 |
8 | First make a .env file with the API keys like this:
9 |
10 | ```bash
11 | GEMINI_API_KEY=put_your_gemini_api_key_here
12 | OPENAI_API_KEY=sk-_put_your_openai_api_key_here
13 | ELEVENLABS_API_KEY=put_your_eleven_labs_api_key_here
14 | PEXELS_API_KEY=put_your_pexels_api_key_here
15 | ```
16 |
17 |
18 | To build and run the Docker image:
19 | ```bash
20 | docker build -t short_gpt_docker:latest .
21 | docker run -p 31415:31415 --env-file .env short_gpt_docker:latest
22 | ```
23 | Export Docker image:
24 | ```bash
25 | docker save short_gpt_docker > short_gpt_docker.tar
26 | ```
27 |
28 |
29 |
30 |
31 |
32 | ### Here are the steps to install it from scratch on Linux, Debian 11 x64:
33 |
34 | In short, you need to use:
35 | - Python 3.10
36 | - openai package, then upgrade openai-whisper
37 | - ffmpeg 4.2.3
38 |
39 | ### 1. OS: Debian 11 x64
40 | ```bash
41 | sudo apt update && sudo apt upgrade
42 | sudo apt install wget git libltdl-dev libjpeg-dev libpng-dev libtiff-dev libgif-dev libfreetype6-dev liblcms2-dev libxml2-dev wget build-essential libncursesw5-dev libssl-dev libsqlite3-dev tk-dev libgdbm-dev libc6-dev libbz2-dev libffi-dev zlib1g-dev
43 | ```
44 |
45 | ### 2. Install Python version: 3.10.3
46 | ```bash
47 | wget https://www.python.org/ftp/python/3.10.3/Python-3.10.3.tgz
48 | tar xzf Python-3.10.3.tgz
49 | cd Python-3.10.3
50 | ./configure --enable-optimizations
51 | sudo make altinstall  # altinstall avoids overwriting the system python3
52 | ```
53 |
54 | To check the Python version, use this command:
55 | ```bash
56 | python3.10 -V
57 | ```
58 | To install a package with pip, use this command:
59 | ```bash
60 | pip3.10 install <package_name>
61 | ```
62 |
63 | ### 3. Install ffmpeg version: 4.2.3
64 | ShortGPT works with this version of FFmpeg:
65 |
66 | 3.1. Install Build Dependencies:
67 |
68 | ```bash
69 | sudo apt update
70 | sudo apt build-dep ffmpeg
71 | ```
72 |
73 | 3.2. Clone FFmpeg Source Code:
74 |
75 | ```bash
76 | git clone https://git.ffmpeg.org/ffmpeg.git
77 | cd ffmpeg
78 | git checkout n4.2.3
79 | ```
80 |
81 | 3.3. Configure FFmpeg Build:
82 |
83 | ```bash
84 | ./configure --enable-gpl --enable-version3 --enable-sdl2 --enable-fontconfig --enable-gnutls --enable-iconv --enable-libass --enable-libdav1d --enable-libbluray --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libtheora --enable-libtwolame --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-lzma --enable-zlib --enable-gmp --enable-libvidstab --enable-libvorbis --enable-libvo-amrwbenc --enable-libmysofa --enable-libspeex --enable-libxvid --enable-libaom --enable-libmfx --enable-avisynth --enable-libopenmpt --enable-shared --disable-static
85 | ```
86 |
87 | This step checks for the necessary dependencies and configures the build based on your system.
88 |
89 | 3.4. Build FFmpeg:
90 |
91 | ```bash
92 | make -j$(nproc)
93 | ```
94 |
95 | This step may take some time as it compiles the FFmpeg source code.
96 |
97 | 3.5. Install FFmpeg:
98 |
99 | ```bash
100 | sudo make install
101 | ```
102 |
103 | 3.6. Verify Installation:
104 |
105 | ```bash
106 | ffmpeg -version
107 | ```
108 |
109 | This should display the version information, and you should see version 4.2.3.
110 |
111 | Optional: Update Library Cache:
112 |
113 | ```bash
114 | sudo ldconfig
115 | ```
116 |
117 | This updates the dynamic linker run-time bindings.
118 |
119 | That's it! You should now have FFmpeg version 4.2.3 installed on your Debian 11 system.
120 |
121 | If you are still facing the "libavdevice.so.58" error when running ffmpeg, run this command to fix it (remember to adjust the paths to match your library locations):
122 | ```bash
123 | echo 'export LD_LIBRARY_PATH=/usr/local/lib:/usr/local/lib64/:/usr/local/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH' >> ~/.bashrc
124 | source ~/.bashrc
125 | ```
126 |
127 | ### 4. Upgrade openai-whisper:
128 | ```bash
129 | pip3.10 install -U openai-whisper
130 | ```
131 |
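132 | ### 5. Optional: verify the setup from Python
133 | 
134 | A minimal sanity-check sketch (not part of the original notes) that assumes the steps above completed and that ffmpeg is on your PATH:
135 | 
136 | ```python
137 | # Quick verification of the Debian install steps above.
138 | import shutil
139 | import subprocess
140 | 
141 | # ffmpeg should be on the PATH and report its version (expected: 4.2.3).
142 | assert shutil.which("ffmpeg"), "ffmpeg not found on PATH"
143 | print(subprocess.run(["ffmpeg", "-version"], capture_output=True, text=True).stdout.splitlines()[0])
144 | 
145 | # openai-whisper should import without errors.
146 | import whisper
147 | print("openai-whisper imported successfully")
148 | ```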
--------------------------------------------------------------------------------
/public/subscribe-animation.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayVentura/ShortGPT/3df4e0f7a422bf7386565d498bf4521a2544c614/public/subscribe-animation.mp4
--------------------------------------------------------------------------------
/public/white_reddit_template.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayVentura/ShortGPT/3df4e0f7a422bf7386565d498bf4521a2544c614/public/white_reddit_template.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | python-dotenv
2 | gradio_client==1.5.4
3 | gradio==5.12.0
4 | openai==1.37.0
5 | httpx==0.27.2
6 | tiktoken
7 | tinydb
8 | tinymongo
9 | proglog
10 | yt-dlp>=2025.1.12
11 | torch
12 | torchaudio
13 | ### whisper timestamped
14 | whisper-timestamped
15 | protobuf==3.20.3
16 | pillow==10.4.0
17 | moviepy==2.1.2
18 | progress
19 | questionary
20 | edge-tts
21 |
--------------------------------------------------------------------------------
/runShortGPT.py:
--------------------------------------------------------------------------------
1 | from gui.gui_gradio import ShortGptUI
2 |
3 | app = ShortGptUI(colab=False)
4 | app.launch()
--------------------------------------------------------------------------------
/runShortGPTColab.py:
--------------------------------------------------------------------------------
1 | from gui.gui_gradio import ShortGptUI
2 |
3 | app = ShortGptUI(colab=True)
4 | app.launch()
5 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 | import codecs
3 | import os
4 |
5 | here = os.path.abspath(os.path.dirname(__file__))
6 |
7 | with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
8 | long_description = "\n" + fh.read()
9 |
10 | VERSION = '0.1.31'
11 | DESCRIPTION = 'Automating video and short content creation with AI'
12 | LONG_DESCRIPTION = 'A powerful tool for automating content creation. It simplifies video creation, footage sourcing, voiceover synthesis, and editing tasks.'
13 |
14 |
15 | setup(
16 | name="shortgpt",
17 | version=VERSION,
18 | author="RayVentura",
19 | author_email="",
20 | description=DESCRIPTION,
21 | long_description_content_type="text/markdown",
22 | long_description=long_description,
23 | packages=find_packages(),
24 |     package_data={'': ['*.yaml', '*.json']},  # Include all yaml and json files in the package
25 | install_requires=[
26 | 'python-dotenv',
27 | "openai==1.37.2",
28 | 'tiktoken',
29 | 'tinydb',
30 | 'tinymongo',
31 | 'proglog',
32 | 'yt-dlp',
33 | 'torch',
34 | 'whisper-timestamped',
35 | 'torchaudio',
36 | 'pillow==10.4.0',
37 | 'edge-tts',
38 | 'moviepy==2.1.2',
39 | 'progress',
40 | 'questionary',
41 | ],
42 | keywords=['python', 'video', 'content creation', 'AI', 'automation', 'editing', 'voiceover synthesis', 'video captions', 'asset sourcing', 'tinyDB'],
43 | classifiers=[
44 | "Development Status :: 5 - Production/Stable",
45 | "Intended Audience :: Developers",
46 | "Programming Language :: Python :: 3",
47 | "Operating System :: Unix",
48 | "Operating System :: MacOS :: MacOS X",
49 | "Operating System :: Microsoft :: Windows",
50 | ]
51 | )
--------------------------------------------------------------------------------
/shortGPT/__init__.py:
--------------------------------------------------------------------------------
1 | # import time
2 | # t1 = time.time()
3 | # from . import config
4 | # print("Took", time.time() - t1, "seconds to import config")
5 | # t1 = time.time()
6 | # from . import editing
7 | # print("Took", time.time() - t1, "seconds to import editing")
8 | # t1 = time.time()
9 | # from . import audio
10 | # print("Took", time.time() - t1, "seconds to import audio")
11 | # t1 = time.time()
12 | # from . import engine
13 | # print("Took", time.time() - t1, "seconds to import engine")
14 | # t1 = time.time()
15 | # from . import database
16 | # print("Took", time.time() - t1, "seconds to import database")
17 | # t1 = time.time()
18 | # from . import gpt
19 | # print("Took", time.time() - t1, "seconds to import gpt")
20 | # t1 = time.time()
21 | # from . import tracking
22 | # print("Took", time.time() - t1, "seconds to import tracking")
23 |
24 | # from . import config
25 | # from . import database
26 | # from . import editing_functions
27 | # from . import audio
28 | # from . import engine
29 | # from . import gpt
30 | # from . import tracking
--------------------------------------------------------------------------------
/shortGPT/api_utils/README.md:
--------------------------------------------------------------------------------
1 | # Module: api_utils
2 |
3 | The `api_utils` module provides utility functions for working with different APIs. It includes three files: `image_api.py`, `pexels_api.py`, and `eleven_api.py`. Each file contains functions related to a specific API.
4 |
5 | ## File: image_api.py
6 |
7 | This file contains functions for interacting with the Bing Images API and extracting image URLs from the HTML response.
8 |
9 | ### Functions:
10 |
11 | #### `_extractBingImages(html)`
12 |
13 | This function takes an HTML response as input and extracts image URLs, widths, and heights from it. It uses regular expressions to find the necessary information. The extracted image URLs are returned as a list of dictionaries, where each dictionary contains the URL, width, and height of an image.
14 |
15 | #### `_extractGoogleImages(html)`
16 |
17 | This function takes an HTML response as input and extracts image URLs from it. It uses regular expressions to find the necessary information. The extracted image URLs are returned as a list.
18 |
19 | #### `getBingImages(query, retries=5)`
20 |
21 | This function takes a query string as input and retrieves a list of image URLs from the Bing Images API. It replaces spaces in the query string with `+` and sends a GET request to the API. If the request is successful (status code 200), the HTML response is passed to `_extractBingImages` to extract the image URLs. If the request fails or no images are found, an exception is raised.
22 |
23 | ## File: pexels_api.py
24 |
25 | This file contains functions for interacting with the Pexels Videos API and retrieving video URLs based on a query string.
26 |
27 | ### Functions:
28 |
29 | #### `search_videos(query_string, orientation_landscape=True)`
30 |
31 | This function takes a query string and an optional boolean parameter `orientation_landscape` as input. It sends a GET request to the Pexels Videos API to search for videos based on the query string. The orientation of the videos can be specified as landscape or portrait. The function returns the JSON response from the API.
32 |
33 | #### `getBestVideo(query_string, orientation_landscape=True, used_vids=[])`
34 |
35 | This function takes a query string, an optional boolean parameter `orientation_landscape`, and an optional list `used_vids` as input. It calls the `search_videos` function to retrieve a list of videos based on the query string. It then filters and sorts the videos based on their dimensions and duration, and returns the URL of the best matching video. The `used_vids` parameter can be used to exclude previously used videos from the search results.
36 |
37 | ## File: eleven_api.py
38 |
39 | This file contains functions for interacting with the Eleven API and generating voice recordings based on text input.
40 |
41 | ### Functions:
42 |
43 | #### `getVoices(api_key="")`
44 |
45 | This function takes an optional API key as input and retrieves a dictionary of available voices from the Eleven API. The voices are returned as a dictionary, where the keys are voice names and the values are voice IDs.
46 |
47 | #### `getCharactersFromKey(key)`
48 |
49 | This function takes an API key as input and retrieves the remaining character limit for the given key. It sends a GET request to the Eleven API and extracts the character limit and count from the response.
50 |
51 | #### `generateVoice(text, character, fileName, stability=0.2, clarity=0.1, api_key="")`
52 |
53 | This function takes a text input, a character name, a file name, and optional parameters `stability`, `clarity`, and `api_key` as input. It generates a voice recording using the Eleven API and saves it to the specified file. The character name is used to select the appropriate voice. The stability and clarity parameters control the quality of the voice recording. The API key is required for authentication. If the request is successful, the file name is returned. Otherwise, an empty string is returned.
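54 | 
55 | ## Usage Sketch
56 | 
57 | A minimal, illustrative example of the utilities above. It assumes valid API keys are configured through `ApiKeyManager`; note that in the current code the ElevenLabs helpers are exposed through the `ElevenLabsAPI` class rather than module-level functions:
58 | 
59 | ```python
60 | from shortGPT.api_utils.eleven_api import ElevenLabsAPI
61 | from shortGPT.api_utils.image_api import getBingImages
62 | from shortGPT.api_utils.pexels_api import getBestVideo
63 | from shortGPT.config.api_db import ApiKeyManager
64 | 
65 | # Image search: returns a list of {'url', 'width', 'height'} dicts.
66 | images = getBingImages("science lab", retries=3)
67 | print(images[0]["url"])
68 | 
69 | # Stock footage: returns the URL of the best-matching Pexels video (or None).
70 | video_url = getBestVideo("ocean waves", orientation_landscape=False)
71 | 
72 | # Voice generation through the ElevenLabs API.
73 | api = ElevenLabsAPI(ApiKeyManager.get_api_key("ELEVENLABS_API_KEY"))
74 | api.generate_voice("Hello world!", character="Chris", filename="hello.mp3")
75 | ```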
--------------------------------------------------------------------------------
/shortGPT/api_utils/__init__.py:
--------------------------------------------------------------------------------
1 | from . import image_api
2 | from . import eleven_api
--------------------------------------------------------------------------------
/shortGPT/api_utils/eleven_api.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import requests
4 |
5 |
6 | class ElevenLabsAPI:
7 |
8 | def __init__(self, api_key):
9 | self.api_key = api_key
10 | self.url_base = 'https://api.elevenlabs.io/v1/'
11 | self.get_voices()
12 |
13 | def get_voices(self):
14 | '''Get the list of voices available'''
15 | url = self.url_base + 'voices'
16 | headers = {'accept': 'application/json'}
17 | if self.api_key:
18 | headers['xi-api-key'] = self.api_key
19 | response = requests.get(url, headers=headers)
20 | self.voices = {voice['name']: voice['voice_id'] for voice in response.json()['voices']}
21 | return self.voices
22 |
23 | def get_remaining_characters(self):
24 | '''Get the number of characters remaining'''
25 | url = self.url_base + 'user'
26 | headers = {'accept': '*/*', 'xi-api-key': self.api_key, 'Content-Type': 'application/json'}
27 | response = requests.get(url, headers=headers)
28 |
29 | if response.status_code == 200:
30 | sub = response.json()['subscription']
31 | return sub['character_limit'] - sub['character_count']
32 | else:
33 | raise Exception(response.json()['detail']['message'])
34 |
35 | def generate_voice(self, text, character, filename, stability=0.2, clarity=0.1):
36 | '''Generate a voice'''
37 |         if character not in self.voices:
38 |             raise KeyError(f"{character} is not in the list of available voices: {list(self.voices.keys())}")
39 | 
40 |         voice_id = self.voices[character]
41 | url = f'{self.url_base}text-to-speech/{voice_id}/stream'
42 | headers = {'accept': '*/*', 'xi-api-key': self.api_key, 'Content-Type': 'application/json'}
43 | data = json.dumps({"model_id": "eleven_multilingual_v2", "text": text, "stability": stability, "similarity_boost": clarity})
44 | response = requests.post(url, headers=headers, data=data)
45 |
46 | if response.status_code == 200:
47 | with open(filename, 'wb') as f:
48 | f.write(response.content)
49 | return filename
50 | else:
51 | message = response.text
52 | raise Exception(f'Error in response, {response.status_code} , message: {message}')
53 |
--------------------------------------------------------------------------------
/shortGPT/api_utils/image_api.py:
--------------------------------------------------------------------------------
1 | import json
2 | import re
3 | import urllib.parse
4 | import requests
5 | from requests.adapters import HTTPAdapter
6 | from urllib3 import Retry
7 |
8 | def _extractBingImages(html):
9 | pattern = r'mediaurl=(.*?)&.*?expw=(\d+).*?exph=(\d+)'
10 | matches = re.findall(pattern, html)
11 | result = []
12 |
13 | for match in matches:
14 | url, width, height = match
15 | if url.endswith('.jpg') or url.endswith('.png') or url.endswith('.jpeg'):
16 | result.append({'url': urllib.parse.unquote(url), 'width': int(width), 'height': int(height)})
17 |
18 | return result
19 |
20 |
21 | def _extractGoogleImages(html):
22 | images = []
23 | regex = re.compile(r"AF_initDataCallback\({key: 'ds:1', hash: '2', data:(.*?), sideChannel: {}}\);")
24 | match = regex.search(html)
25 | if match:
26 | dz = json.loads(match.group(1))
27 | for c in dz[56][1][0][0][1][0]:
28 | try:
29 | thing = list(c[0][0].values())[0]
30 | images.append(thing[1][3])
31 |             except (KeyError, IndexError, TypeError):
32 |                 pass
33 | return images
34 |
37 |
38 | def getBingImages(query, retries=5):
39 | query = query.replace(" ", "+")
40 | images = []
41 | tries = 0
42 |
43 | # Create a session with custom retry strategy
44 | session = requests.Session()
45 | retry_strategy = Retry(
46 | total=retries,
47 | backoff_factor=1,
48 | status_forcelist=[500, 502, 503, 504]
49 | )
50 | adapter = HTTPAdapter(max_retries=retry_strategy)
51 | session.mount("https://", adapter)
52 |
53 | while(len(images) == 0 and tries < retries):
54 | try:
55 | # Use verify=False to bypass SSL verification (use with caution)
56 | response = session.get(
57 | f"https://www.bing.com/images/search?q={query}&first=1",
58 | verify=False,
59 | headers={
60 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
61 | }
62 | )
63 | if(response.status_code == 200):
64 | images = _extractBingImages(response.text)
65 | else:
66 | print("Error While making bing image searches", response.text)
67 | raise Exception("Error While making bing image searches")
68 | except requests.exceptions.SSLError as e:
69 | print(f"SSL Error occurred (attempt {tries + 1}/{retries}): {str(e)}")
70 | tries += 1
71 | if tries >= retries:
72 | raise Exception("Max retries reached - SSL Error while making Bing image searches")
73 | continue
74 |
75 | if(images):
76 | return images
77 | raise Exception("Error While making bing image searches")
--------------------------------------------------------------------------------
/shortGPT/api_utils/pexels_api.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 | from shortGPT.config.api_db import ApiKeyManager
4 |
5 |
6 | def search_videos(query_string, orientation_landscape=True):
7 | url = "https://api.pexels.com/videos/search"
8 | headers = {
9 | "Authorization": ApiKeyManager.get_api_key("PEXELS_API_KEY")
10 | }
11 | params = {
12 | "query": query_string,
13 | "orientation": "landscape" if orientation_landscape else "portrait",
14 | "per_page": 15
15 | }
16 |
17 | response = requests.get(url, headers=headers, params=params)
18 | json_data = response.json()
19 | # print(response.headers['X-Ratelimit-Limit'])
20 | # print(response.headers['X-Ratelimit-Remaining'])
21 | # print(response.headers['X-Ratelimit-Reset'])
22 |
23 | return json_data
24 |
25 |
26 | def getBestVideo(query_string, orientation_landscape=True, used_vids=[]):
27 | vids = search_videos(query_string, orientation_landscape)
28 | videos = vids['videos'] # Extract the videos list from JSON
29 |
30 |     # Keep videos that are at least 1920x1080 (landscape) or 1080x1920 (portrait) with a 16:9 aspect ratio
31 | if orientation_landscape:
32 | filtered_videos = [video for video in videos if video['width'] >= 1920 and video['height'] >= 1080 and video['width']/video['height'] == 16/9]
33 | else:
34 | filtered_videos = [video for video in videos if video['width'] >= 1080 and video['height'] >= 1920 and video['height']/video['width'] == 16/9]
35 |
36 |     # Sort the filtered videos by how close their duration is to 15 seconds
37 | sorted_videos = sorted(filtered_videos, key=lambda x: abs(15-int(x['duration'])))
38 |
39 |     # Return the first unused video file at the target resolution
40 | for video in sorted_videos:
41 | for video_file in video['video_files']:
42 | if orientation_landscape:
43 | if video_file['width'] == 1920 and video_file['height'] == 1080:
44 | if not (video_file['link'].split('.hd')[0] in used_vids):
45 | return video_file['link']
46 | else:
47 | if video_file['width'] == 1080 and video_file['height'] == 1920:
48 | if not (video_file['link'].split('.hd')[0] in used_vids):
49 | return video_file['link']
50 | print("NO LINKS found for this round of search with query :", query_string)
51 | return None
52 |
--------------------------------------------------------------------------------
/shortGPT/audio/README.md:
--------------------------------------------------------------------------------
1 | # Audio Module
2 |
3 | The audio module provides a set of functions and classes for working with audio files and performing various operations on them.
4 |
5 | ## audio_utils.py
6 |
7 | This file contains utility functions for audio processing.
8 |
9 | ### downloadYoutubeAudio(url, outputFile)
10 | Downloads audio from a YouTube video given its URL and saves it to the specified output file. Returns the path to the downloaded audio file and its duration.
11 |
12 | ### speedUpAudio(tempAudioPath, outputFile, expected_duration=None)
13 | Speeds up the audio to make it fit under 60 seconds. If no expected duration is given and the audio is longer than 57 seconds, it is sped up to fit within the limit; otherwise it is left unchanged. Returns the path to the processed audio file.
14 |
15 | ### ChunkForAudio(alltext, chunk_size=2500)
16 | Splits a text into chunks of a specified size (default is 2500 characters) to be used for audio generation. Returns a list of text chunks.
17 |
18 | ### audioToText(filename, model_size="base")
19 | Converts an audio file to text using a pre-trained Whisper model. Returns the transcription along with word-level timestamps.
20 |
21 | ### getWordsPerSec(filename)
22 | Calculates the average number of words per second in an audio file. Returns the words per second value.
23 |
24 | ### getCharactersPerSec(filename)
25 | Calculates the average number of characters per second in an audio file. Returns the characters per second value.
26 |
27 | ## audio_duration.py
28 |
29 | This file contains functions for getting the duration of audio files.
30 |
31 | ### get_duration_yt_dlp(url)
32 | Gets the duration of a YouTube video or audio using the yt_dlp library. Returns the duration in seconds.
33 |
34 | ### get_duration_ffprobe(signed_url)
35 | Gets the duration of an audio or video file using the ffprobe command line tool. Returns the duration in seconds.
36 |
37 | ### get_asset_duration(url, isVideo=True)
38 | Gets the duration of an audio or video asset from various sources, including YouTube and cloud storage providers. Returns the URL of the asset and its duration in seconds.
39 |
40 | ### getYoutubeAudioLink(url)
41 | Gets the audio link of a YouTube video given its URL. Returns the audio URL and its duration in seconds.
42 |
43 | ### getYoutubeVideoLink(url)
44 | Gets the video link of a YouTube video given its URL. Returns the video URL and its duration in seconds.
45 |
46 | ## voice_module.py
47 |
48 | This file contains an abstract base class for voice modules.
49 |
50 | ### VoiceModule
51 | An abstract base class that defines the interface for voice modules. Voice modules are responsible for generating voice recordings from text.
52 |
53 | #### update_usage()
54 | Updates the usage statistics of the voice module.
55 |
56 | #### get_remaining_characters()
57 | Gets the number of remaining characters that can be generated using the voice module.
58 |
59 | #### generate_voice(text, outputfile)
60 | Generates a voice recording from the specified text and saves it to the specified output file.
61 |
62 | ## eleven_voice_module.py
63 |
64 | This file contains a voice module implementation for the ElevenLabs API.
65 |
66 | ### ElevenLabsVoiceModule
67 | A voice module implementation for the ElevenLabs API. Requires an API key and a voice name to be initialized.
68 |
69 | #### update_usage()
70 | Updates the usage statistics of the ElevenLabs API.
71 |
72 | #### get_remaining_characters()
73 | Gets the number of remaining characters that can be generated using the ElevenLabs API.
74 |
75 | #### generate_voice(text, outputfile)
76 | Generates a voice recording from the specified text using the ElevenLabs API and saves it to the specified output file. Raises an exception if the API key does not have enough credits to generate the text.
--------------------------------------------------------------------------------
/shortGPT/audio/__init__.py:
--------------------------------------------------------------------------------
1 | from . import audio_utils
2 | from . import eleven_voice_module
3 | from . import audio_duration
--------------------------------------------------------------------------------
/shortGPT/audio/audio_duration.py:
--------------------------------------------------------------------------------
1 | import json
2 | import subprocess
3 |
4 | import yt_dlp
5 |
6 | from shortGPT.editing_utils.handle_videos import getYoutubeVideoLink
7 |
8 |
9 | def get_duration_yt_dlp(url):
10 | ydl_opts = {
11 | "quiet": True,
12 | "no_warnings": True,
13 | "no_color": True,
14 | "no_call_home": True,
15 | "no_check_certificate": True
16 | }
17 | try:
18 | with yt_dlp.YoutubeDL(ydl_opts) as ydl:
19 | dictMeta = ydl.extract_info(url, download=False, )
20 | return dictMeta['duration']
21 | except Exception as e:
22 | raise Exception(f"Failed getting duration from the following video/audio url/path using yt_dlp. {url} {e.args[0]}")
23 |
24 |
25 | def get_duration_ffprobe(signed_url):
26 | try:
27 | cmd = [
28 | "ffprobe",
29 | "-v",
30 | "quiet",
31 | "-print_format",
32 | "json",
33 | "-show_format",
34 | "-i",
35 | signed_url
36 | ]
37 | output = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
38 |
39 | if output.returncode != 0:
40 | return None, f"Error executing command using ffprobe. {output.stderr.strip()}"
41 |
42 | metadata = json.loads(output.stdout)
43 | duration = float(metadata["format"]["duration"])
44 | return duration, ""
45 | except Exception as e:
46 |         print("Failed getting the duration of the requested resource", e.args[0])
47 | return None, ""
48 |
49 |
50 | def get_asset_duration(url, isVideo=True):
51 | if ("youtube.com" in url):
52 | if not isVideo:
53 | url, _ = getYoutubeAudioLink(url)
54 | else:
55 | url, _ = getYoutubeVideoLink(url)
56 |     # Try two different methods to get the duration of the video/audio
57 | duration, err_ffprobe = get_duration_ffprobe(url)
58 | if duration is not None:
59 | return url, duration
60 |
61 | duration = get_duration_yt_dlp(url)
62 | if duration is not None:
63 | return url, duration
64 | print(err_ffprobe)
65 |     raise Exception(f"The url/path {url} does not point to a video/audio. Impossible to extract its duration")
66 |
67 |
68 | def getYoutubeAudioLink(url):
69 | ydl_opts = {
70 | "quiet": True,
71 | "no_warnings": True,
72 | "no_color": True,
73 | "no_call_home": True,
74 | "no_check_certificate": True,
75 | "format": "bestaudio/best"
76 | }
77 | try:
78 | with yt_dlp.YoutubeDL(ydl_opts) as ydl:
79 | dictMeta = ydl.extract_info(
80 | url,
81 | download=False)
82 | return dictMeta['url'], dictMeta['duration']
83 |     except Exception as e:
84 |         print("Failed getting audio link from the following video/url", e.args[0])
85 |         return None, None  # keep the (url, duration) tuple shape for callers that unpack it
86 |
--------------------------------------------------------------------------------
/shortGPT/audio/audio_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | import time
4 |
5 | import yt_dlp
6 |
7 | from shortGPT.audio.audio_duration import get_asset_duration
8 |
9 | CONST_CHARS_PER_SEC = 20.5  # Arrived at this value after transcribing a ton of shorts with Whisper and averaging the characters per second of speech.
10 |
11 | WHISPER_MODEL = None
12 |
13 |
14 |
15 | def downloadYoutubeAudio(url, outputFile):
16 | ydl_opts = {
17 | "quiet": True,
18 | "no_warnings": True,
19 | "no_color": True,
20 | "no_call_home": True,
21 | "no_check_certificate": True,
22 | "format": "bestaudio/best",
23 | "outtmpl": outputFile
24 | }
25 |
26 | attempts = 0
27 | max_attempts = 4
28 | while attempts < max_attempts:
29 | try:
30 | with yt_dlp.YoutubeDL(ydl_opts) as ydl:
31 | dictMeta = ydl.extract_info(
32 | url,
33 | download=True)
34 | if (not os.path.exists(outputFile)):
35 | raise Exception("Audio Download Failed")
36 | return outputFile, dictMeta['duration']
37 | except Exception as e:
38 | attempts += 1
39 | if attempts == max_attempts:
40 |                 raise Exception(f"Failed downloading audio from the following video/url: {url}", e.args[0])
41 | time.sleep(1)
42 | continue
43 | return None
44 |
45 | def speedUpAudio(tempAudioPath, outputFile, expected_duration=None):
46 | tempAudioPath, duration = get_asset_duration(tempAudioPath, False)
47 | if not expected_duration:
48 | if (duration > 57):
49 | subprocess.run(['ffmpeg', '-loglevel', 'error', '-i', tempAudioPath, '-af', f'atempo={(duration/57):.5f}', outputFile])
50 | else:
51 | subprocess.run(['ffmpeg', '-loglevel', 'error', '-i', tempAudioPath, outputFile])
52 | else:
53 | subprocess.run(['ffmpeg', '-loglevel', 'error', '-i', tempAudioPath, '-af', f'atempo={(duration/expected_duration):.5f}', outputFile])
54 | if (os.path.exists(outputFile)):
55 | return outputFile
56 |
57 | def ChunkForAudio(alltext, chunk_size=2500):
58 | alltext_list = alltext.split('.')
59 | chunks = []
60 | curr_chunk = ''
61 | for text in alltext_list:
62 | if len(curr_chunk) + len(text) <= chunk_size:
63 | curr_chunk += text + '.'
64 | else:
65 | chunks.append(curr_chunk)
66 | curr_chunk = text + '.'
67 | if curr_chunk:
68 | chunks.append(curr_chunk)
69 | return chunks
70 |
71 |
72 | def audioToText(filename, model_size="base"):
73 | from whisper_timestamped import load_model, transcribe_timestamped
74 | global WHISPER_MODEL
75 |     if WHISPER_MODEL is None:
76 | WHISPER_MODEL = load_model(model_size)
77 | gen = transcribe_timestamped(WHISPER_MODEL, filename, verbose=False, fp16=False)
78 | return gen
79 |
80 |
81 | def getWordsPerSec(filename):
82 | a = audioToText(filename)
83 | return len(a['text'].split()) / a['segments'][-1]['end']
84 |
85 |
86 | def getCharactersPerSec(filename):
87 | a = audioToText(filename)
88 | return len(a['text']) / a['segments'][-1]['end']
89 |
90 | def run_background_audio_split(sound_file_path):
91 | try:
92 | # Run spleeter command
93 | # Get absolute path of sound file
94 | output_dir = os.path.dirname(sound_file_path)
95 | command = f"spleeter separate -p spleeter:2stems -o '{output_dir}' '{sound_file_path}'"
96 |
97 | process = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
98 |
99 | # If spleeter runs successfully, return the path to the background music file
100 | if process.returncode == 0:
101 | return os.path.join(output_dir, sound_file_path.split("/")[-1].split(".")[0], "accompaniment.wav")
102 | else:
103 | return None
104 | except Exception:
105 | # If spleeter crashes, return None
106 | return None
107 |
--------------------------------------------------------------------------------
/shortGPT/audio/edge_voice_module.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import os
3 | from concurrent.futures import ThreadPoolExecutor
4 |
5 | import edge_tts
6 |
7 | from shortGPT.audio.voice_module import VoiceModule
8 | from shortGPT.config.languages import (EDGE_TTS_VOICENAME_MAPPING,
9 | LANGUAGE_ACRONYM_MAPPING, Language)
10 |
11 |
12 | def run_async_func(loop, func):
13 | return loop.run_until_complete(func)
14 |
15 |
16 | class EdgeTTSVoiceModule(VoiceModule):
17 | def __init__(self, voiceName):
18 | self.voiceName = voiceName
19 | super().__init__()
20 |
21 | def update_usage(self):
22 | return None
23 |
24 | def get_remaining_characters(self):
25 | return 999999999999
26 |
27 | def generate_voice(self, text, outputfile):
28 | loop = asyncio.new_event_loop()
29 | asyncio.set_event_loop(loop)
30 |
31 | try:
32 | with ThreadPoolExecutor() as executor:
33 | loop.run_in_executor(executor, run_async_func, loop, self.async_generate_voice(text, outputfile))
34 |
35 | finally:
36 | loop.close()
37 | if not os.path.exists(outputfile):
38 | print("An error happened during edge_tts audio generation, no output audio generated")
39 | raise Exception("An error happened during edge_tts audio generation, no output audio generated")
40 | return outputfile
41 |
42 | async def async_generate_voice(self, text, outputfile):
43 | try:
44 | communicate = edge_tts.Communicate(text, self.voiceName)
45 | with open(outputfile, "wb") as file:
46 | async for chunk in communicate.stream():
47 | if chunk["type"] == "audio":
48 | file.write(chunk["data"])
49 | except Exception as e:
50 | print("Error generating audio using edge_tts", e)
51 | raise Exception("An error happened during edge_tts audio generation, no output audio generated", e)
52 | return outputfile
53 |
--------------------------------------------------------------------------------
/shortGPT/audio/eleven_voice_module.py:
--------------------------------------------------------------------------------
1 | from shortGPT.api_utils.eleven_api import ElevenLabsAPI
2 | from shortGPT.audio.voice_module import VoiceModule
3 |
4 |
5 | class ElevenLabsVoiceModule(VoiceModule):
6 | def __init__(self, api_key, voiceName, checkElevenCredits=False):
7 | self.api_key = api_key
8 | self.voiceName = voiceName
9 | self.remaining_credits = None
10 | self.eleven_labs_api = ElevenLabsAPI(self.api_key)
11 | self.update_usage()
12 | if checkElevenCredits and self.get_remaining_characters() < 1200:
13 |             raise Exception(f"Your ElevenLabs API KEY doesn't have enough credits ({self.remaining_credits} characters remaining). Minimum required: 1200 characters (equivalent to a 45sec short)")
14 | super().__init__()
15 |
16 | def update_usage(self):
17 | self.remaining_credits = self.eleven_labs_api.get_remaining_characters()
18 | return self.remaining_credits
19 |
20 | def get_remaining_characters(self):
21 | return self.remaining_credits if self.remaining_credits else self.eleven_labs_api.get_remaining_characters()
22 |
23 | def generate_voice(self, text, outputfile):
24 | if self.get_remaining_characters() >= len(text):
25 |             file_path = self.eleven_labs_api.generate_voice(text=text, character=self.voiceName, filename=outputfile)
26 | self.update_usage()
27 | return file_path
28 | else:
29 | raise Exception(f"You cannot generate {len(text)} characters as your ElevenLabs key has only {self.remaining_credits} characters remaining")
30 |
--------------------------------------------------------------------------------
/shortGPT/audio/voice_module.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | class VoiceModule(ABC):
3 |
4 | def __init__(self):
5 | pass
6 | @abstractmethod
7 | def update_usage(self):
8 | pass
9 |
10 | @abstractmethod
11 | def get_remaining_characters(self):
12 | pass
13 |
14 | @abstractmethod
15 | def generate_voice(self,text, outputfile):
16 | pass
--------------------------------------------------------------------------------
/shortGPT/config/README.md:
--------------------------------------------------------------------------------
1 | # Module: config
2 |
3 | The `config` module contains various files and functions related to configuration settings and utilities.
4 |
5 | ## File: config.py
6 |
7 | This file contains functions for reading and writing YAML files, as well as loading local assets specified in a YAML configuration file.
8 |
9 | ### Functions:
10 |
11 | #### `read_yaml_config(file_path: str) -> dict`
12 |
13 | This function reads and returns the contents of a YAML file as a dictionary.
14 |
15 | Parameters:
16 | - `file_path` - The path to the YAML file to be read.
17 |
18 | Returns:
19 | - A dictionary containing the contents of the YAML file.
20 |
21 | #### `write_yaml_config(file_path: str, data: dict)`
22 |
23 | This function writes a dictionary to a YAML file.
24 |
25 | Parameters:
26 | - `file_path` - The path to the YAML file to be written.
27 | - `data` - The dictionary to be written to the YAML file.
28 |
29 | #### `load_editing_assets() -> dict`
30 |
31 | This function loads all local assets from the static-assets folder specified in the yaml_config.
32 |
33 | Returns:
34 | - A dictionary containing the YAML configuration with updated local assets.
35 |
36 | ## File: asset_db.py
37 |
38 | This file contains a class `AssetDatabase` that provides methods for managing a database of assets.
39 |
40 | ### Class: AssetDatabase
41 |
42 | This class represents a database of assets and provides methods for adding, removing, and retrieving assets.
43 |
44 | Methods:
45 |
46 | #### `__init__()`
47 |
48 | This method initializes the `AssetDatabase` object. It creates the local and remote asset collections if they don't already exist.
49 |
50 | #### `asset_exists(name)`
51 |
52 | This method checks if an asset with the given name exists in the database.
53 |
54 | Parameters:
55 | - `name` - The name of the asset.
56 |
57 | Returns:
58 | - `True` if the asset exists, `False` otherwise.
59 |
60 | #### `add_local_asset(name, type, path)`
61 |
62 | This method adds a local asset to the database.
63 |
64 | Parameters:
65 | - `name` - The name of the asset.
66 | - `type` - The type of the asset.
67 | - `path` - The path to the asset file.
68 |
69 | #### `add_remote_asset(name, type, url)`
70 |
71 | This method adds a remote asset to the database.
72 |
73 | Parameters:
74 | - `name` - The name of the asset.
75 | - `type` - The type of the asset.
76 | - `url` - The URL of the remote asset.
77 |
78 | #### `remove_asset(name)`
79 |
80 | This method removes an asset from the database.
81 |
82 | Parameters:
83 | - `name` - The name of the asset.
84 |
85 | #### `get_df()`
86 |
87 | This method returns a pandas DataFrame with specific asset details.
88 |
89 | Returns:
90 | - A pandas DataFrame containing the asset details.
91 |
92 | #### `sync_local_assets()`
93 |
94 | This method loads all local assets from the static-assets folder into the database.
95 |
96 | #### `getAssetLink(key)`
97 |
98 | This method returns the link or path of an asset with the given key.
99 |
100 | Parameters:
101 | - `key` - The key of the asset.
102 |
103 | Returns:
104 | - The link or path of the asset.
105 |
106 | #### `getAssetDuration(key)`
107 |
108 | This method returns the duration of an asset with the given key.
109 |
110 | Parameters:
111 | - `key` - The key of the asset.
112 |
113 | Returns:
114 | - The duration of the asset.
115 |
116 | #### `updateLocalAsset(key: str)`
117 |
118 | This method updates the local asset with the given key.
119 |
120 | Parameters:
121 | - `key` - The key of the asset.
122 |
123 | Returns:
124 | - The file path and duration of the updated asset.
125 |
126 | #### `updateYoutubeAsset(key: str)`
127 |
128 | This method updates the YouTube asset with the given key.
129 |
130 | Parameters:
131 | - `key` - The key of the asset.
132 |
133 | Returns:
134 | - The remote URL and duration of the updated asset.
135 |
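A short sketch of how these methods fit together (the asset name, type, and URL are illustrative):

```python
from shortGPT.config.asset_db import AssetDatabase

asset_db = AssetDatabase()
if not asset_db.asset_exists("chill music"):
    asset_db.add_remote_asset("chill music", "background music", "https://example.com/music.mp3")
print(asset_db.getAssetLink("chill music"))
print(asset_db.get_df())  # pandas DataFrame listing all assets
```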
136 | ## File: api_db.py
137 |
138 | This file defines the `ApiProvider` enumeration of supported API providers (OpenAI, Gemini, ElevenLabs, Pexels) and the `ApiKeyManager` class for storing and retrieving API keys.
139 |
140 | ### Class: ApiKeyManager
141 |
142 | #### `get_api_key(key)`
143 |
144 | This class method retrieves the API key with the given name, checking the database first and falling back to environment variables.
145 |
146 | Parameters:
147 | - `key` - The name of the API key, as a string or an `ApiProvider` member.
148 |
149 | Returns:
150 | - The API key, or an empty string if it is not found.
151 |
152 | #### `set_api_key(key, value)`
153 |
154 | This class method sets the API key with the given name to the specified value and persists it in the database.
155 |
156 | Parameters:
157 | - `key` - The name of the API key, as a string or an `ApiProvider` member.
158 | - `value` - The value of the API key.
159 |
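A usage sketch matching the implementation in this module (the key value is a placeholder):

```python
from shortGPT.config.api_db import ApiKeyManager, ApiProvider

# Keys can be addressed by ApiProvider member or by their raw string name.
ApiKeyManager.set_api_key(ApiProvider.OPENAI, "sk-...")  # placeholder value
openai_key = ApiKeyManager.get_api_key(ApiProvider.OPENAI)
```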
160 | ## File: languages.py
161 |
162 | This file contains an enumeration class `Language` that represents different languages.
163 |
164 | ### Enum: Language
165 |
166 | This enumeration class represents different languages and provides a list of supported languages.
167 |
168 | Supported Languages:
169 | - ENGLISH
170 | - SPANISH
171 | - FRENCH
172 | - ARABIC
173 | - GERMAN
174 | - POLISH
175 | - ITALIAN
176 | - PORTUGUESE
177 |
178 | ## File: path_utils.py
179 |
180 | This file contains utility functions for searching for program paths.
181 |
182 | ### Functions:
183 |
184 | #### `search_program(program_name)`
185 |
186 | This function searches for the specified program and returns its path.
187 |
188 | Parameters:
189 | - `program_name` - The name of the program to search for.
190 |
191 | Returns:
192 | - The path of the program, or None if the program is not found.
193 |
194 | #### `get_program_path(program_name)`
195 |
196 | This function retrieves the path of the specified program.
197 |
198 | Parameters:
199 | - `program_name` - The name of the program.
200 |
201 | Returns:
202 | - The path of the program, or None if the program is not found.
203 |
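A usage sketch, mirroring how the engine checks for FFmpeg:

```python
from shortGPT.config.path_utils import get_program_path

ffmpeg_path = get_program_path("ffmpeg")
if ffmpeg_path is None:
    raise RuntimeError("ffmpeg was not found on the PATH")
```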
--------------------------------------------------------------------------------
/shortGPT/config/__init__.py:
--------------------------------------------------------------------------------
1 | from . import config
--------------------------------------------------------------------------------
/shortGPT/config/api_db.py:
--------------------------------------------------------------------------------
1 | import enum
2 | import os
3 | from shortGPT.database.db_document import TinyMongoDocument
4 | from dotenv import load_dotenv
5 | load_dotenv('./.env')
6 | class ApiProvider(enum.Enum):
7 | OPENAI = "OPENAI_API_KEY"
8 | GEMINI = "GEMINI_API_KEY"
9 | ELEVEN_LABS = "ELEVENLABS_API_KEY"
10 | PEXELS = "PEXELS_API_KEY"
11 |
12 |
13 | class ApiKeyManager:
14 | api_key_doc_manager = TinyMongoDocument("api_db", "api_keys", "key_doc", create=True)
15 |
16 | @classmethod
17 | def get_api_key(cls, key: str | ApiProvider):
18 | if isinstance(key, ApiProvider):
19 | key = key.value
20 |
21 | # Check if the key is present in the database
22 | api_key = cls.api_key_doc_manager._get(key)
23 | if api_key:
24 | return api_key
25 |
26 | # If not found in the database, check in the environment variables
27 | env_key = key.replace(" ", "_").upper()
28 | api_key = os.environ.get(env_key)
29 | if api_key:
30 | return api_key
31 |
32 | return ""
33 |
34 | @classmethod
35 | def set_api_key(cls, key: str | ApiProvider, value: str):
36 | if isinstance(key, ApiProvider):
37 | key = key.value
38 | return cls.api_key_doc_manager._save({key: value})
--------------------------------------------------------------------------------
/shortGPT/config/config.py:
--------------------------------------------------------------------------------
1 | import yaml
2 | import os
3 | from dotenv import load_dotenv
4 |
5 | load_dotenv()
6 |
7 | ELEVEN_LABS_KEY = os.getenv('ELEVEN_LABS_API_KEY')
8 | OPENAI_KEY = os.getenv('OPENAI_API_KEY')
9 | PLAY_HT_USERID = os.getenv('PLAY_HT_USERID')
10 | PLAY_HT_API_KEY = os.getenv('PLAY_HT_API_KEY')
11 |
12 |
13 | def read_yaml_config(file_path: str) -> dict:
14 | """Reads and returns the contents of a YAML file as dictionary"""
15 | with open(file_path, 'r') as file:
16 | contents = yaml.safe_load(file)
17 | return contents
18 |
19 | def write_yaml_config(file_path: str, data: dict):
20 | """Writes a dictionary to a YAML file"""
21 | with open(file_path, 'w') as file:
22 | yaml.dump(data, file)
23 |
24 | def load_editing_assets() -> dict:
25 | """Loads all local assets from the static-assets folder specified in the yaml_config"""
26 | yaml_config = read_yaml_config("public.yaml")
27 | if yaml_config['local-assets'] is None:
28 | yaml_config['local-assets'] = {}
29 | # Create a copy of the dictionary before iterating over it
30 | local_paths = []
31 | if yaml_config['local-assets'] != {}:
32 | local_assets = yaml_config['local-assets'].copy()
33 | # Removing local paths that don't exist
34 | for key in local_assets:
35 | asset = local_assets[key]
36 | if isinstance(asset, str):
37 | filePath = local_assets[key]
38 | else:
39 | filePath = local_assets[key]['path']
40 | if not os.path.exists(filePath):
41 | del yaml_config['local-assets'][key]
42 | else:
43 | local_paths.append(filePath)
44 |
45 | folder_path = 'public'
46 | for foldername, subfolders, filenames in os.walk(folder_path):
47 | for filename in filenames:
48 | file_path = os.path.join(foldername, filename).replace("\\", "/")
49 | if file_path not in local_paths:
50 | yaml_config['local-assets'][filename] = file_path
51 |
52 | write_yaml_config("public.yaml", yaml_config)
53 |
54 | return yaml_config
55 |
56 |
57 | # print(load_editing_assets())
58 | # print(read_yaml_config("editing_assets.yaml")['local-assets'])
59 |
--------------------------------------------------------------------------------
/shortGPT/config/path_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import platform
3 | import sys
4 | import subprocess
5 | import tempfile
6 |
7 | def search_program(program_name):
8 | try:
9 | search_cmd = "where" if platform.system() == "Windows" else "which"
10 | return subprocess.check_output([search_cmd, program_name]).decode().strip()
11 | except subprocess.CalledProcessError:
12 | return None
13 |
14 | def get_program_path(program_name):
15 | program_path = search_program(program_name)
16 | return program_path
17 |
18 | def is_running_in_colab():
19 | return 'COLAB_GPU' in os.environ
20 |
21 | def handle_path(path, extension=".mp4"):
22 | if 'https' in path:
23 | if is_running_in_colab():
24 | temp_file = tempfile.NamedTemporaryFile(suffix=extension, delete=False)
25 | # The '-y' option overwrites the output file if it already exists.
26 | command = ['ffmpeg', '-y', '-i', path, temp_file.name]
27 | subprocess.run(command, check=True)
28 | temp_file.close()
29 | return temp_file.name
30 | return path
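A usage sketch for `handle_path`: outside Colab the path is returned unchanged, while on Colab a remote URL is first re-encoded by ffmpeg into a local temporary file (the URL is illustrative):

```python
from shortGPT.config.path_utils import handle_path

local_or_remote = handle_path("https://example.com/stream.m3u8", extension=".mp4")
```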
--------------------------------------------------------------------------------
/shortGPT/database/README.md:
--------------------------------------------------------------------------------
1 | # Database Module Documentation
2 |
3 | The `database` module provides classes for managing database documents and data in the ShortGPT application. The module consists of three files:
4 |
5 | - `content_data_manager.py`: Defines the `ContentDataManager` class, which manages the content data for a document in the database.
6 | - `content_database.py`: Defines the `ContentDatabase` class, which provides methods for creating and accessing `ContentDataManager` instances.
7 | - `db_document.py`: Defines the `DatabaseDocument` abstract base class and the `TinyMongoDocument` class, which represents a document in a TinyMongo database.
8 |
9 | ## File: content_data_manager.py
10 |
11 | The `content_data_manager.py` file contains the `ContentDataManager` class, which is responsible for managing the content data for a document in the database.
12 |
13 | ### Class: ContentDataManager
14 |
15 | #### `__init__(self, db_doc: DatabaseDocument, content_type: str, new=False)`
16 |
17 | - Initializes a new instance of the `ContentDataManager` class.
18 | - Parameters:
19 | - `db_doc`: The `DatabaseDocument` instance representing the document in the database.
20 | - `content_type`: The type of content to be managed by the `ContentDataManager`.
21 | - `new`: (Optional) A boolean flag indicating whether the document is new or existing. Default is `False`.
22 |
23 | #### `save(self, key, value)`
24 |
25 | - Saves the specified key-value pair to the document.
26 | - Parameters:
27 | - `key`: The key of the data to be saved.
28 | - `value`: The value of the data to be saved.
29 |
30 | #### `get(self, key)`
31 |
32 | - Retrieves the value associated with the specified key from the document.
33 | - Parameters:
34 | - `key`: The key of the data to be retrieved.
35 | - Returns:
36 | - The value associated with the specified key.
37 |
38 | #### `_getId(self)`
39 |
40 | - Retrieves the ID of the document.
41 | - Returns:
42 | - The ID of the document.
43 |
44 | #### `delete(self)`
45 |
46 | - Deletes the document from the database.
47 |
48 | #### `__str__(self)`
49 |
50 | - Returns a string representation of the document.
51 |
52 | ## File: content_database.py
53 |
54 | The `content_database.py` file contains the `ContentDatabase` class, which provides methods for creating and accessing `ContentDataManager` instances.
55 |
56 | ### Class: ContentDatabase
57 |
58 | #### `instanciateContentDataManager(self, id: str, content_type: str, new=False)`
59 |
60 | - Creates a new `ContentDataManager` instance for the specified document ID and content type.
61 | - Parameters:
62 | - `id`: The ID of the document.
63 | - `content_type`: The type of content to be managed by the `ContentDataManager`.
64 | - `new`: (Optional) A boolean flag indicating whether the document is new or existing. Default is `False`.
65 | - Returns:
66 | - A new `ContentDataManager` instance.
67 |
68 | #### `getContentDataManager(self, id, content_type: str)`
69 |
70 | - Retrieves an existing `ContentDataManager` instance for the specified document ID and content type.
71 | - Parameters:
72 | - `id`: The ID of the document.
73 | - `content_type`: The type of content to be managed by the `ContentDataManager`.
74 | - Returns:
75 | - The existing `ContentDataManager` instance, or `None` if not found.
76 |
77 | #### `createContentDataManager(self, content_type: str) -> ContentDataManager`
78 |
79 | - Creates a new `ContentDataManager` instance for a new document with the specified content type.
80 | - Parameters:
81 | - `content_type`: The type of content to be managed by the `ContentDataManager`.
82 | - Returns:
83 | - A new `ContentDataManager` instance.
84 |
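A short sketch tying these classes together (the content type and key are illustrative):

```python
from shortGPT.database.content_database import ContentDatabase

content_db = ContentDatabase()
manager = content_db.createContentDataManager("facts_shorts")
manager.save("script", "Did you know...")
print(manager.get("script"))
print(manager._getId())  # 24-character hex id generated at creation time
```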
85 | ## File: db_document.py
86 |
87 | The `db_document.py` file contains the `DatabaseDocument` abstract base class and the `TinyMongoDocument` class, which represents a document in a TinyMongo database.
88 |
89 | ### Abstract Class: DatabaseDocument
90 |
91 | - An abstract base class that defines the interface for a database document.
92 | - Subclasses must implement the abstract methods:
93 | - `_save(self, data)`
94 | - `_get(self, key)`
95 | - `_getId(self)`
96 | - `__str__(self)`
97 | - `_delete(self)`
98 |
99 | ### Class: TinyMongoDocument
100 |
101 | - Represents a document in a TinyMongo database.
102 | - Inherits from the `DatabaseDocument` abstract base class.
103 |
104 | #### `__init__(self, db_name: str, collection_name: str, document_id: str, create=False)`
105 |
106 | - Initializes a new instance of the `TinyMongoDocument` class.
107 | - Parameters:
108 | - `db_name`: The name of the database.
109 | - `collection_name`: The name of the collection.
110 | - `document_id`: The ID of the document.
111 | - `create`: (Optional) A boolean flag indicating whether to create the document if it doesn't exist. Default is `False`.
112 |
113 | #### `exists(self)`
114 |
115 | - Checks if the document exists in the database.
116 | - Returns:
117 | - `True` if the document exists, `False` otherwise.
118 |
119 | #### `_save(self, data)`
120 |
121 | - Saves the specified data to the document.
122 | - Parameters:
123 | - `data`: The data to be saved.
124 |
125 | #### `_get(self, key=None)`
126 |
127 | - Retrieves the value associated with the specified key from the document.
128 | - Parameters:
129 | - `key`: (Optional) The key of the data to be retrieved. If not specified, returns the entire document.
130 | - Returns:
131 | - The value associated with the specified key, or the entire document if no key is specified.
132 |
133 | #### `_delete(self, key)`
134 |
135 | - Deletes the specified key from the document.
136 | - Parameters:
137 | - `key`: The key to be deleted.
138 |
139 | #### `_getId(self)`
140 |
141 | - Retrieves the ID of the document.
142 | - Returns:
143 | - The ID of the document.
144 |
145 | #### `__str__(self)`
146 |
147 | - Returns a string representation of the document.
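Note that `_save` treats dot-separated keys as paths into nested dictionaries (it calls `_get` internally, which is why the class lock must be reentrant); a small sketch with illustrative ids and values:

```python
from shortGPT.database.db_document import TinyMongoDocument

doc = TinyMongoDocument("content_db", "content_documents", "demo_document_id", create=True)
doc._save({"metadata.title": "My short"})  # stored as {"metadata": {"title": "My short"}}
print(doc._get("metadata.title"))          # -> "My short"
```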
--------------------------------------------------------------------------------
/shortGPT/database/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayVentura/ShortGPT/3df4e0f7a422bf7386565d498bf4521a2544c614/shortGPT/database/__init__.py
--------------------------------------------------------------------------------
/shortGPT/database/content_data_manager.py:
--------------------------------------------------------------------------------
1 | from shortGPT.database.db_document import AbstractDatabaseDocument
2 |
3 |
4 | class ContentDataManager():
5 |
6 | def __init__(self, db_doc: AbstractDatabaseDocument, content_type: str, new=False):
7 | self.contentType = content_type
8 | self.db_doc = db_doc
9 | if new:
10 | self.db_doc._save({
11 | 'content_type': content_type,
12 | 'ready_to_upload': False,
13 | 'last_completed_step': 0,
14 | })
15 |
16 | def save(self, key, value):
17 | self.db_doc._save({key: value})
18 |
19 | def get(self, key):
20 | return self.db_doc._get(key)
21 |
22 | def _getId(self):
23 | return self.db_doc._getId()
24 |
25 | def delete(self):
26 | self.db_doc.delete()
27 |
28 | def __str__(self):
29 | return self.db_doc.__str__()
30 |
--------------------------------------------------------------------------------
/shortGPT/database/content_database.py:
--------------------------------------------------------------------------------
1 | from uuid import uuid4
2 | from shortGPT.database.db_document import TINY_MONGO_DATABASE, TinyMongoDocument
3 |
4 | from shortGPT.database.content_data_manager import ContentDataManager
5 | class ContentDatabase:
6 | def __init__(self):
7 | self.content_collection = TINY_MONGO_DATABASE["content_db"]["content_documents"]
8 |
9 | def instanciateContentDataManager(self, id: str, content_type: str, new=False):
10 | db_doc = TinyMongoDocument("content_db", "content_documents", id)
11 | return ContentDataManager(db_doc, content_type, new)
12 |
13 | def getContentDataManager(self, id, content_type: str):
14 | try:
15 | db_doc = TinyMongoDocument("content_db", "content_documents", id)
16 | return ContentDataManager(db_doc, content_type, False)
17 | except Exception:
18 | return None
19 |
20 | def createContentDataManager(self, content_type: str) -> ContentDataManager:
21 | try:
22 | new_short_id = uuid4().hex[:24]
23 | db_doc = TinyMongoDocument("content_db", "content_documents", new_short_id, True)
24 | return ContentDataManager(db_doc, content_type, True)
25 | except Exception:
26 | return None
27 |
28 |
--------------------------------------------------------------------------------
/shortGPT/database/db_document.py:
--------------------------------------------------------------------------------
1 | import threading
2 | from abc import ABC, abstractmethod
3 |
4 | import tinydb
5 | import tinymongo as tm
6 |
7 |
8 | class AbstractDatabaseDocument(ABC):
9 |
10 | @abstractmethod
11 | def _save(self, data):
12 | '''Save the data in the database'''
13 | pass
14 |
15 | @abstractmethod
16 | def _get(self, key):
17 | '''Get the data from the database'''
18 | pass
19 |
20 | @abstractmethod
21 | def _getId(self):
22 | '''Get the id of the document'''
23 | pass
24 |
25 | @abstractmethod
26 | def __str__(self):
27 | '''Return the string representation of the document'''
28 | pass
29 |
30 | @abstractmethod
31 | def _delete(self):
32 | '''Delete the document'''
33 | pass
34 |
35 |
36 | class TinyMongoClient(tm.TinyMongoClient):
37 | @property
38 | def _storage(self):
39 | return tinydb.storages.JSONStorage
40 |
41 |
42 | TINY_MONGO_DATABASE = TinyMongoClient("./.database")
43 |
44 |
45 | class TinyMongoDocument(AbstractDatabaseDocument):
46 | _lock = threading.RLock()  # reentrant: _save() calls _get() while already holding the lock
47 |
48 | def __init__(self, db_name: str, collection_name: str, document_id: str, create=False):
49 | self.collection = TINY_MONGO_DATABASE[db_name][collection_name]
50 | self.collection_name = collection_name
51 | self.document_id = document_id
52 | if (not self.exists()):
53 | if create:
54 | self.collection.insert_one({"_id": document_id})
55 | else:
56 | raise Exception(f"The document with id {document_id} in collection {collection_name} of database {db_name} does not exist")
57 |
58 | def exists(self):
59 | with self._lock:
60 | return self.collection.find({"_id": self.document_id}).count() == 1
61 |
62 | def _save(self, data):
63 | with self._lock:
64 | try:
65 | update_data = {'$set': {}}
66 | for key, value in data.items():
67 | path_parts = key.split(".")
68 |
69 | if len(path_parts) > 1:
70 | root_key = ".".join(path_parts[:-1])
71 | last_key = path_parts[-1]
72 | current_value = self._get(root_key)
73 | if not isinstance(current_value, dict):
74 | current_value = {}
75 | current_value[last_key] = value
76 | update_data['$set'][root_key] = current_value
77 | else:
78 | update_data['$set'][key] = value
79 |
80 | self.collection.update_one({'_id': self.document_id}, update_data)
81 | except Exception as e:
82 | print(f"Error saving data: {e}")
83 |
84 | def _get(self, key=None):
85 | with self._lock:
86 | try:
87 | document = self.collection.find_one({'_id': self.document_id})
88 | if not key:
89 | del document['_id']
90 | return document
91 | keys = key.split(".")
92 | value = document[keys[0]]
93 | for k in keys[1:]:
94 | value = value[k]
95 | return value
96 | except Exception as e:
97 | #print(f"Error getting value for key '{key}': {e}")
98 | return None
99 |
100 | def _delete(self, key):
101 | with self._lock:
102 | try:
103 | document = self.collection.find_one({'_id': self.document_id})
104 | if key in document:
105 | del document[key]
106 | self.collection.remove({'_id': self.document_id})
107 | self.collection.insert(document)
108 | else:
109 | print(f"Key '{key}' not found in the document")
110 | except Exception as e:
111 | print(f"Error deleting key '{key}': {e}")
112 |
113 | def _getId(self):
114 | return self.document_id
115 |
116 | def __str__(self):
117 | with self._lock:
118 | document = self.collection.find_one({'_id': self.document_id})
119 | return str(document)
120 |
--------------------------------------------------------------------------------
/shortGPT/editing_framework/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayVentura/ShortGPT/3df4e0f7a422bf7386565d498bf4521a2544c614/shortGPT/editing_framework/__init__.py
--------------------------------------------------------------------------------
/shortGPT/editing_framework/editing_steps/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayVentura/ShortGPT/3df4e0f7a422bf7386565d498bf4521a2544c614/shortGPT/editing_framework/editing_steps/__init__.py
--------------------------------------------------------------------------------
/shortGPT/editing_framework/editing_steps/add_background_video.json:
--------------------------------------------------------------------------------
1 | {
2 | "background_video": {
3 | "type": "video",
4 | "z": 0,
5 | "inputs":{
6 | "parameters": ["url"],
7 | "actions": ["set_time_start", "set_time_end"]
8 | },
9 | "parameters": {
10 | "url": null,
11 | "audio": false
12 | },
13 | "actions": [
14 | {
15 | "type": "set_time_start",
16 | "param": null
17 | },
18 | {
19 | "type": "set_time_end",
20 | "param": null
21 | }
22 | ]
23 | }
24 | }
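These step templates leave required values as `null`; the `inputs` block declares which `parameters` keys and which action `param`s a caller must fill in before rendering. A minimal sketch of populating this template (the filling logic is inferred from the schema, not taken from the engine's code):

```python
import json

with open("shortGPT/editing_framework/editing_steps/add_background_video.json") as f:
    step = json.load(f)

asset = step["background_video"]
asset["parameters"]["url"] = "background.mp4"  # listed under inputs["parameters"]
for action in asset["actions"]:
    if action["type"] == "set_time_start":     # listed under inputs["actions"]
        action["param"] = 0
    elif action["type"] == "set_time_end":
        action["param"] = 30
```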
--------------------------------------------------------------------------------
/shortGPT/editing_framework/editing_steps/add_background_voiceover.json:
--------------------------------------------------------------------------------
1 | {
2 | "background_voiceover": {
3 | "inputs": {
4 | "parameters": ["url"],
5 | "actions": ["volume_percentage"]
6 | },
7 | "type": "audio",
8 | "z": -1,
9 | "parameters": {
10 | "url": null
11 | },
12 | "actions": [
13 | {
14 | "type": "volume_percentage",
15 | "param": null
16 | }
17 | ]
18 | }
19 | }
--------------------------------------------------------------------------------
/shortGPT/editing_framework/editing_steps/add_voiceover.json:
--------------------------------------------------------------------------------
1 | {
2 | "voiceover": {
3 | "inputs": {
4 | "parameters": [
5 | "url"
6 | ]
7 | },
8 | "type": "audio",
9 | "z": -1,
10 | "parameters": {
11 | "url": null
12 | },
13 | "actions": [
14 |
15 | ]
16 | }
17 | }
--------------------------------------------------------------------------------
/shortGPT/editing_framework/editing_steps/background_music.json:
--------------------------------------------------------------------------------
1 | {
2 | "background_music": {
3 | "inputs": {
4 | "parameters": ["url", "volume_percentage"],
5 | "actions":["loop_background_music"]
6 | },
7 | "type": "audio",
8 | "z": -1,
9 | "parameters": {
10 | "url": null
11 | },
12 | "actions": [
13 | {
14 | "type": "loop_background_music",
15 | "param": {
16 | "duration": null
17 | }
18 | },
19 | {
20 | "type":"normalize_audio",
21 | "param":{}
22 | },
23 | {
24 | "type": "volume_percentage",
25 | "param": null
26 | }
27 | ]
28 | }
29 | }
--------------------------------------------------------------------------------
/shortGPT/editing_framework/editing_steps/crop_1920x1080_to_short.json:
--------------------------------------------------------------------------------
1 | {
2 | "background_video": {
3 | "type": "video",
4 | "z": 0,
5 | "inputs":{
6 | "parameters": ["url"]
7 | },
8 | "parameters": {
9 | "url": null,
10 | "audio": false
11 | },
12 | "actions": [
13 | {
14 | "type": "crop",
15 | "param": {
16 | "x1": 420,
17 | "y1": 0,
18 | "width": 1080,
19 | "height": 1080
20 | }
21 | },
22 | {
23 | "type": "resize",
24 | "param": {
25 | "width": 1920,
26 | "height": 1920
27 | }
28 | },
29 | {
30 | "type": "crop",
31 | "param": {
32 | "x1": 420,
33 | "y1": 0,
34 | "width": 1080,
35 | "height": 1920
36 | }
37 | }
38 | ]
39 | }
40 | }
--------------------------------------------------------------------------------
/shortGPT/editing_framework/editing_steps/extract_audio.json:
--------------------------------------------------------------------------------
1 | {
2 | "extract_audio": {
3 | "inputs": {
4 | "parameters": ["url"],
5 | "actions": ["subclip", "set_time_start", "set_time_end"]
6 | },
7 | "type": "audio",
8 | "z": -2,
9 | "parameters": {
10 | "url": null
11 | },
12 | "actions": [
13 | {
14 | "type": "subclip",
15 | "param": null
16 | },
17 | {
18 | "type": "set_time_start",
19 | "param": null
20 | },
21 | {
22 | "type": "set_time_end",
23 | "param": null
24 | }
25 | ]
26 | }
27 | }
--------------------------------------------------------------------------------
/shortGPT/editing_framework/editing_steps/insert_audio.json:
--------------------------------------------------------------------------------
1 | {
2 | "insert_audio": {
3 | "inputs": {
4 | "parameters": ["url"],
5 | "actions": ["set_time_start", "set_time_end"]
6 | },
7 | "type": "audio",
8 | "z": -1,
9 | "parameters": {
10 | "url": null
11 | },
12 | "actions": [
13 | {
14 | "type":"set_time_start",
15 | "param":null
16 | },
17 | {
18 | "type": "set_time_end",
19 | "param": null
20 | }
21 | ]
22 | }
23 | }
--------------------------------------------------------------------------------
/shortGPT/editing_framework/editing_steps/make_caption.json:
--------------------------------------------------------------------------------
1 | {
2 | "caption": {
3 | "type": "text",
4 | "z": 4,
5 | "inputs":{
6 | "parameters": ["text"],
7 | "actions": ["set_time_start", "set_time_end"]
8 | },
9 | "parameters": {
10 | "text": null,
11 | "font_size": 100,
12 | "font": "fonts/LuckiestGuy-Regular.ttf",
13 | "color": "white",
14 | "stroke_width": 3,
15 | "stroke_color": "black",
16 | "method": "caption",
17 | "size":[900, 450]
18 | },
19 | "actions": [
20 | {
21 | "type": "set_time_start",
22 | "param": null
23 | },
24 | {
25 | "type": "set_time_end",
26 | "param": null
27 | },
28 | {
29 | "type": "screen_position",
30 | "param": {
31 | "pos": "center"
32 | }
33 | }
34 | ]
35 | }
36 | }
--------------------------------------------------------------------------------
/shortGPT/editing_framework/editing_steps/make_caption_arabic.json:
--------------------------------------------------------------------------------
1 | {
2 | "caption": {
3 | "type": "text",
4 | "z": 4,
5 | "inputs":{
6 | "parameters": ["text"],
7 | "actions": ["set_time_start", "set_time_end"]
8 | },
9 | "parameters": {
10 | "text": null,
11 | "font_size": 100,
12 | "font": "fonts/LuckiestGuy-Regular.ttf",
13 | "color": "white",
14 | "stroke_width": 2,
15 | "stroke_color": "black",
16 | "method": "caption",
17 | "size":[900, 450]
18 | },
19 | "actions": [
20 | {
21 | "type": "set_time_start",
22 | "param": null
23 | },
24 | {
25 | "type": "set_time_end",
26 | "param": null
27 | },
28 | {
29 | "type": "screen_position",
30 | "param": {
31 | "pos": "center"
32 | }
33 | }
34 | ]
35 | }
36 | }
--------------------------------------------------------------------------------
/shortGPT/editing_framework/editing_steps/make_caption_arabic_landscape.json:
--------------------------------------------------------------------------------
1 | {
2 | "caption": {
3 | "type": "text",
4 | "z": 4,
5 | "inputs":{
6 | "parameters": ["text"],
7 | "actions": ["set_time_start", "set_time_end"]
8 | },
9 | "parameters": {
10 | "text": null,
11 | "font_size": 100,
12 | "font": "fonts/LuckiestGuy-Regular.ttf",
13 | "color": "white",
14 | "stroke_width": 2,
15 | "stroke_color": "black"
16 | },
17 | "actions": [
18 | {
19 | "type": "set_time_start",
20 | "param": null
21 | },
22 | {
23 | "type": "set_time_end",
24 | "param": null
25 | },
26 | {
27 | "type": "screen_position",
28 | "param": {
29 | "pos": ["center", 800]
30 | }
31 | }
32 | ]
33 | }
34 | }
--------------------------------------------------------------------------------
/shortGPT/editing_framework/editing_steps/make_caption_landscape.json:
--------------------------------------------------------------------------------
1 | {
2 | "caption": {
3 | "type": "text",
4 | "z": 4,
5 | "inputs":{
6 | "parameters": ["text"],
7 | "actions": ["set_time_start", "set_time_end"]
8 | },
9 | "parameters": {
10 | "text": null,
11 | "font_size": 100,
12 | "font": "fonts/LuckiestGuy-Regular.ttf",
13 | "color": "white",
14 | "stroke_width": 3,
15 | "stroke_color": "black",
16 | "method": "label"
17 | },
18 | "actions": [
19 | {
20 | "type": "set_time_start",
21 | "param": null
22 | },
23 | {
24 | "type": "set_time_end",
25 | "param": null
26 | },
27 | {
28 | "type": "screen_position",
29 | "param": {
30 | "pos": ["center", 820]
31 | }
32 | }
33 | ]
34 | }
35 | }
--------------------------------------------------------------------------------
/shortGPT/editing_framework/editing_steps/show_reddit_image.json:
--------------------------------------------------------------------------------
1 | {
2 | "reddit_image": {
3 | "type": "image",
4 | "inputs":{
5 | "parameters": ["url"]
6 | },
7 | "z": 5,
8 | "parameters": {
9 | "url": null
10 | },
11 | "actions": [
12 | {
13 | "type": "set_time_start",
14 | "param": 0
15 | },
16 | {
17 | "type": "set_time_end",
18 | "param": 3.5
19 | },
20 |
21 | {
22 | "type": "screen_position",
23 | "param": {
24 | "pos": [
25 | "center","center"
26 | ]
27 | }
28 | }
29 | ]
30 | }
31 | }
--------------------------------------------------------------------------------
/shortGPT/editing_framework/editing_steps/show_top_image.json:
--------------------------------------------------------------------------------
1 | {
2 | "top_image_1": {
3 | "type": "image",
4 | "inputs":{
5 | "parameters": ["url"],
6 | "actions": ["set_time_start", "set_time_end"]
7 | },
8 | "z": 5,
9 | "parameters": {
10 | "url": null
11 | },
12 | "actions": [
13 | {
14 | "type": "set_time_start",
15 | "param": null
16 | },
17 | {
18 | "type": "set_time_end",
19 | "param": null
20 | },
21 | {
22 | "type": "auto_resize_image",
23 | "param":{
24 | "maxWidth": 690,
25 | "maxHeight": 690
26 | }
27 | },
28 | {
29 | "type": "normalize_image",
30 | "param":{
31 | "maxWidth": 690,
32 | "maxHeight": 690
33 | }
34 | },
35 | {
36 | "type": "screen_position",
37 | "param": {
38 | "pos": [
39 | "center",
40 | 50
41 | ]
42 | }
43 | }
44 | ]
45 | }
46 | }
--------------------------------------------------------------------------------
/shortGPT/editing_framework/editing_steps/show_watermark.json:
--------------------------------------------------------------------------------
1 | {
2 | "short_watermark": {
3 | "inputs":{
4 | "parameters": ["text"]
5 | },
6 | "type": "text",
7 | "z": 3,
8 | "parameters": {
9 | "text": null,
10 | "font_size": 100,
11 | "font": "fonts/LuckiestGuy-Regular.ttf",
12 | "color": "white",
13 | "stroke_width": 1,
14 | "stroke_color": "black",
15 | "method": "caption",
16 | "size":[900, 450]
17 | },
18 | "actions": [
19 | {
20 | "type": "screen_position",
21 | "param": {
22 | "pos": [
23 | "center",
24 | 0.7
25 | ],
26 | "relative": true
27 | }
28 | }
29 | ]
30 | }
31 | }
--------------------------------------------------------------------------------
/shortGPT/editing_framework/editing_steps/subscribe_animation.json:
--------------------------------------------------------------------------------
1 | {
2 | "subscribe_animation": {
3 | "type": "video",
4 | "z": 6,
5 | "inputs":{
6 | "parameters": ["url"]
7 | },
8 | "parameters": {
9 | "url": null,
10 | "audio": false
11 | },
12 | "actions": [
13 | {
14 | "type": "set_time_start",
15 | "param": 3.5
16 | },
17 | {
18 | "type": "resize",
19 | "param": {
20 | "new_size": 0.4
21 | }
22 | },
23 | {
24 | "type": "green_screen",
25 | "param": {
26 | "color": [
27 | 52,
28 | 255,
29 | 20
30 | ],
31 | "threshold": 100,
32 | "stiffness": 5
33 | }
34 | },
35 | {
36 | "type": "screen_position",
37 | "param": {
38 | "pos": ["center",
39 | 1160]
40 | }
41 | }
42 | ]
43 | }
44 | }
--------------------------------------------------------------------------------
/shortGPT/editing_framework/flows/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayVentura/ShortGPT/3df4e0f7a422bf7386565d498bf4521a2544c614/shortGPT/editing_framework/flows/__init__.py
--------------------------------------------------------------------------------
/shortGPT/editing_framework/flows/build_reddit_image.json:
--------------------------------------------------------------------------------
1 | {
2 | "inputs":{
3 | "username_text": "visual_assets/username_txt/parameters/text",
4 | "ncomments_text": "visual_assets/ncomments_txt/parameters/text",
5 | "nupvote_text": "visual_assets/nupvote_txt/parameters/text",
6 | "question_text": "visual_assets/question_txt/parameters/text"
7 | },
8 | "visual_assets":{
9 | "white_reddit_template_image": {
10 | "type": "image",
11 | "z": 0,
12 | "parameters": {
13 | "url": "public/white_reddit_template.png"
14 | },
15 | "actions": [
16 | ]
17 | },
18 | "username_txt": {
19 | "type": "text",
20 | "z": 1,
21 | "parameters": {
22 | "text": null,
23 | "font_size": 32,
24 | "font" : "fonts/Roboto-Bold.ttf",
25 | "color": "rgb(129, 131, 132)"
26 | },
27 | "actions": [
28 | {
29 | "type": "screen_position",
30 | "param": {
31 | "pos":[350, 43],
32 | "relative": false
33 | }
34 | }
35 | ]
36 | },
37 | "ncomments_txt":{
38 | "type": "text",
39 | "z": 1,
40 | "parameters": {
41 | "text": null,
42 | "font_size": 34,
43 | "font" : "fonts/Roboto-Bold.ttf",
44 | "color": "rgb(129, 131, 132)"
45 | },
46 | "actions": [
47 | {
48 | "type": "screen_position",
49 | "param": {
50 | "pos":[222, 301],
51 | "relative": false
52 | }
53 | }
54 | ]
55 | },
56 | "nupvote_txt":{
57 | "type": "text",
58 | "z": 1,
59 | "parameters": {
60 | "text": null,
61 | "font_size": 36,
62 | "font" : "fonts/Roboto-Bold.ttf",
63 | "color": "rgb(26, 26 , 27)"
64 | },
65 | "actions": [
66 | {
67 | "type": "screen_position",
68 | "param": {
69 | "pos":[28, 115],
70 | "relative": false
71 | }
72 | }
73 | ]
74 | },
75 | "question_txt": {
76 | "type": "text",
77 | "z": 1,
78 | "parameters": {
79 | "text": null,
80 | "font_size": 40,
81 | "font" : "fonts/Roboto-Bold.ttf",
82 | "color": "rgb(26, 26, 27)",
83 | "method": "label",
84 | "text_align": "left"
85 | },
86 | "actions": [
87 | {
88 | "type": "screen_position",
89 | "param": {
90 | "pos":[150, 110],
91 | "relative": false
92 | }
93 | }
94 | ]
95 | }
96 |
97 | }
98 | }
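The flow's `inputs` mapping points each named input at a slash-separated path inside the document (for example `visual_assets/username_txt/parameters/text`). A small sketch of resolving such a path (this resolver is inferred from the path format, not taken from the engine's code):

```python
import json

def set_flow_input(flow: dict, input_name: str, value) -> None:
    # Walk the slash-separated path and assign the value at the leaf key.
    *parents, leaf = flow["inputs"][input_name].split("/")
    node = flow
    for part in parents:
        node = node[part]
    node[leaf] = value

with open("shortGPT/editing_framework/flows/build_reddit_image.json") as f:
    flow = json.load(f)
set_flow_input(flow, "username_text", "u/illustrative_user")
```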
--------------------------------------------------------------------------------
/shortGPT/editing_framework/rendering_logger.py:
--------------------------------------------------------------------------------
1 | from proglog import ProgressBarLogger
2 | import time
3 |
4 | class MoviepyProgressLogger(ProgressBarLogger):
5 |
6 | def __init__(self, callBackFunction = None):
7 | super().__init__()
8 | self.callBackFunction = callBackFunction
9 | self.start_time = time.time()
10 |
11 | def bars_callback(self, bar, attr, value, old_value=None):
12 | # Every time the logger progress is updated, this function is called
13 | percentage = (value / self.bars[bar]['total']) * 100
14 | elapsed_time = time.time() - self.start_time
15 | estimated_time = (elapsed_time / percentage) * (100 - percentage) if percentage != 0 else 0
16 | progress_string = f'Rendering progress : {value}/{self.bars[bar]["total"]} | Time spent: {self.format_time(elapsed_time)} | Time left: {self.format_time(estimated_time)}'
17 | if (self.callBackFunction):
18 | self.callBackFunction(progress_string)
19 | else:
20 | print(progress_string)
21 |
22 | def format_time(self, seconds):
23 | minutes, seconds = divmod(seconds, 60)
24 | return f'{int(minutes)}m {int(seconds)}s'
25 |
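Because `MoviepyProgressLogger` subclasses proglog's `ProgressBarLogger`, it can be passed anywhere moviepy accepts a `logger`; a minimal sketch (file names are illustrative):

```python
from moviepy.editor import VideoFileClip
from shortGPT.editing_framework.rendering_logger import MoviepyProgressLogger

logger = MoviepyProgressLogger(callBackFunction=print)
clip = VideoFileClip("input.mp4").subclip(0, 5)
clip.write_videofile("output.mp4", logger=logger)  # progress strings go to print()
```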
--------------------------------------------------------------------------------
/shortGPT/editing_utils/README.md:
--------------------------------------------------------------------------------
1 | # Module: editing_utils
2 |
3 | The `editing_utils` module provides utility functions for editing videos and images. It consists of three files: `editing_images.py`, `captions.py`, and `handle_videos.py`.
4 |
5 | ## File: editing_images.py
6 |
7 | This file contains functions related to editing images.
8 |
9 | ### Function: getImageUrlsTimed(imageTextPairs)
10 |
11 | This function takes a list of image-text pairs and returns a list of tuples containing the image URL and the corresponding text. It uses the `searchImageUrlsFromQuery` function to search for image URLs based on the provided text.
12 |
13 | ### Function: searchImageUrlsFromQuery(query, top=3, expected_dim=[720,720], retries=5)
14 |
15 | This function searches for image URLs based on a given query. It uses the `getBingImages` function from the `shortGPT.api_utils.image_api` module to fetch the images. The `top` parameter specifies the number of candidate images to consider (default is 3), and the `expected_dim` parameter specifies the expected dimensions of the images (default is [720,720]). If no images are found, the function returns None. Otherwise, it compares each candidate's dimensions to the expected dimensions and returns the URL of one of the top candidates, chosen at random.
16 |
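A usage sketch (the query is illustrative):

```python
from shortGPT.editing_utils.editing_images import searchImageUrlsFromQuery

url = searchImageUrlsFromQuery("golden retriever puppy", top=3, expected_dim=[720, 720])
if url is not None:
    print(url)
```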
17 | ## File: captions.py
18 |
19 | This file contains functions related to handling captions.
20 |
21 | ### Function: interpolateTimeFromDict(word_position, d)
22 |
23 | This function interpolates the time based on the word position in a dictionary. The dictionary contains word positions as keys and corresponding timestamps as values. Given a word position, the function returns the interpolated timestamp.
24 |
25 | ### Function: cleanWord(word)
26 |
27 | This function cleans a word by removing any non-alphanumeric characters.
28 |
29 | ### Function: getTimestampMapping(whisper_analysis)
30 |
31 | This function extracts the mapping of word positions to timestamps from a Whisper analysis. The `whisper_analysis` parameter is a dictionary containing the analysis results. The function returns a dictionary with word positions as keys and corresponding timestamps as values.
32 |
33 | ### Function: splitWordsBySize(words, maxCaptionSize)
34 |
35 | This function splits a list of words into captions based on a maximum caption size. The `maxCaptionSize` parameter specifies the maximum number of characters allowed in a caption. The function returns a list of captions.
36 |
37 | ### Function: getCaptionsWithTime(transcriptions, maxCaptionSize=15, considerPunctuation=True)
38 |
39 | This function generates captions with their corresponding timestamps from a word-level transcription. The `transcriptions` parameter is a dictionary containing Whisper-style analysis results with word timings, the `maxCaptionSize` parameter specifies the maximum number of characters allowed in a caption (default is 15), and the `considerPunctuation` parameter controls whether captions are also split at punctuation. It returns a list of caption-time pairs.
40 |
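A sketch with a hand-built, Whisper-style transcription (real input would come from a transcription model with word-level timings):

```python
from shortGPT.editing_utils.captions import getCaptionsWithTime

transcription = {
    "segments": [
        {"words": [
            {"text": "Hello", "start": 0.0, "end": 0.4},
            {"text": "world", "start": 0.5, "end": 0.9},
        ]}
    ]
}
print(getCaptionsWithTime(transcription, maxCaptionSize=15))
```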
41 | ## File: handle_videos.py
42 |
43 | This file contains functions related to handling videos.
44 |
45 | ### Function: getYoutubeAudio(url)
46 |
47 | This function retrieves the audio URL and duration from a YouTube video. The `url` parameter specifies the URL of the YouTube video. The function uses the `yt_dlp` library to extract the audio information. It returns the audio URL and duration as a tuple. If the retrieval fails, it returns None.
48 |
49 | ### Function: getYoutubeVideoLink(url)
50 |
51 | This function retrieves the video URL and duration from a YouTube video. The `url` parameter specifies the URL of the YouTube video. The function uses the `yt_dlp` library to extract the video information. It returns the video URL and duration as a tuple. If the retrieval fails, an exception is raised.
52 |
53 | ### Function: extract_random_clip_from_video(video_url, video_duration, clip_duration, output_file)
54 |
55 | This function extracts a random clip from a video and saves it to an output file. The `video_url` parameter specifies the URL of the video, the `video_duration` parameter specifies the duration of the video, the `clip_duration` parameter specifies the duration of the desired clip, and the `output_file` parameter specifies the file path for the extracted clip. The function uses the `ffmpeg` library to perform the extraction. It randomly selects a start time within 15% to 85% of the video duration and extracts a clip of the specified duration starting from the selected start time. If the extraction fails or the output file is not created, an exception is raised.
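A usage sketch combining it with `getYoutubeVideoLink` (the URL is illustrative and ffmpeg must be installed):

```python
from shortGPT.editing_utils.handle_videos import getYoutubeVideoLink, extract_random_clip_from_video

video_url, duration = getYoutubeVideoLink("https://www.youtube.com/watch?v=XXXXXXXXXXX")
extract_random_clip_from_video(video_url, duration, clip_duration=20, output_file="clip.mp4")
```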
--------------------------------------------------------------------------------
/shortGPT/editing_utils/__init__.py:
--------------------------------------------------------------------------------
1 | from . import editing_images
2 | from . import captions
--------------------------------------------------------------------------------
/shortGPT/editing_utils/captions.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | def getSpeechBlocks(whispered, silence_time=0.8):
4 | text_blocks, (st, et, txt) = [], (0,0,"")
5 | for i, seg in enumerate(whispered['segments']):
6 | if seg['start'] - et > silence_time:
7 | if txt: text_blocks.append([[st, et], txt])
8 | (st, et, txt) = (seg['start'], seg['end'], seg['text'])
9 | else:
10 | et, txt = seg['end'], txt + seg['text']
11 |
12 | if txt: text_blocks.append([[st, et], txt]) # For last text block
13 |
14 | return text_blocks
15 |
16 | def cleanWord(word):
17 | return re.sub(r'[^\w\s\-_"\'\']', '', word)
18 |
19 | def interpolateTimeFromDict(word_position, d):
20 | for key, value in d.items():
21 | if key[0] <= word_position <= key[1]:
22 | return value
23 | return None
24 |
25 | def getTimestampMapping(whisper_analysis):
26 | index = 0
27 | locationToTimestamp = {}
28 | for segment in whisper_analysis['segments']:
29 | for word in segment['words']:
30 | newIndex = index + len(word['text'])+1
31 | locationToTimestamp[(index, newIndex)] = word['end']
32 | index = newIndex
33 | return locationToTimestamp
34 |
35 |
36 | def splitWordsBySize(words, maxCaptionSize):
37 | halfCaptionSize = maxCaptionSize / 2
38 | captions = []
39 | while words:
40 | caption = words[0]
41 | words = words[1:]
42 | while words and len(caption + ' ' + words[0]) <= maxCaptionSize:
43 | caption += ' ' + words[0]
44 | words = words[1:]
45 | if len(caption) >= halfCaptionSize and words:
46 | break
47 | captions.append(caption)
48 | return captions
49 |
50 | def getCaptionsWithTime(transcriptions, maxCaptionSize=15, considerPunctuation=True):
51 | time_splits = []
52 | current_caption = []
53 | current_length = 0
54 |
55 | # Ensure we only work with transcriptions that have word-level timing
56 | segments = [seg for seg in transcriptions['segments'] if 'words' in seg]
57 |
58 | # Flatten all words from all segments
59 | all_words = []
60 | for segment in segments:
61 | all_words.extend(segment['words'])
62 |
63 | for i, word in enumerate(all_words):
64 | word_text = word['text']
65 |
66 | # Check if this word would exceed maxCaptionSize
67 | new_length = current_length + len(word_text) + (1 if current_caption else 0)
68 |
69 | # Determine if we should split here
70 | should_split = (
71 | new_length > maxCaptionSize or
72 | (considerPunctuation and word_text.rstrip('.,!?') != word_text and current_caption) or
73 | i == len(all_words) - 1 or
74 | len(current_caption) >= 5
75 | )
76 |
77 | # Add word to current caption if we're not splitting yet
78 | if not should_split:
79 | current_caption.append(word_text)
80 | current_length = new_length
81 | continue
82 |
83 | # Handle the split
84 | if current_caption:
85 | # Add current word if this is the last one
86 | if i == len(all_words) - 1 and new_length <= maxCaptionSize:
87 | current_caption.append(word_text)
88 |
89 | caption_text = ' '.join(current_caption)
90 | start_time = all_words[i - len(current_caption)]['start']
91 | end_time = word['end'] if word_text in current_caption else all_words[i - 1]['end']
92 | time_splits.append(((start_time, end_time), caption_text))
93 |
94 | # Handle current word if it wasn't added to the previous caption
95 | if word_text not in current_caption and i == len(all_words) - 1:
96 | time_splits.append(((word['start'], word['end']), word_text))
97 |
98 | # Reset for next caption
99 | current_caption = []
100 | current_length = 0
101 |
102 | # Start new caption with current word if it wasn't the last one
103 | if i < len(all_words) - 1:
104 | current_caption.append(word_text)
105 | current_length = len(word_text)
106 |
107 | return time_splits
--------------------------------------------------------------------------------
/shortGPT/editing_utils/editing_images.py:
--------------------------------------------------------------------------------
1 | from shortGPT.api_utils.image_api import getBingImages
2 | from tqdm import tqdm
3 | import random
4 | import math
5 |
6 | def getImageUrlsTimed(imageTextPairs):
7 | return [(pair[0], searchImageUrlsFromQuery(pair[1])) for pair in tqdm(imageTextPairs, desc='Search engine queries for images...')]
8 |
9 |
10 |
11 | def searchImageUrlsFromQuery(query, top=3, expected_dim=[720,720], retries=5):
12 | images = getBingImages(query, retries=retries)
13 | if(images):
14 | distances = list(map(lambda x: math.dist([x['width'], x['height']], expected_dim), images[0:top]))
15 | shortest_ones = sorted(distances)
16 | random.shuffle(shortest_ones)
17 | for distance in shortest_ones:
18 | image_url = images[distances.index(distance)]['url']
19 | return image_url
20 | return None
--------------------------------------------------------------------------------
/shortGPT/editing_utils/handle_videos.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import yt_dlp
4 | import subprocess
5 | import json
6 |
7 | def getYoutubeVideoLink(url):
8 | format_filter = "[height<=1920]" if 'shorts' in url else "[height<=1080]"
9 | ydl_opts = {
10 | "quiet": True,
11 | "no_warnings": True,
12 | "no_color": True,
13 | "no_call_home": True,
14 | "no_check_certificate": True,
15 | # Look for m3u8 formats first, then fall back to regular formats
16 | "format": f"bestvideo[ext=m3u8]{format_filter}/bestvideo{format_filter}"
17 | }
18 | try:
19 | with yt_dlp.YoutubeDL(ydl_opts) as ydl:
20 | dictMeta = ydl.extract_info(
21 | url,
22 | download=False)
23 | return dictMeta['url'], dictMeta['duration']
24 | except Exception as e:
25 | raise Exception(f"Failed getting video link from the following video/url {url} {e.args[0]}")
26 |
27 | def extract_random_clip_from_video(video_url, video_duration, clip_duration, output_file):
28 | """Extracts a clip from a video using a signed URL.
29 | Args:
30 | video_url (str): The signed URL of the video.
31 | video_url (int): Duration of the video.
32 | start_time (int): The start time of the clip in seconds.
33 | clip_duration (int): The duration of the clip in seconds.
34 | output_file (str): The output file path for the extracted clip.
35 | """
36 | if not video_duration:
37 | raise Exception("Could not get video duration")
38 | if video_duration * 0.7 <= 120:  # the usable middle 70% of the video must be longer than 120s
39 | raise Exception("Video too short")
40 | start_time = video_duration * 0.15 + random.random() * (0.7 * video_duration - clip_duration)
41 |
42 | command = [
43 | 'ffmpeg',
44 | '-loglevel', 'error',
45 | '-ss', str(start_time),
46 | '-t', str(clip_duration),
47 | '-i', video_url,
48 | '-c:v', 'libx264',
49 | '-preset', 'ultrafast',
50 | output_file
51 | ]
52 |
53 | subprocess.run(command, check=True)
54 |
55 | if not os.path.exists(output_file):
56 | raise Exception("Random clip failed to be written")
57 | return output_file
58 |
59 |
60 | def get_aspect_ratio(video_file):
61 | cmd = 'ffprobe -i "{}" -v quiet -print_format json -show_format -show_streams'.format(video_file)
62 | # jsonstr = subprocess.getoutput(cmd)
63 | jsonstr = subprocess.check_output(cmd, shell=True, encoding='utf-8')
64 | r = json.loads(jsonstr)
65 | # look for "codec_type": "video". take the 1st one if there are mulitple
66 | video_stream_info = [x for x in r['streams'] if x['codec_type']=='video'][0]
67 | if 'display_aspect_ratio' in video_stream_info and video_stream_info['display_aspect_ratio']!="0:1":
68 | a,b = video_stream_info['display_aspect_ratio'].split(':')
69 | dar = int(a)/int(b)
70 | else:
71 | # some video do not have the info of 'display_aspect_ratio'
72 | w,h = video_stream_info['width'], video_stream_info['height']
73 | dar = int(w)/int(h)
74 | ## not sure if we should use this
75 | #cw,ch = video_stream_info['coded_width'], video_stream_info['coded_height']
76 | #sar = int(cw)/int(ch)
77 | if 'sample_aspect_ratio' in video_stream_info and video_stream_info['sample_aspect_ratio']!="0:1":
78 | # some video do not have the info of 'sample_aspect_ratio'
79 | a,b = video_stream_info['sample_aspect_ratio'].split(':')
80 | sar = int(a)/int(b)
81 | else:
82 | sar = dar
83 | par = dar/sar  # pixel aspect ratio (computed but unused; the display aspect ratio is returned)
84 | return dar
--------------------------------------------------------------------------------
/shortGPT/engine/__init__.py:
--------------------------------------------------------------------------------
1 | from . import abstract_content_engine
2 | from . import reddit_short_engine
--------------------------------------------------------------------------------
/shortGPT/engine/abstract_content_engine.py:
--------------------------------------------------------------------------------
1 | import os
2 | from abc import ABC
3 |
4 | from shortGPT.audio.voice_module import VoiceModule
5 | from shortGPT.config.languages import Language
6 | from shortGPT.config.path_utils import get_program_path
7 | from shortGPT.database.content_database import ContentDatabase
8 |
9 | CONTENT_DB = ContentDatabase()
10 |
11 |
12 | class AbstractContentEngine(ABC):
13 | def __init__(self, short_id: str, content_type: str, language: Language, voiceModule: VoiceModule):
14 | if short_id:
15 | self.dataManager = CONTENT_DB.getContentDataManager(
16 | short_id, content_type
17 | )
18 | else:
19 | self.dataManager = CONTENT_DB.createContentDataManager(content_type)
20 | self.id = str(self.dataManager._getId())
21 | self.initializeFFMPEG()
22 | self.prepareEditingPaths()
23 | self._db_language = language.value
24 | self.voiceModule = voiceModule
25 | self.stepDict = {}
26 | self.default_logger = lambda _: None
27 | self.logger = self.default_logger
28 |
29 | def __getattr__(self, name):
30 | if name.startswith('_db_'):
31 | db_path = name[4:] # remove '_db_' prefix
32 | cache_attr = '_' + name
33 | if not hasattr(self, cache_attr):
34 | setattr(self, cache_attr, self.dataManager.get(db_path))
35 | return getattr(self, cache_attr)
36 | else:
37 | raise AttributeError(name)
38 |
39 | def __setattr__(self, name, value):
40 | if name.startswith('_db_'):
41 | db_path = name[4:] # remove '_db_' prefix
42 | cache_attr = '_' + name
43 | setattr(self, cache_attr, value)
44 | self.dataManager.save(db_path, value)
45 | else:
46 | super().__setattr__(name, value)
47 |
48 | def prepareEditingPaths(self):
49 | self.dynamicAssetDir = f".editing_assets/{self.dataManager.contentType}_assets/{self.id}/"
50 | if not os.path.exists(self.dynamicAssetDir):
51 | os.makedirs(self.dynamicAssetDir)
52 |
53 | def verifyParameters(*args, **kwargs):
54 | keys = list(kwargs.keys())
55 | for key in keys:
56 | if not kwargs[key]:
57 | print(kwargs)
58 | raise Exception(f"Parameter '{key}' is null")
59 |
60 | def isShortDone(self):
61 | return self._db_ready_to_upload
62 |
63 | def makeContent(self):
64 | while (not self.isShortDone()):
65 | currentStep = self._db_last_completed_step + 1
66 | if currentStep not in self.stepDict:
67 | raise Exception(f'Incorrect step {currentStep}')
68 | if self.stepDict[currentStep].__name__ == "_editAndRenderShort":
69 | yield currentStep, f'Current step ({currentStep} / {self.get_total_steps()}) : ' + "Preparing rendering assets..."
70 | else:
71 | yield currentStep, f'Current step ({currentStep} / {self.get_total_steps()}) : ' + self.stepDict[currentStep].__name__
72 | if self.logger is not self.default_logger:
73 | print(f'Step {currentStep} {self.stepDict[currentStep].__name__}')
74 | self.stepDict[currentStep]()
75 | self._db_last_completed_step = currentStep
76 |
77 | def get_video_output_path(self):
78 | return self._db_video_path
79 |
80 | def get_total_steps(self):
81 | return len(self.stepDict)
82 |
83 | def set_logger(self, logger):
84 | self.logger = logger
85 |
86 | def initializeFFMPEG(self):
87 | ffmpeg_path = get_program_path("ffmpeg")
88 | if not ffmpeg_path:
89 | raise Exception("FFmpeg, a program used for automated editing within ShortGPT was not found on your computer. Please go back to the README and follow the instructions to install FFMPEG")
90 | ffprobe_path = get_program_path("ffprobe")
91 | if not ffprobe_path:
92 | raise Exception("FFProbe, a dependecy of FFmpeg was not found. Please go back to the README and follow the instructions to install FFMPEG")
--------------------------------------------------------------------------------
/shortGPT/engine/facts_short_engine.py:
--------------------------------------------------------------------------------
1 | from shortGPT.audio.voice_module import VoiceModule
2 | from shortGPT.gpt import facts_gpt
3 | from shortGPT.config.languages import Language
4 | from shortGPT.engine.content_short_engine import ContentShortEngine
5 |
6 |
7 | class FactsShortEngine(ContentShortEngine):
8 |
9 | def __init__(self, voiceModule: VoiceModule, facts_type: str, background_video_name: str, background_music_name: str, short_id="",
10 | num_images=None, watermark=None, language:Language = Language.ENGLISH):
11 | super().__init__(short_id=short_id, short_type="facts_shorts", background_video_name=background_video_name, background_music_name=background_music_name,
12 | num_images=num_images, watermark=watermark, language=language, voiceModule=voiceModule)
13 |
14 | self._db_facts_type = facts_type
15 |
16 | def _generateScript(self):
17 | """
18 | Implements Abstract parent method to generate the script for the Facts short.
19 | """
20 | self._db_script = facts_gpt.generateFacts(self._db_facts_type)
21 |
22 |
--------------------------------------------------------------------------------
/shortGPT/gpt/__init__.py:
--------------------------------------------------------------------------------
1 | from . import gpt_utils
2 | from . import reddit_gpt
--------------------------------------------------------------------------------
/shortGPT/gpt/facts_gpt.py:
--------------------------------------------------------------------------------
1 | from shortGPT.gpt import gpt_utils
2 | import json
3 | def generateFacts(facts_type):
4 | chat, system = gpt_utils.load_local_yaml_prompt('prompt_templates/facts_generator.yaml')
5 | chat = chat.replace("<>", facts_type)
6 | result = gpt_utils.llm_completion(chat_prompt=chat, system=system, temp=1.3)
7 | return result
8 |
9 | def generateFactSubjects(n):
10 | out = []
11 | chat, system = gpt_utils.load_local_yaml_prompt('prompt_templates/facts_subjects_generation.yaml')
12 | chat = chat.replace("<>", f"{n}")
13 | maxAttempts = int(1.5*n)
14 | attempts=0
15 | while len(out) != n and attempts <= maxAttempts:
16 |
17 | result = gpt_utils.llm_completion(chat_prompt=chat, system=system, temp=1.69)
18 | attempts+=1
19 | try:
20 | out = json.loads(result.replace("'", '"'))
21 | except Exception as e:
22 | print(f"INFO - Failed generating {n} fact subjects after {attempts} trials", e)
23 | pass
24 | if len(out) != n:
25 | raise Exception(f"Failed to generate {n} subjects. In {attempts} attemps")
26 | return out
--------------------------------------------------------------------------------
/shortGPT/gpt/gpt_chat_video.py:
--------------------------------------------------------------------------------
1 | from shortGPT.gpt import gpt_utils
2 | import json
3 | def generateScript(script_description, language):
4 | out = {'script': ''}
5 | chat, system = gpt_utils.load_local_yaml_prompt('prompt_templates/chat_video_script.yaml')
6 | chat = chat.replace("<>", script_description).replace("<>", language)
7 | while not ('script' in out and out['script']):
8 | try:
9 | result = gpt_utils.llm_completion(chat_prompt=chat, system=system, temp=1)
10 | out = json.loads(result)
11 | except Exception as e:
12 | print(e, "Difficulty parsing the output in gpt_chat_video.generateScript")
13 | return out['script']
14 |
15 | def correctScript(script, correction):
16 | out = {'script': ''}
17 | chat, system = gpt_utils.load_local_yaml_prompt('prompt_templates/chat_video_edit_script.yaml')
18 | chat = chat.replace("<>", script).replace("<>", correction)
19 |
20 | while not ('script' in out and out['script']):
21 | try:
22 | result = gpt_utils.llm_completion(chat_prompt=chat, system=system, temp=1)
23 | out = json.loads(result)
24 | except Exception as e:
25 | print("Difficulty parsing the output in gpt_chat_video.generateScript")
26 | return out['script']
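27 |
28 | # Illustrative usage sketch (assumed): draft a script, then revise it, mirroring
29 | # the generate-then-correct flow the two functions above implement. Requires an
30 | # LLM API key configured through ApiKeyManager.
31 | if __name__ == "__main__":
32 |     draft = generateScript("A 30-second video about octopus intelligence", "English")
33 |     print(correctScript(draft, "Make the hook shorter and end with a question"))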
--------------------------------------------------------------------------------
/shortGPT/gpt/gpt_editing.py:
--------------------------------------------------------------------------------
1 | from shortGPT.gpt import gpt_utils
2 | import json
3 | def extractJsonFromString(text):
4 | start = text.find('{')
5 | end = text.rfind('}') + 1
6 | if start == -1 or end == 0:
7 | raise Exception("Error: No JSON object found in response")
8 | json_str = text[start:end]
9 | return json.loads(json_str)
10 |
11 |
12 | def getImageQueryPairs(captions, n=15, maxTime=2):
13 | chat, _ = gpt_utils.load_local_yaml_prompt('prompt_templates/editing_generate_images.yaml')
14 | # the template reuses the same '<>' marker for all four slots, in order: n, n, captions, n
15 | prompt = chat.replace("<>", f"{n}", 2).replace("<>", f"{captions}", 1).replace("<>", f"{n}", 1)
16 | try:
17 | # Get response and parse JSON
18 | res = gpt_utils.llm_completion(chat_prompt=prompt)
19 | data = extractJsonFromString(res)
20 | # Convert to pairs with time ranges
21 | pairs = []
22 | end_audio = captions[-1][0][1]
23 |
24 | for i, item in enumerate(data["image_queries"]):
25 | time = item["timestamp"]
26 | query = item["query"]
27 |
28 | # Skip invalid timestamps
29 | if time <= 0 or time >= end_audio:
30 | continue
31 |
32 | # Calculate end time for this image
33 | if i < len(data["image_queries"]) - 1:
34 | next_time = data["image_queries"][i + 1]["timestamp"]
35 | end = min(time + maxTime, next_time)
36 | else:
37 | end = min(time + maxTime, end_audio)
38 |
39 | pairs.append(((time, end), query + " image"))
40 |
41 | return pairs
42 |
43 | except json.JSONDecodeError:
44 | print("Error: Invalid JSON response from LLM")
45 | return []
46 | except KeyError:
47 | print("Error: Malformed JSON structure")
48 | return []
49 | except Exception as e:
50 | print(f"Error processing image queries: {str(e)}")
51 | return []
52 |
53 | def getVideoSearchQueriesTimed(captions_timed):
54 | """
55 | Generate timed video search queries based on caption timings.
56 | Returns list of [time_range, search_queries] pairs.
57 | """
58 | err = ""
59 |
60 | for _ in range(4):
61 | try:
62 | # Get total video duration from last caption
63 | end_time = captions_timed[-1][0][1]
64 |
65 | # Load and prepare prompt
66 | chat, system = gpt_utils.load_local_yaml_prompt('prompt_templates/editing_generate_videos.yaml')
67 | prompt = chat.replace("<>", f"{captions_timed}")
68 |
69 | # Get response and parse JSON
70 | res = gpt_utils.llm_completion(chat_prompt=prompt, system=system)
71 | data = extractJsonFromString(res)
72 |
73 | # Convert to expected format
74 | formatted_queries = []
75 | for segment in data["video_segments"]:
76 | time_range = segment["time_range"]
77 | queries = segment["queries"]
78 |
79 | # Validate time range
80 | if not (0 <= time_range[0] < time_range[1] <= end_time):
81 | continue
82 |
83 | # Ensure exactly 3 queries
84 | while len(queries) < 3:
85 | queries.append(queries[-1])
86 | queries = queries[:3]
87 |
88 | formatted_queries.append([time_range, queries])
89 |
90 | # Verify coverage
91 | if not formatted_queries:
92 | raise ValueError("Generated segments don't cover full video duration")
93 |
94 | return formatted_queries
95 | except Exception as e:
96 | err = str(e)
97 | print(f"Error generating video search queries {err}")
98 | raise Exception(f"Failed to generate video search queries {err}")
--------------------------------------------------------------------------------
/shortGPT/gpt/gpt_translate.py:
--------------------------------------------------------------------------------
1 | from shortGPT.gpt import gpt_utils
2 |
3 | def translateContent(content, language):
4 | chat, system = gpt_utils.load_local_yaml_prompt('prompt_templates/translate_content.yaml')
5 | if language == "arabic":
6 | language =="arabic, and make the translated text two third of the length of the original."
7 | system = system.replace("<>", language)
8 | chat = chat.replace("<>", content)
9 | result = gpt_utils.llm_completion(chat_prompt=chat, system=system, temp=1)
10 | return result
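11 |
12 | # Illustrative usage sketch (assumed): `language` is a plain lowercase language
13 | # name; passing "arabic" additionally triggers the length instruction above.
14 | if __name__ == "__main__":
15 |     print(translateContent("Did you know that honey never spoils?", "french"))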
--------------------------------------------------------------------------------
/shortGPT/gpt/gpt_utils.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import re
4 | from time import sleep, time
5 |
6 | import openai
7 | import tiktoken
8 | import yaml
9 |
10 | from shortGPT.config.api_db import ApiKeyManager
11 |
12 |
13 | def num_tokens_from_messages(texts, model="gpt-4o-mini"):
14 | """Returns the number of tokens used by a list of messages."""
15 | try:
16 | encoding = tiktoken.encoding_for_model(model)
17 | except KeyError:
18 | encoding = tiktoken.get_encoding("cl100k_base")
19 | if model == "gpt-4o-mini": # note: future models may deviate from this
20 | if isinstance(texts, str):
21 | texts = [texts]
22 | score = 0
23 | for text in texts:
24 | score += 4 + len(encoding.encode(text))
25 | return score
26 | else:
27 | raise NotImplementedError(f"""num_tokens_from_messages() is not presently implemented for model {model}.
28 | See https://github.com/openai/openai-python/blob/main/chatml.md for information""")
29 |
30 |
31 | def extract_biggest_json(string):
32 | json_regex = r"\{(?:[^{}]|(?R))*\}"
33 | json_objects = re.findall(json_regex, string)
34 | if json_objects:
35 | return max(json_objects, key=len)
36 | return None
37 |
38 |
39 | def get_first_number(string):
40 | pattern = r'\b(0|[1-9]|10)\b'
41 | match = re.search(pattern, string)
42 | if match:
43 | return int(match.group())
44 | else:
45 | return None
46 |
47 |
48 | def load_yaml_file(file_path: str) -> dict:
49 | """Reads and returns the contents of a YAML file as dictionary"""
50 | return yaml.safe_load(open_file(file_path))
51 |
52 |
53 | def load_json_file(file_path):
54 | with open(file_path, 'r', encoding='utf-8') as f:
55 | json_data = json.load(f)
56 | return json_data
57 |
58 | from pathlib import Path
59 |
60 | def load_local_yaml_prompt(file_path):
61 | _here = Path(__file__).parent
62 | _absolute_path = (_here / '..' / file_path).resolve()
63 | json_template = load_yaml_file(str(_absolute_path))
64 | return json_template['chat_prompt'], json_template['system_prompt']
65 |
66 |
67 | def open_file(filepath):
68 | with open(filepath, 'r', encoding='utf-8') as infile:
69 | return infile.read()
70 | from openai import OpenAI
71 |
72 | def llm_completion(chat_prompt="", system="", temp=0.7, max_tokens=2000, remove_nl=True, conversation=None):
73 | openai_key= ApiKeyManager.get_api_key("OPENAI_API_KEY")
74 | gemini_key = ApiKeyManager.get_api_key("GEMINI_API_KEY")
75 | if gemini_key:
76 | client = OpenAI(
77 | api_key=gemini_key,
78 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
79 | )
80 | model="gemini-2.0-flash-lite-preview-02-05"
81 | elif openai_key:
82 | client = OpenAI( api_key=openai_key)
83 | model="gpt-4o-mini"
84 | else:
85 | raise Exception("No OpenAI or Gemini API Key found for LLM request")
86 | max_retry = 5
87 | retry = 0
88 | error = ""
89 | for i in range(max_retry):
90 | try:
91 | if conversation:
92 | messages = conversation
93 | else:
94 | messages = [
95 | {"role": "system", "content": system},
96 | {"role": "user", "content": chat_prompt}
97 | ]
98 | response = client.chat.completions.create(
99 | model=model,
100 | messages=messages,
101 | max_tokens=max_tokens,
102 | temperature=temp,
103 | timeout=30
104 | )
105 | text = response.choices[0].message.content.strip()
106 | if remove_nl:
107 | text = re.sub(r'\s+', ' ', text)
108 | filename = '%s_llm_completion.txt' % time()
109 | if not os.path.exists('.logs/gpt_logs'):
110 | os.makedirs('.logs/gpt_logs')
111 | with open('.logs/gpt_logs/%s' % filename, 'w', encoding='utf-8') as outfile:
112 | outfile.write(f"System prompt: ===\n{system}\n===\n"+f"Chat prompt: ===\n{chat_prompt}\n===\n" + f'RESPONSE:\n====\n{text}\n===\n')
113 | return text
114 | except Exception as oops:
115 | retry += 1
116 | print('Error communicating with OpenAI:', oops)
117 | error = str(oops)
118 | sleep(1)
119 | raise Exception(f"Error communicating with LLM Endpoint Completion errored more than error: {error}")
--------------------------------------------------------------------------------
/shortGPT/gpt/gpt_voice.py:
--------------------------------------------------------------------------------
1 |
2 | from shortGPT.gpt import gpt_utils
3 | def getGenderFromText(text):
4 | chat, system = gpt_utils.load_local_yaml_prompt('prompt_templates/voice_identify_gender.yaml')
5 | chat = chat.replace("<>", text)
6 | result = gpt_utils.llm_completion(chat_prompt=chat, system=system).replace("\n", "").lower()
7 | if 'female' in result:
8 | return 'female'
9 | return 'male'
--------------------------------------------------------------------------------
/shortGPT/gpt/gpt_yt.py:
--------------------------------------------------------------------------------
1 | from shortGPT.gpt import gpt_utils
2 | import json
3 |
4 | def generate_title_description_dict(content):
5 | out = {"title": "", "description":""}
6 | chat, system = gpt_utils.load_local_yaml_prompt('prompt_templates/yt_title_description.yaml')
7 | chat = chat.replace("<>", f"{content}")
8 |
9 | while out["title"] == "" or out["description"] == "":
10 | result = gpt_utils.llm_completion(chat_prompt=chat, system=system, temp=1)
11 | try:
12 | response = json.loads(result)
13 | if "title" in response:
14 | out["title"] = response["title"]
15 | if "description" in response:
16 | out["description"] = response["description"]
17 | except Exception as e:
18 | pass
19 |
20 | return out['title'], out['description']
21 |
--------------------------------------------------------------------------------
/shortGPT/gpt/reddit_gpt.py:
--------------------------------------------------------------------------------
1 | from shortGPT.gpt import gpt_utils
2 | import random
3 | import json
4 | def generateRedditPostMetadata(title):
5 | name = generateUsername()
6 | if title and title[0] == '"':
7 | title = title.replace('"', '')
8 | n_months = random.randint(1,11)
9 | header = f"{name} - {n_months} months ago"
10 | n_comments = random.random() * 10 + 2
11 | n_upvotes = n_comments*(1.2+ random.random()*2.5)
12 | return title, header, f"{n_comments:.1f}k", f"{n_upvotes:.1f}k"
13 |
14 |
15 | def getInterestingRedditQuestion():
16 | chat, system = gpt_utils.load_local_yaml_prompt('prompt_templates/reddit_generate_question.yaml')
17 | return gpt_utils.llm_completion(chat_prompt=chat, system=system, temp=1.08)
18 |
19 | def createRedditScript(question):
20 | chat, system = gpt_utils.load_local_yaml_prompt('prompt_templates/reddit_generate_script.yaml')
21 | chat = chat.replace("<>", question)
22 | result = "Reddit, " + question +" "+gpt_utils.llm_completion(chat_prompt=chat, system=system, temp=1.08)
23 | return result
24 |
25 |
26 | def getRealisticness(text):
27 | chat, system = gpt_utils.load_local_yaml_prompt('prompt_templates/reddit_filter_realistic.yaml')
28 | chat = chat.replace("<>", text)
29 | attempts = 0
30 | while attempts <= 4:
31 | attempts+=1
32 | try:
33 | result = gpt_utils.llm_completion(chat_prompt=chat, system=system, temp=1)
34 | return json.loads(result)['score']
35 | except Exception as e:
36 | print("Error in getRealisticness", e.args[0])
37 | raise Exception("LLM Failed to generate a realisticness score on the script")
38 |
39 | def getQuestionFromThread(text):
40 | if ((text.find("Reddit, ") < 15) and (10 < text.find("?") < 100)):
41 | question = text.split("?")[0].replace("Reddit, ", "").strip().capitalize()
42 | else:
43 | chat, system = gpt_utils.load_local_yaml_prompt('prompt_templates/reddit_extract_question.yaml')  # question-extraction template, not the realism filter
44 | chat = chat.replace("<>", text)
45 | question = gpt_utils.llm_completion(chat_prompt=chat, system=system).replace("\n", "")
46 | question = question.replace('"', '').replace("?", "")
47 | return question
48 |
49 |
50 | def generateUsername():
51 | chat, system = gpt_utils.load_local_yaml_prompt('prompt_templates/reddit_username.yaml')
52 | return gpt_utils.llm_completion(chat_prompt=chat, system=system, temp=1.2).replace("u/", "")
53 |
54 |
55 |
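56 | # Illustrative usage sketch (assumed): the question -> script -> realism-filter
57 | # pipeline these helpers support. The threshold of 7 is an arbitrary example value.
58 | if __name__ == "__main__":
59 |     question = getInterestingRedditQuestion()
60 |     script = createRedditScript(question)
61 |     if getRealisticness(script) >= 7:
62 |         print(generateRedditPostMetadata(question), script)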
--------------------------------------------------------------------------------
/shortGPT/prompt_templates/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayVentura/ShortGPT/3df4e0f7a422bf7386565d498bf4521a2544c614/shortGPT/prompt_templates/__init__.py
--------------------------------------------------------------------------------
/shortGPT/prompt_templates/chat_video_edit_script.yaml:
--------------------------------------------------------------------------------
1 | system_prompt: |
2 | You are an expert video script writer / editor. You ONLY write text that is read. You only write the script that will be read by a voice actor for a video. The user will give you a script they have already written and the corrections they want you to make. From that, you will edit the script. Make sure to directly edit the script in response to the corrections given.
3 | Your edited script will not have any reference to the audio footage / video footage shown. Only the text that will be narrated by the voice actor.
4 | You will edit purely text.
5 | Don't write any other textual thing than the text itself.
6 | Make sure the text is not longer than 200 words (keep the video pretty short and neat).
7 | # Output
8 | You will output the edited script in a JSON format of this kind, and only a parsable JSON object
9 | {"script": "did you know that ... ?"}
10 |
11 | chat_prompt: |
12 | Original script:
13 | <>
14 | Corrections:
15 | <>
--------------------------------------------------------------------------------
/shortGPT/prompt_templates/chat_video_script.yaml:
--------------------------------------------------------------------------------
1 | system_prompt: |
2 | You are an expert video writer. You ONLY produce text that is read. You only produce the script. that will be read by a voice actor for a video. The user will give you the description of the video they want you to make and from that, you will write the script. Make sure to directly write the script in response to the video description.
3 | Your script will not have any reference to the audio footage / video footage shown. Only the text that will be narrated by the voice actor.
4 | You will produce purely text.
5 | Don't write any other textual thing than the text itself.
6 | Make sure the text is not longer than 200 words (keep the video pretty short and neat).
7 | # Output
8 | You will output the script in a JSON format of this kind, and only a parsable JSON object
9 | {"script": "did you know that ... ?"}
10 |
11 | chat_prompt: |
12 | Language: <>
13 | Video description:
14 | <>
15 |
--------------------------------------------------------------------------------
/shortGPT/prompt_templates/editing_generate_images.yaml:
--------------------------------------------------------------------------------
1 | system_prompt: |
2 | You are an AI specialized in generating precise image search queries for video editing. You must output ONLY valid JSON in the specified format, with no additional text.
3 |
4 | chat_prompt: |
5 | You are a shorts video editor. Your audience is people from 18 to 40 years old. Your style of editing is pretty simple: you take the transcript of your short and put a very simple google image to illustrate the narrated sentences.
6 |
7 | Each google image is searched with a short query of two words maximum. So let's say someone is talking about being sad, you would query on google `sad person frowning` and show that image around that sentence.
8 |
9 | I will give you a transcript which contains which words are shown at the screen, and the timestamps where they are shown. Understand the transcript, time images at timestamps, and write me the query for each image. For the image queries you have two choices: concrete objects, like 'cash', 'old table', and other objects, or people in situations like 'sad person', 'happy family', etc... Generate a maximum of <> image queries equally distributed in the video.
10 |
11 | Avoid depicting shocking or nude / crude images, since your video will get demonetized. The queries should bring images that represent objects and persons that are useful to understand the emotions and what is happening in the transcript. The queries should describe OBJECTS or PERSONS. So for something romantic, maybe a couple hugging, or a heart-shaped balloon.
12 |
13 | The images should be an image representation of what is happening. Use places and real life people as image queries if you find any in the transcript. Avoid using overly generic queries like 'smiling man' that can bring up horror movie pictures; use the word 'person' instead. Also try to use more specific words that describe the action or emotion in the scene.
14 |
15 | IMPORTANT OUTPUT RULES:
16 | 1. NEVER use abstract nouns in the queries
17 | 2. ALWAYS use real objects or persons in the queries
18 | 3. Choose more objects than people
19 | 4. Generate exactly <> queries
20 | 5. Output must be valid JSON in this format:
21 | {
22 | "image_queries": [
23 | {"timestamp": 1.0, "query": "happy person"},
24 | {"timestamp": 3.2, "query": "red car"}
25 | ]
26 | }
27 |
28 | Transcript:
29 | <>
30 |
31 | Generate exactly <> evenly distributed image queries based on the transcript above. Output ONLY the JSON response, no additional text.
--------------------------------------------------------------------------------
/shortGPT/prompt_templates/editing_generate_videos.yaml:
--------------------------------------------------------------------------------
1 | system_prompt: |
2 | You are an AI specialized in generating precise video search queries for video editing. You must output ONLY valid JSON in the specified format, with no additional text.
3 |
4 | chat_prompt: |
5 | You are a video editor specializing in creating engaging visual content. Your task is to generate video search queries that will be used to find background footage that matches the narrative of the video.
6 |
7 | For each time segment (4-5 seconds long), you need to suggest 3 alternative search queries that could be used to find appropriate video footage. Each query must be 1-2 words and should describe concrete, visual scenes or actions.
8 |
9 | Guidelines for queries:
10 | 1. Use ONLY English words
11 | 2. Keep queries between 1-2 words
12 | 3. Focus on visual, concrete objects or actions
13 | 4. Avoid abstract concepts
14 | 5. Include both static and dynamic scenes
15 | 6. Ensure queries are family-friendly and safe for monetization
16 |
17 | Good examples:
18 | - "ocean waves"
19 | - "typing keyboard"
20 | - "city traffic"
21 |
22 | Bad examples:
23 | - "feeling sad" (abstract)
24 | - "beautiful nature landscape morning sun" (too many words)
25 | - "confused thoughts" (not visual)
26 |
27 | The output must be valid JSON in this format:
28 | {
29 | "video_segments": [
30 | {
31 | "time_range": [0.0, 4.324],
32 | "queries": ["coffee steam", "hot drink", "morning breakfast"]
33 | },
34 | {
35 | "time_range": [4.324, 9.56],
36 | "queries": ["office work", "desk computer", "typing hands"]
37 | }
38 | ]
39 | }
40 |
41 | Timed captions:
42 | <>
43 |
44 | Generate video segments of 4-5 seconds covering the entire video duration.
45 | Make sure to perfectly fit the end of the video, with the EXACT same floating point accuracy as in the transcript above.
46 | Output ONLY the JSON response, no additional text.
--------------------------------------------------------------------------------
/shortGPT/prompt_templates/facts_generator.yaml:
--------------------------------------------------------------------------------
1 | system_prompt: >
2 | You are an expert content writer of a YouTube shorts channel. You specialize in `facts` shorts.
3 | Your facts shorts are less than 50 seconds verbally (around 140 words maximum). They are extremely captivating and original.
4 | The user will ask you a type of facts short and you will produce it.
5 | For example, when the user asks:
6 | `Weird facts`
7 | You produce the following content script:
8 |
9 | ---
10 | Weird facts you don't know.
11 | A swarm of 20,000 bees followed a car for two days because their queen was stuck inside.
12 | Crocodiles cannot stick their tongue out because it's attached to the roof of their mouths.
13 |
14 | If you tickle a rat day after day, it will start laughing whenever it sees you.
15 |
16 | In 2013, police in the Maldives arrested a coconut for loitering near a polling station for the presidential election.
17 | Locals feared the coconut may have been imbued with a black magic spell to influence the election.
18 |
19 | A Chinese farmer who always wanted to own his own plane built a full scale,
20 | non-working replica of an Airbus A320 out of 50 tons of steel. It took him and his friends over two years and cost over $400,000.
21 |
22 | When invited by a lady to spend a night with her, Benjamin Franklin asked to postpone until winter when nights were longer.
23 | ---
24 |
25 | You are now tasked with producing the greatest short script for the user's requested type of 'facts'.
26 | Only give the first `hook`, like "Weird facts you don't know. " in the example. Then the facts.
27 | Keep it short, extremely interesting and original.
28 |
29 | chat_prompt: >
30 | <>
31 |
32 |
33 |
34 |
35 |
36 |
--------------------------------------------------------------------------------
/shortGPT/prompt_templates/facts_subjects_generation.yaml:
--------------------------------------------------------------------------------
1 | system_prompt: >
2 |
3 | chat_prompt: >
4 | For a series of <> youtube videos about top 10 facts on a certain subject,
5 | pick a random subject. Be very original. Put it in the '`Subject` facts' format.
6 | Give the output in an array format that's JSON-parseable, like ['Police facts', 'Prison facts'].
7 | Only give the array and nothing else.
8 |
--------------------------------------------------------------------------------
/shortGPT/prompt_templates/reddit_extract_question.yaml:
--------------------------------------------------------------------------------
1 | system_prompt: |
2 | From the transcript of a reddit ask, tell me the question in the title. The transcript always answers the question that a redditor asks in the title of the thread.
3 | The question in the title must be a very short open-ended question that requires opinion/anecdotal-based answers. Examples of questions are:
4 | ---
5 | What’s the worst part of having a child?
6 | What screams “this person peaked in high school” to you?
7 | What was your “it can’t be that easy / it was that easy” moment in your life?
8 | ---
9 | Rules:
10 | Most important rule : The question MUST be directed at the person reading it, the subject of the question should ALWAYS be the reader. It must contain 'you' or 'your', or something asking THEM their experience.
11 | * The question is always very general, and then, people answer it with a specific anecdote that is related to that question. The question is always short and can bring spicy answers. By taking inspiration from the questions above, try to find the reddit thread question where we get the following anecdote.
12 | * The question NEVER contains "I" as it is NOT answered by the person asking it.
13 | * The question is NEVER too specific about a certain situation.
14 | * The question should be as short and concise as possible. NEVER be too wordy, it must be fast and concise, and it doesn't matter if it's too general.
15 | * The question must sound good to the ear, and bring interest. It should sound natural.
16 | * The question must use the vocabulary of reddit users. Young, not too complicated, and very straight to the point.
17 | * The question must be relatable for anyone, girl or guy.
18 | The question should ALWAYS START with "What"
19 | chat_prompt: |
20 | -Transcript:
21 | <>
22 | The question should ALWAYS START with "What"
23 | -Most probable very short and concise open-ended question from the transcript (50 characters MAXIMUM):
24 |
--------------------------------------------------------------------------------
/shortGPT/prompt_templates/reddit_filter_realistic.yaml:
--------------------------------------------------------------------------------
1 | system_prompt: |
2 | You are the judge of the story. Your goal will be to judge if it can possibly happen.
3 | If it's possible and the story makes sense, then it's a 10, and if it's something that wouldn't ever happen in real life or something that doesn't make sense at all, it's a 0.
4 | You have to be tolerant and keep in mind that the stories are sometimes very unlikely, but really happened, so you will only give a low score when something doesn't make sense in the story.
5 |
6 | For parsing purposes, you will ALWAYS give the output as a JSON OBJECT with the key `score` and the value being a number between 1 and 10.
7 | The output should be perfectly parseable JSON, like:
8 | {"score": 1.3}
9 |
10 | chat_prompt: |
11 | Story:
12 | <>
13 | Output:
--------------------------------------------------------------------------------
/shortGPT/prompt_templates/reddit_generate_question.yaml:
--------------------------------------------------------------------------------
1 | system_prompt: |
2 | You will write an interesting reddit ask thread question.
3 |
4 | Instructions for the question:
5 | The question must be a very short open-ended question that requires opinion/anecdotal-based answers. Examples of questions are:
6 | ---
7 | What’s the worst part of having a child?
8 | What screams “this person peaked in high school” to you?
9 | What was your “it can’t be that easy / it was that easy” moment in your life?
10 | Have you ever had a bad date turning into a good one?
11 | ---
12 | Most important rule for questions : The question MUST be directed at the person reading it, the subject of the question should ALWAYS be the reader. It must contain 'you' or 'your', or something asking THEM their experience.
13 | * The question is always very general, and then, people answer it with a specific anecdote that is related to that question. The question is always short and can bring spicy answers.
14 | * The question NEVER contains 'I' as it is NOT answered by the person asking it.
15 | * The question is NEVER too specific about a certain situation.
16 | * The question should be as short and concise as possible. NEVER be too wordy, it must be fast and concise.
17 | * The question must sound good to the ear, and bring interest. It should sound natural.
18 | * The question must use the vocabulary of reddit users. Young, not too complicated, and very straight to the point.
19 | * The question must spark curiosity and interest, and must create very entertaining answers
20 | * The question must be relatable for anyone, girl or guy.
21 | * The question is maximum 80 characters long
22 |
23 | chat_prompt: |
24 | Totally new question:
25 |
--------------------------------------------------------------------------------
/shortGPT/prompt_templates/reddit_generate_script.yaml:
--------------------------------------------------------------------------------
1 | system_prompt: |
2 | Instructions for the new story:
3 | You are a YouTube shorts content creator who makes extremely good YouTube shorts based on answers to AskReddit questions. I'm going to give you a question, and you will give an anecdote as if you are a redditor that answered that question (narrated with 'I' in the first person). The anecdote you will create will be used in a YouTube short that will get 1 million views.
4 | 1- The story must be between 120 and 140 words MAXIMUM.
5 | 2- DO NOT end the story with a moral conclusion or any sort of conclusion that elongates the personal story. Just stop it when it makes sense.
6 | 3- Make sure that the story is very SPICY, very unusual, HIGHLY entertaining to listen to, not boring, and not a classic story that everyone tells.
7 | 4- Make sure that the new short's content is totally captivating and will bang with the YouTube algorithm.
8 | 5- Make sure that the story directly answers the title.
9 | 6- Make the question sound like an r/AskReddit question: open-ended and very interesting, very short and not too specific.
10 | 7- The language used in the story must be familiar and casual, like a normal person telling a story would use. Even youthful.
11 | 8- The story must be narrated as if you're a friend of the viewer telling them about the story.
12 | 9- Start the story with 'I'
13 |
14 | chat_prompt: |
15 | Reddit question: <>
16 |
17 | -New generated story. The story has to be highly unusual and spicy and must really surprise its listeners and hook them into the story. Don't forget to make it between 120 and 140 words:
18 | Reddit, <>
--------------------------------------------------------------------------------
/shortGPT/prompt_templates/reddit_story_filter.yaml:
--------------------------------------------------------------------------------
1 | system_prompt: >
2 | You're a judge of the realism of a story for a youtube short.
3 | You must put yourself in the shoes of the youtube viewer hearing this story
4 | and determine whether it's total nonsense.
5 | Your goal will be to judge if it can possibly happen.
6 | If it's possible and the story makes sense, then it's a 10,
7 | and if it's something that wouldn't ever happen in real life or
8 | something that doesn't make sense at all, it's a 0.
9 |
10 | You have to be tolerant and keep in mind that the stories are meant to be unusual, they are sometimes very unlikely,
11 | but really happened, so you will only give a low score when something doesn't make sense in the story.
12 | For parsing purposes, you will ALWAYS give the output as a JSON OBJECT with the key
13 | 'score' and the value being a number between 1 and 10, and the key 'explanation'
14 | with one sentence explaining the score. Make this explanation maximum 4 words.
15 | The output should look like:
16 | {"score": 4.5, "explanation": "some words..."}
17 |
18 | Give perfect json with keys score and explanation, and nothing else.
19 |
20 | chat_prompt: >
21 | Story:
22 |
23 | <>
24 |
25 | Output:
26 |
27 |
--------------------------------------------------------------------------------
/shortGPT/prompt_templates/reddit_username.yaml:
--------------------------------------------------------------------------------
1 | system_prompt: >
2 |
3 | chat_prompt: >
4 | Generate a random Reddit name with one or two numbers inside the name. Only generate one name, and don't output anything else. Make it sound natural. The name must be between 7 and 10 characters:
5 | u/
6 |
7 |
--------------------------------------------------------------------------------
/shortGPT/prompt_templates/translate_content.yaml:
--------------------------------------------------------------------------------
1 | system_prompt: >
2 | You're an expert content translator to <>.
3 | The user will give you any text in any language, and your task is to perfectly translate it to <>.
5 |
6 | chat_prompt: >
7 | <>
--------------------------------------------------------------------------------
/shortGPT/prompt_templates/voice_identify_gender.yaml:
--------------------------------------------------------------------------------
1 | system_prompt: |
2 | I will give you a narrated transcript and you must identify if it's most probably a male or female.
3 | If you think the narrator is more probable to be a male, answer "male" and if you think it's female, say "female".
4 | If you don't know, just say male.
5 |
6 |
7 | chat_prompt: |
8 | Transcript:
9 |
10 | <>
11 |
12 | Gender of narrator:
13 |
14 |
--------------------------------------------------------------------------------
/shortGPT/prompt_templates/yt_title_description.yaml:
--------------------------------------------------------------------------------
1 | system_prompt: >
2 | You are a youtube shorts title and description expert writer.
3 | The user will give you the transcript of a youtube short, and you will create a title and a description. Depending on the audience and viewer demographics, you will adapt the title to be catchy.
4 | Use MAXIMUM 2 emojis in the title of the video (depending on the context; be careful)
5 | and use hashtags in the description
6 | The title has to be less than 80 characters (one small sentence of 10 words max)
7 | And the description maximum 240 characters (keep it small)
8 | You will give the title and description in a perfect json format. You will give nothing else but the perfect json object with keys `title` and `description`
9 | In your JSON, use the double quotes "" instead of ''
10 | chat_prompt: >
11 | <>
12 |
--------------------------------------------------------------------------------
/shortGPT/tracking/README.md:
--------------------------------------------------------------------------------
1 | # Module: Tracking
2 |
3 | ## Goal
4 | The `tracking` module is responsible for tracking and analyzing the usage and cost of various APIs used in the project. It includes two files: `api_tracking.py` and `cost_analytics.py`.
5 |
6 | ## File: api_tracking.py
7 |
8 | ### Class: APITracker
9 | This class is responsible for tracking the usage of APIs and saving the data to a content manager.
10 |
11 | #### Method: `__init__()`
12 | - Initializes the APITracker object.
13 | - Calls the `initiateAPITracking()` method.
14 |
15 | #### Method: `setDataManager(contentManager: ContentDataManager)`
16 | - Sets the content manager for storing the API usage data.
17 | - Raises an exception if the content manager is null.
18 |
19 | #### Method: `openAIWrapper(gptFunc)`
20 | - Wrapper function for OpenAI API calls.
21 | - Saves the API usage data to the content manager.
22 | - Returns the result of the API call.
23 |
24 | #### Method: `elevenWrapper(audioFunc)`
25 | - Wrapper function for Eleven API calls.
26 | - Saves the API usage data to the content manager.
27 | - Returns the result of the API call.
28 |
29 | #### Method: `wrap_turbo()`
30 | - Wraps the `llm_completion` function from the `gpt_utils` module using the `openAIWrapper` method.
31 | - Replaces the original function with the wrapped function.
32 |
33 | #### Method: `wrap_eleven()`
34 | - Wraps the `generateVoice` function from the `audio_generation` module using the `elevenWrapper` method.
35 | - Replaces the original function with the wrapped function.
36 |
37 | #### Method: `initiateAPITracking()`
38 | - Initiates the tracking of APIs by wrapping the necessary functions using the `wrap_turbo` and `wrap_eleven` methods.
39 |
40 |
41 | ## File: cost_analytics.py
42 |
43 | ### Function: calculateCostAnalytics()
44 | This function calculates the average usage and cost of OpenAI and Eleven APIs based on the data stored in the content database.
45 |
46 | - Initializes the content database.
47 | - Retrieves the API usage data from the database.
48 | - Calculates the average usage and cost for OpenAI and Eleven APIs.
49 | - Prints the results.
50 |
51 | ### Usage example:
52 | ```python
53 | calculateCostAnalytics()
54 | ```
55 |
56 | Note: The commented code at the end of the file is unrelated and can be ignored.
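57 |
58 | ### Usage example (APITracker):
59 | A minimal sketch, assuming a `ContentDataManager` instance named `content_manager` already exists (the name is illustrative):
60 | ```python
61 | tracker = APITracker()          # wraps llm_completion and generateVoice at init
62 | tracker.setDataManager(content_manager)
63 | # subsequent LLM / ElevenLabs calls now record token and character usage
64 | ```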
--------------------------------------------------------------------------------
/shortGPT/tracking/__init__.py:
--------------------------------------------------------------------------------
1 | from . import api_tracking
--------------------------------------------------------------------------------
/shortGPT/tracking/api_tracking.py:
--------------------------------------------------------------------------------
1 | from shortGPT.gpt import gpt_utils
2 | from shortGPT.database.content_data_manager import ContentDataManager
3 | import json
4 |
5 | class APITracker:
6 |
7 | def __init__(self):
8 | self.initiateAPITracking()
9 |
10 | def setDataManager(self, contentManager : ContentDataManager):
11 | if(not contentManager):
12 | raise Exception("contentManager is null")
13 | self.datastore = contentManager
14 |
15 | def openAIWrapper(self, gptFunc):
16 |
17 | def wrapper(*args, **kwargs):
18 | result = gptFunc(*args, **kwargs)
19 | prompt = kwargs.get('prompt') or kwargs.get('conversation') or args[0]
20 | prompt = json.dumps(prompt)
21 | if self.datastore and result:
22 | tokensUsed = gpt_utils.num_tokens_from_messages([prompt, result])
23 | self.datastore.save('api_openai', tokensUsed, add=True)
24 | return result
25 |
26 | return wrapper
27 |
28 | def elevenWrapper(self, audioFunc):
29 |
30 | def wrapper(*args, **kwargs):
31 | result = audioFunc(*args, **kwargs)
32 | textInput = kwargs.get('text') or args[0]
33 | if self.datastore and result:
34 | self.datastore.save('api_eleven', len(textInput), add=True)
35 | return result
36 |
37 | return wrapper
38 |
39 |
40 | def wrap_turbo(self):
41 | func_name = "llm_completion"
42 | module = __import__("gpt_utils", fromlist=["llm_completion"])
43 | func = getattr(module, func_name)
44 | wrapped_func = self.openAIWrapper(func)
45 | setattr(module, func_name, wrapped_func)
46 |
47 | def wrap_eleven(self):
48 | func_name = "generateVoice"
49 | module = __import__("audio_generation", fromlist=["generateVoice"])
50 | func = getattr(module, func_name)
51 | wrapped_func = self.elevenWrapper(func)
52 | setattr(module, func_name, wrapped_func)
53 |
54 |
55 | def initiateAPITracking(self):
56 | self.wrap_turbo()
57 | self.wrap_eleven()
58 |
59 |
60 |
61 |
--------------------------------------------------------------------------------
/shortGPT/tracking/cost_analytics.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from shortGPT.database.content_database import ContentDatabase
3 | db = ContentDatabase()
4 | all = []  # left unpopulated here; fill with the shorts fetched from db before running
5 | # Calculate average and price of the average for OpenAI
6 | openai_array = [short.get('api_openai') for short in all]
7 | avr_openai = np.mean(openai_array)
8 | OPENAI_CONST = 0.002 / 1000
9 | price_openai = avr_openai * OPENAI_CONST
10 | max_openai = max(openai_array)
11 | price_max_openai = max_openai * OPENAI_CONST
12 |
13 | # Calculate average and price of the average for Eleven
14 | eleven_array = [short.get('api_eleven') for short in all]
15 | avr_eleven = np.mean(eleven_array)
16 | ELEVEN_CONST = 0.3 / 1000
17 | price_eleven = avr_eleven * ELEVEN_CONST
18 | max_eleven = max(eleven_array)
19 | price_max_eleven = max_eleven * ELEVEN_CONST
20 |
21 |
22 |
23 | # Print results
24 | print("OpenAI:")
25 | print("- Average:", avr_openai)
26 | print("- Price of the average:", price_openai)
27 | print("- Max:", max_openai)
28 | print("- Price of the max:", price_max_openai)
29 |
30 | print("Eleven:")
31 | print("- Average:", avr_eleven)
32 | print("- Price of the average:", price_eleven)
33 | print("- Max:", max_eleven)
34 | print("- Price of the max:", price_max_eleven)
35 |
36 |
37 |
38 | # for id in ids:
39 | # builder = AskingRedditorShortBuilder(AR, id)
40 | # print(id, builder.dataManager.getVideoPath())
41 | #createShorts(30, 'AskingRedditors')
42 | # AR = ChannelManager("AskingRedditors")
43 | # newShort = AskingRedditorShortBuilder(channelDB= AR, short_id="FyhKkqx9xDxTEtRpanSD")
44 | # print(newShort.channelDB.getStaticEditingAsset('background_onepiece'))
45 | # print(newShort.channelDB.getStaticEditingAsset('reddit_template_dark'))
46 | # print(newShort.channelDB.getStaticEditingAsset('subscribe_animation'))
47 | #print("Scraping requests remaining: ",image_api.getScrapingCredits())
48 |
49 |
--------------------------------------------------------------------------------
/shortGPT/utils/cli.py:
--------------------------------------------------------------------------------
1 | from shortGPT.utils.requirements import Requirements
2 |
3 |
4 | class CLI:
5 |
6 | @staticmethod
7 | def display_header():
8 | '''Display the header of the CLI'''
9 | CLI.display_green_text('''
10 | .d88888b dP dP .88888. 888888ba d888888P .88888. 888888ba d888888P
11 | 88. "' 88 88 d8' `8b 88 `8b 88 d8' `88 88 `8b 88
12 | `Y88888b. 88aaaaa88 88 88 88aaaa8P' 88 88 88aaaa8P' 88
13 | `8b 88 88 88 88 88 `8b. 88 88 YP88 88 88
14 | d8' .8P 88 88 Y8. .8P 88 88 88 Y8. .88 88 88
15 | Y88888P dP dP `8888P' dP dP dP `88888' dP dP
16 |
17 | ''')
18 | CLI.display_green_text("Welcome to ShortGPT! This is an experimental AI framework to automate all aspects of content creation.")
19 | print("")
20 | CLI.display_requirements_check()
21 |
22 | @staticmethod
23 | def display_help():
24 | '''Display help'''
25 | print("Usage: python shortGPT.py [options]")
26 | print("")
27 | print("Options:")
28 | print(" -h, --help show this help message and exit")
29 |
30 | @staticmethod
31 | def display_requirements_check():
32 | '''Display information about the system and requirements'''
33 | print("Checking requirements...")
34 | requirements_manager = Requirements()
35 | print(" - Requirements : List of requirements and installed version:")
36 | all_req_versions = requirements_manager.get_all_requirements_versions()
37 | for req_name, req_version in all_req_versions.items():
38 | if req_version is None:
39 | CLI.display_red_text(f"---> Error : {req_name} is not installed")
40 | print(f"{req_name}=={req_version}")
41 |
42 | print("")
43 | # Skipping for now, because it assumes each package has the same name as its python import, which is often not the case.
44 | # if not requirements_manager.is_all_requirements_installed():
45 | # CLI.display_red_text("Error : Some requirements are missing")
46 | # print("Please install the missing requirements using the following command :")
47 | # print("pip install -r requirements.txt")
48 | # print("")
49 | # requirements_manager.get_all_requirements_not_installed()
50 | # print("")
51 |
52 | class bcolors:
53 | HEADER = '\033[95m'
54 | OKBLUE = '\033[94m'
55 | OKCYAN = '\033[96m'
56 | OKGREEN = '\033[92m'
57 | WARNING = '\033[93m'
58 | FAIL = '\033[91m'
59 | ENDC = '\033[0m'
60 | BOLD = '\033[1m'
61 | UNDERLINE = '\033[4m'
62 |
63 | @staticmethod
64 | def display_error(error_message, stack_trace):
65 | '''Display an error message in the console'''
66 | print(CLI.bcolors.FAIL + "ERROR : " + error_message + CLI.bcolors.ENDC)
67 | print(stack_trace)
68 | print("If the problem persists, don't hesitate to contact our support. We're here to assist you.")
69 | print("Get Help on Discord : https://discord.gg/qn2WJaRH")
70 |
71 | @staticmethod
72 | def get_console_green_text(text):
73 | '''Get the text in green color'''
74 | return CLI.bcolors.OKGREEN + text + CLI.bcolors.ENDC
75 |
76 | @staticmethod
77 | def get_console_red_text(text):
78 | '''Get the text in red color'''
79 | return CLI.bcolors.FAIL + text + CLI.bcolors.ENDC
80 |
81 | @staticmethod
82 | def get_console_yellow_text(text):
83 | '''Get the text in yellow color'''
84 | return CLI.bcolors.WARNING + text + CLI.bcolors.ENDC
85 |
86 | @staticmethod
87 | def get_console_blue_text(text):
88 | return CLI.bcolors.OKBLUE + text + CLI.bcolors.ENDC
89 |
90 | @staticmethod
91 | def get_console_bold_text(text):
92 | return CLI.bcolors.BOLD + text + CLI.bcolors.ENDC
93 |
94 | @staticmethod
95 | def get_console_underline_text(text):
96 | return CLI.bcolors.UNDERLINE + text + CLI.bcolors.ENDC
97 |
98 | @staticmethod
99 | def get_console_cyan_text(text):
100 | return CLI.bcolors.OKCYAN + text + CLI.bcolors.ENDC
101 |
102 | @staticmethod
103 | def get_console_header_text(text):
104 | return CLI.bcolors.HEADER + text + CLI.bcolors.ENDC
105 |
106 | @staticmethod
107 | def get_console_text(text, color):
108 | return color + text + CLI.bcolors.ENDC
109 |
110 | @staticmethod
111 | def display_blue_text(text):
112 | print(CLI.get_console_blue_text(text))
113 |
114 | @staticmethod
115 | def display_green_text(text):
116 | print(CLI.get_console_green_text(text))
117 |
118 | @staticmethod
119 | def display_red_text(text):
120 | print(CLI.get_console_red_text(text))
121 |
122 | @staticmethod
123 | def display_yellow_text(text):
124 | print(CLI.get_console_yellow_text(text))
125 |
126 | @staticmethod
127 | def display_bold_text(text):
128 | print(CLI.get_console_bold_text(text))
129 |
130 | @staticmethod
131 | def display_underline_text(text):
132 | print(CLI.get_console_underline_text(text))
133 |
134 | @staticmethod
135 | def display_cyan_text(text):
136 | print(CLI.get_console_cyan_text(text))
137 |
138 | @staticmethod
139 | def display_header_text(text):
140 | print(CLI.get_console_header_text(text))
141 |
--------------------------------------------------------------------------------
/shortGPT/utils/requirements.py:
--------------------------------------------------------------------------------
1 | import os
2 | import platform
3 |
4 |
5 | class Requirements:
6 | '''Manage requirements for the project'''
7 |
8 | def __init__(self):
9 | self.package_path = os.path.dirname(os.path.realpath(__file__))
10 | self.requirements_path = os.path.join(self.package_path, '..', '..', 'requirements.txt')
11 |
12 | def get_list_requirements(self):
13 | '''Get the list of requirements packages from requirements.txt'''
14 | with open(self.requirements_path) as f:
15 | requirements = f.read().splitlines()
16 |
17 | # remove comments and empty lines
18 | requirements = [line for line in requirements if not line.startswith('#')]
19 | requirements = [line for line in requirements if line.strip()]
20 |
21 | # drop URL / VCS requirements (git+, http(s), ssh); their lines aren't plain package names
22 | url_prefixes = ('git+', 'http', 'ssh', 'git')
23 | requirements = [line for line in requirements if not line.startswith(url_prefixes)]
27 |
28 | # sort alphabetically
29 | requirements.sort()
30 |
31 | return requirements
32 |
33 | def get_os_name(self):
34 | '''Get the name of the operating system'''
35 | return platform.system()
36 |
37 | def get_os_version(self):
38 | '''Get the version of the operating system'''
39 | return platform.version()
40 |
41 | def get_python_version(self):
42 | '''Get the version of Python installed'''
43 | return platform.python_version()
44 |
45 | def is_all_requirements_installed(self):
46 | '''Check if all requirements are installed'''
47 | requirements = self.get_list_requirements()
48 | for requirement in requirements:
49 | if not self.is_requirement_installed(requirement):
50 | return False
51 | return True
52 |
53 | def is_requirement_installed(self, package_name):
54 | '''Check if a package is installed'''
55 | import importlib
56 | try:
57 | importlib.import_module(package_name)
58 | return True
59 | except ImportError:
60 | return False
61 |
62 | def get_version(self, package_name):
63 | '''Get the version of a package'''
64 | import pkg_resources
65 | try:
66 | return pkg_resources.get_distribution(package_name).version
67 | except Exception:
68 | return None
69 |
70 | def get_all_requirements_versions(self):
71 | '''Get the versions of all requirements'''
72 | requirements = self.get_list_requirements()
73 | versions = {}
74 | for requirement in requirements:
75 | versions[requirement] = self.get_version(requirement)
76 | return versions
77 |
78 | def get_all_requirements_not_installed(self):
79 | '''Get the list of all requirements not installed'''
80 | requirements = self.get_list_requirements()
81 | not_installed = {}
82 | for requirement in requirements:
83 | # if version is None then the package is not installed
84 | if self.get_version(requirement) is None:
85 | not_installed[requirement] = self.get_version(requirement)
86 | return not_installed
87 |
88 |
89 | if __name__ == '__main__':
90 | '''Display information about the system and requirements'''
91 | requirements_manager = Requirements()
92 | # Skipping for now, because it assumes each package has the same name as its python import, which is often not the case.
93 | # if not requirements_manager.is_all_requirements_installed():
94 | # print("Error : Some requirements are missing")
95 | # print("Please install all requirements from requirements.txt")
96 | # print("You can install them by running the following command:")
97 | # print("pip install -r requirements.txt")
98 |
99 | print(f"System information:")
100 | print(f"OS name : {requirements_manager.get_os_name()}")
101 | print(f"OS version : {requirements_manager.get_os_version()}")
102 | print(f"Python version : {requirements_manager.get_python_version()}")
103 |
104 | # list all requirements and their versions
105 | print("List of all requirements and their versions:")
106 | all_req_versions = requirements_manager.get_all_requirements_versions()
107 | for req_name, req_version in all_req_versions.items():
108 | print(f"{req_name}=={req_version}")
109 |
110 | print("List of all requirements not installed:")
111 | all_req_not_installed = requirements_manager.get_all_requirements_not_installed()
112 | for req_name, req_version in all_req_not_installed.items():
113 | print(f"{req_name}=={req_version}")
114 |
--------------------------------------------------------------------------------
/videos/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory
2 | *
3 | # Except this file
4 | !.gitignore
5 | !archive/
--------------------------------------------------------------------------------
/videos/archive/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory
2 | *
3 | # Except this file
4 | !.gitignore
--------------------------------------------------------------------------------