├── src
│   ├── __init__.py
│   └── cheermeup.py
├── .gitignore
├── LICENSE
├── setup.py
└── README.md

/src/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.linuxenv/
.winenv/
__pycache__/
dist/
build/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
Copyright 2019 Gautham Venkataraman

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
import setuptools
import os

def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

setuptools.setup(
    name = 'cheermeup',
    version = '1.0.1',
    description = 'Command line program to play random animal videos.',
    long_description = read('README.md'),
    long_description_content_type='text/markdown',
    license_files = ('LICENSE',),
    url = 'https://github.com/andohuman/cheermeup',
    download_url = 'https://github.com/andohuman/cheermeup/archive/refs/tags/v1.0.1.tar.gz',
    author = 'Gautham Venkataraman',
    author_email = 'gauthsvenkat@gmail.com',
    maintainer = 'Gautham Venkataraman',
    maintainer_email = 'gauthsvenkat@gmail.com',
    packages = setuptools.find_packages(),
    install_requires = ['opencv-python', 'praw', 'appdirs'],
    entry_points = {
        'console_scripts': [
            'cheermeup=src.cheermeup:main',
        ],
    },
)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Cheermeup
Command line program that plays random animal videos from your favorite animal subreddits!

## Standalone [executable](https://github.com/andohuman/cheermeup/releases/latest) (Windows only)

![](https://i.imgur.com/uHT9ygp.gif)


## As a command-line app (available on Windows and Unix)

![](https://i.imgur.com/DPDNtVJ.gif)

Created by [u/andohuman](https://www.reddit.com/user/andohuman) | [@andohuman](https://twitter.com/andohuman)

## 1. Getting started
This app works by scraping videos from some of the most popular animal subreddits.

If you want to run the standalone binary (Windows only), download it from [here](https://github.com/andohuman/cheermeup/releases/latest).

If you would rather install it as a Python module and mess with the code, stick around.

## 2. Installation

Before proceeding with the actual installation, consider installing this package in a virtual Python environment.

### Installing venv
Install the virtualenv package with the following command (or use the `venv` module that ships with Python 3):

```pip3 install virtualenv```

You can now go ahead and create a virtual environment with the following command:

```python3 -m venv YOUR_ENV_NAME/```

Now activate the virtual environment you just created:
#### On Windows
```YOUR_ENV_NAME\Scripts\activate```
#### On Unix systems
```source YOUR_ENV_NAME/bin/activate```

You can now proceed with installing the package.

### Installing using pip
You can use the Python package manager pip to install this package, although it is recommended that you install from source to get the latest updates.

Run ```pip3 install cheermeup```

### Installing from source (Recommended)
1. Clone this repository
```git clone https://github.com/andohuman/cheermeup.git```

If you're on Windows you can download the zip from [here](https://github.com/andohuman/cheermeup/archive/master.zip) and extract the archive.

2. cd into the folder and install the package by executing

```cd cheermeup```

```pip3 install .```

Note: If you're installing on Linux without a virtual environment you might have to add ```/home/{YOUR_USERNAME}/.local/bin/``` to your ```$PATH```.

3. Run the app by executing ```cheermeup```.

Note: The first time you run the program (each day, by default) it may take a few seconds, depending on your internet connection, while it fetches the links and caches them.

If you want to scrape videos from your own subreddits you can do so with the ```--subreddits``` argument:
```cheermeup --subreddits SUBREDDIT_NAME SUBREDDIT_NAME SUBREDDIT_NAME ...```

The full list of available arguments is:
```
usage: cheermeup.py [-h] [--client-id CLIENT_ID] [--user-agent USER_AGENT] [--regex REGEX]
                    [--cache-file-name CACHE_FILE_NAME] [--period PERIOD] [--search-limit SEARCH_LIMIT]
                    [--force-rewrite-cache] [--tries TRIES] [--subreddits SUBREDDITS [SUBREDDITS ...]]

Play random animal videos from the commandline

optional arguments:
  -h, --help            show this help message and exit
  --client-id CLIENT_ID
                        client id for praw module to scrape reddit
  --user-agent USER_AGENT
                        user agent for praw module to scrape reddit
  --regex REGEX         regex pattern to match for video urls
  --cache-file-name CACHE_FILE_NAME
                        file name to cache urls for future retrieval
  --period PERIOD       time period in seconds after which a cache file is considered old. Default = 1 day
  --search-limit SEARCH_LIMIT
                        maximum number of posts to scrape from a specific subreddit
  --force-rewrite-cache
                        force rewrite cache even if it is not old
  --tries TRIES         maximum number of tries to play a video before giving up
  --subreddits SUBREDDITS [SUBREDDITS ...]
                        list of subreddits to search for videos
```
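
For example, the following invocation scrapes only the two subreddits named below (the names are just placeholders for your own picks), treats a cache older than one hour as stale, and tries up to ten posts before giving up:

```cheermeup --subreddits rarepuppers aww --period 3600 --tries 10```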

By default, cheermeup only scrapes videos from the following subreddits:

    babyelephantgifs
    partyparrot
    animalsbeingjerks
    animalsbeingderps
    animalsbeingconfused
    whatswrongwithyourdog
    startledcats
    zoomies

If you would like to check out more options, head over to [r/MadeMeSmile](https://www.reddit.com/r/MadeMeSmile/wiki/related-sub-suggestions)'s list of good subreddits and add your own with the ```--subreddits``` argument.

Keep in mind that the more subreddits you add, the longer it will take whenever the app has to rebuild its cache before playing a video (usually the first run of the day).

If you're seeing the same videos over and over again and would like to refresh the cache, run the app with the ```--force-rewrite-cache``` flag.
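
If you would rather drive the scraper from your own Python code instead of the ```cheermeup``` command, the helpers in ```src/cheermeup.py``` can be imported directly once the package is installed (note that the package is installed under the name ```src```, matching the console-script entry point). Below is a minimal sketch that mirrors the defaults in ```main()```; the two subreddit names are only illustrative.

```
import argparse
import random

from src import cheermeup

if __name__ == '__main__':  # guard needed because get_post_urls() uses multiprocessing
    # get_post_urls() expects an argparse-style namespace; these values mirror the
    # defaults defined in main(), apart from the illustrative pair of subreddits.
    args = argparse.Namespace(
        client_id='vHxB98R_R-8HYA',
        user_agent='cheermeup',
        cache_file_name='cheermeupurls.txt',
        period=86400,
        search_limit=100,
        force_rewrite_cache=False,
        subreddits=['zoomies', 'startledcats'],
    )
    regex = r'https://(thumbs|i)\.(gfycat|imgur)\.com/[\w-]+\.(mp4|webm)'

    post_urls = cheermeup.get_post_urls(args)          # cached or freshly scraped post urls
    video_url = cheermeup.post_url_to_video_url(random.choice(post_urls), regex)
    if video_url is not None:
        cheermeup.play_video_from_url(video_url)       # opens a cv2 window; press 'q' to quit
```

Because the cache file name and location match the command-line defaults, this snippet reuses whatever ```cheermeupurls.txt``` the ```cheermeup``` command has already written for the day.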

## 2.1 (optional | Windows only) Instructions to build the standalone executable
We need the pyinstaller Python package, which builds an executable from a given Python source.

Run ```pip3 install pyinstaller```

cd into the src directory and execute
```pyinstaller -w -F cheermeup.py```

The ```-w``` flag suppresses the terminal window and the ```-F``` flag makes pyinstaller produce a single executable file.
You will find the newly built binary ```cheermeup.exe``` in the ```dist``` folder.



## 3. Improvements (help needed)

1. Please note that this is an alpha project I put together in a couple of hours and it might not be stable all the time. Feel free to raise an issue and I'll try to look into it.

2. I also want to release Linux and macOS binaries, but pyinstaller and cx_freeze don't play well with cv2, which is a dependency, and I don't have a Mac to test this tool on. If anyone would like to help or contribute (in this regard or with any other bugs you find), please open a pull request.

--------------------------------------------------------------------------------
/src/cheermeup.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
import praw
import random
import os
import appdirs
import urllib.request
import urllib.error
import re
import cv2
import time
import argparse
import multiprocessing
import functools


def is_not_old(filepath, period):
    return not (time.time() - os.path.getmtime(filepath) > period)


def url_is_not_image(url):
    return (not (url.endswith('.jpg') or url.endswith('.png') or url.endswith('.jpeg'))) \
        and (url.find('redd.it') > -1 or url.find('gfycat') > -1 or url.find('imgur') > -1) #make sure the post links to one of the supported video domains


def scrape_subreddit(reddit, search_limit, subreddit_name):

    all_urls = [submission.url for submission in reddit.subreddit(subreddit_name).hot(limit = search_limit)] #search the hot posts of a specific subreddit
    non_image_urls = list(filter(url_is_not_image, all_urls)) #filter out image urls

    return non_image_urls


def write_urls_to_cache(urls, cache_file_path):
    urls = random.sample(urls, len(urls)) #shuffle the urls a bit before writing them to cache
    with open(cache_file_path, 'w') as file:
        for url in urls:
            file.write("{}\n".format(url))
    return urls


def gifvmp4(url):
    return url.replace('.gifv', '.mp4') if url.endswith('.gifv') else url #cv2 can't read gifv links correctly, so rewrite them to mp4 links


def get_post_urls(args):

    cache_file_path = os.path.join(appdirs.user_cache_dir(), args.cache_file_name)

    if os.path.exists(cache_file_path) and is_not_old(cache_file_path, args.period) and (not args.force_rewrite_cache):
        with open(cache_file_path, 'r') as file:
            post_urls = [line.rstrip() for line in file]

    else:
        reddit = praw.Reddit(client_id = args.client_id,
                             client_secret = None,
                             user_agent = args.user_agent,
                             check_for_updates=False,
                             comment_kind="t1",
                             message_kind="t4",
                             redditor_kind="t2",
                             submission_kind="t3",
                             subreddit_kind="t5",
                             trophy_kind="t6",
                             oauth_url="https://oauth.reddit.com",
                             reddit_url="https://www.reddit.com",
                             short_url="https://redd.it",
                             ratelimit_seconds=5,
                             timeout=16) #create the praw reddit object
        num_processes = min(multiprocessing.cpu_count(), len(args.subreddits)) #choose an optimal number of parallel processes for scraping

        map_func = functools.partial(scrape_subreddit, reddit, args.search_limit) #partial function because Pool.map only passes a single argument

        with multiprocessing.Pool(num_processes) as p:
            list_of_lists_urls = p.map(map_func, args.subreddits, chunksize=1) #run scrape_subreddit for all subreddits in parallel

        post_urls = [url for subreddit_urls in list_of_lists_urls for url in subreddit_urls] #flatten the list of lists

        post_urls = write_urls_to_cache(post_urls, cache_file_path)

    return post_urls

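
# post_url_to_video_url() below turns a reddit post url into something cv2 can stream:
#   - direct .mp4/.gifv/.webm links are used as-is (after rewriting .gifv to .mp4),
#   - gfycat/imgur pages are downloaded and searched with the supplied regex for the direct video url,
#   - v.redd.it posts are probed for a DASH rendition (720p, then 480p, then 360p),
#   - anything else resolves to None and the caller retries with a different post.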
def post_url_to_video_url(url, regex):

    if url.endswith('.mp4') or url.endswith('.gifv') or url.endswith('.webm'):
        return gifvmp4(url)

    elif 'gfycat' in url or 'imgur' in url: #we can't use imgur or gfycat page links directly and have to find the video link inside the webpage
        page = urllib.request.urlopen(url).read().decode('utf8')
        match = re.search(regex, page) #try to match the video url string with our regex
        url = match.group(0)
        return gifvmp4(url)

    elif 'v.redd.it' in url: #we can't use redd.it links directly and have to find the full video link
        for res in ['/DASH_720.mp4', '/DASH_480.mp4', '/DASH_360.mp4']:
            try:
                urllib.request.urlopen(url + res).read()
                return gifvmp4(url + res)
            except urllib.error.HTTPError:
                continue

    return None #give up


def play_video_from_url(url):

    vid = cv2.VideoCapture(url)
    rval, frame = vid.read()

    while rval:
        cv2.imshow('press \'q\' to quit', frame)
        if cv2.waitKey(40) & 0xFF == ord('q'):
            break
        rval, frame = vid.read()

    vid.release()
    cv2.destroyAllWindows()


def main():

    parser = argparse.ArgumentParser(description = 'Play random animal videos from the commandline')
    parser.add_argument('--client-id',
                        type = str,
                        default = 'vHxB98R_R-8HYA',
                        help = 'client id for praw module to scrape reddit')
    parser.add_argument('--user-agent',
                        type = str,
                        default = 'cheermeup',
                        help = 'user agent for praw module to scrape reddit')
    parser.add_argument('--regex',
                        type = str,
                        default = r'https://(thumbs|i)\.(gfycat|imgur)\.com/[\w-]+\.(mp4|webm)',
                        help = 'regex pattern to match for video urls')
    parser.add_argument('--cache-file-name',
                        type = str,
                        default = 'cheermeupurls.txt',
                        help = 'file name to cache urls for future retrieval')
    parser.add_argument('--period',
                        type = int,
                        default = 86400,
                        help = 'time period in seconds after which a cache file is considered old. Default = 1 day')
    parser.add_argument('--search-limit',
                        type = int,
                        default = 100,
                        help = 'maximum number of posts to scrape from a specific subreddit')
    parser.add_argument('--force-rewrite-cache',
                        action = 'store_true',
                        help = 'force rewrite cache even if it is not old')
    parser.add_argument('--tries',
                        type = int,
                        default = 5,
                        help = 'maximum number of tries to play a video before giving up')
    parser.add_argument('--subreddits',
                        type = str,
                        nargs = '+',
                        default = ['babyelephantgifs',
                                   'partyparrot',
                                   'animalsbeingjerks',
                                   'animalsbeingderps',
                                   'animalsbeingconfused',
                                   'whatswrongwithyourdog',
                                   'zoomies',
                                   'startledcats'],
                        help = 'list of subreddits to search for videos')
    args = parser.parse_args()

    post_urls = get_post_urls(args)

    #try up to args.tries posts before giving up
    for i in range(args.tries):
        post_url = random.choice(post_urls)

        try:
            video_url = post_url_to_video_url(post_url, args.regex)
            assert video_url is not None
            play_video_from_url(video_url)
            break

        except Exception: #skip posts whose video url can't be resolved or played
            continue


if __name__ == '__main__':
    multiprocessing.freeze_support() #required when running as a frozen windows executable
    main()
--------------------------------------------------------------------------------