├── .gitignore
├── .travis.yml
├── LICENSE
├── readme.md
├── requirements.txt
└── src
├── .DS_Store
├── __init__.py
├── __main__.py
├── __version__.py
├── cust_utils
├── __init__.py
├── browser_instance.py
├── path_util.py
└── utils.py
├── movies.py
└── movies_api.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /workspace.xml
3 | src/.idea/*
4 | src/dist/*
5 | *.pyc
6 | .idea/*
7 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | matrix:
2 | include:
3 | - language: python
4 | python:
5 | - 2.7
6 | dist: trusty
7 | install:
8 | - "pip install -r requirements.txt"
9 | script:
10 | - cd src
11 | - python __main__.py --build
12 | - cd ..
13 | notifications:
14 | email:
15 | - xonshiz@gmail.com
16 | - language: python
17 | python:
18 | - 3.5
19 | dist: trusty
20 | install:
21 | - "pip install -r requirements.txt"
22 | script:
23 | - cd src
24 | - python __main__.py --build
25 | - cd ..
26 | notifications:
27 | email:
28 | - xonshiz@gmail.com
29 | - os: linux
30 | language: python
31 | python:
32 | - 3.8
33 | dist: xenial
34 | before_install:
35 | - "pip install --upgrade pip"
36 | install:
37 | - "python --version"
38 | - "pip install -r requirements.txt"
39 | - "pip install pyinstaller"
40 | script:
41 | - cd src
42 | - python __main__.py --build
43 | - pyinstaller --onefile --hidden-import=queue "__main__.py" -n "new_movies_123_dl_linux"
44 | - ls
45 | - cd ..
46 | notifications:
47 | email:
48 | - xonshiz@gmail.com
49 | before_deploy:
50 | - export TRAVIS_TAG="1.0.$TRAVIS_BUILD_NUMBER"
51 | - echo "$TRAVIS_TAG" "$TRAVIS_COMMIT"
52 | - git config --local user.name "$USER_NAME"
53 | - git config --local user.email "$USER_EMAIL"
54 | - git tag "$TRAVIS_TAG" "$TRAVIS_COMMIT"
55 | deploy:
56 | - provider: releases
57 | tag_name: $TRAVIS_TAG
58 | overwrite: true
59 | api_key: $GITHUB_TOKEN
60 | name: "new_movies_123_dl"
61 | file: "src/dist/new_movies_123_dl_linux"
62 | skip_cleanup: true
63 | draft: false
64 | on:
65 | branch: master
66 | - os: windows
67 | language: sh
68 | python: "3.8"
69 | before_install:
70 | - choco install python --version 3.8.0
71 | - python --version
72 | - export PATH="/c/Python38:/c/Python38/Scripts:$PATH"
73 | - python -m pip install --upgrade pip
74 | env: PATH=/c/Python38:/c/Python38/Scripts:$PATH
75 | install:
76 | - "pip install -r requirements.txt"
77 | - "pip install pyinstaller"
78 | script:
79 | - cd src
80 | - python __main__.py --build
81 | - pyinstaller --onefile --hidden-import=queue "__main__.py" -n "new_movies_123_dl.exe"
82 | - ls
83 | - cd ..
84 | notifications:
85 | email:
86 | - xonshiz@gmail.com
87 | before_deploy:
88 | - export TRAVIS_TAG="1.0.$TRAVIS_BUILD_NUMBER"
89 | - echo "$TRAVIS_TAG" "$TRAVIS_COMMIT"
90 | - git config --local user.name "$USER_NAME"
91 | - git config --local user.email "$USER_EMAIL"
92 | - git tag "$TRAVIS_TAG" "$TRAVIS_COMMIT"
93 | deploy:
94 | - provider: releases
95 | tag_name: $TRAVIS_TAG
96 | overwrite: true
97 | api_key: $GITHUB_TOKEN
98 | name: "new_movies_123_dl"
99 | file: "src/dist/new_movies_123_dl.exe"
100 | skip_cleanup: true
101 | draft: false
102 | on:
103 | branch: master
104 | - os: osx
105 | language: sh
106 | python: "3.8"
107 | before_install:
108 | - python3 --version
109 | - python3 -m pip install --upgrade pip
110 | install:
111 | - "pip install -r requirements.txt"
112 | - "pip install pyinstaller"
113 | script:
114 | - cd src
115 | - python3 __main__.py --build
116 | - pyinstaller --onefile --hidden-import=queue "__main__.py" -n "new_movies_123_dl_osx"
117 | - ls
118 | - cd ..
119 | notifications:
120 | email:
121 | - xonshiz@gmail.com
122 | before_deploy:
123 | - export TRAVIS_TAG="1.0.$TRAVIS_BUILD_NUMBER"
124 | - echo "$TRAVIS_TAG" "$TRAVIS_COMMIT"
125 | - git config --local user.name "$USER_NAME"
126 | - git config --local user.email "$USER_EMAIL"
127 | deploy:
128 | - provider: releases
129 | tag_name: $TRAVIS_TAG
130 | overwrite: true
131 | api_key: $GITHUB_TOKEN
132 | name: "new_movies_123_dl"
133 | file: "src/dist/new_movies_123_dl_osx"
134 | skip_cleanup: true
135 | draft: false
136 | on:
137 | branch: master
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | The MIT License (MIT)
3 |
4 | Copyright (c) 2013-2021 Blackrock Digital LLC.
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in
14 | all copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 | THE SOFTWARE.
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | # [new-movies123.co Downloader](https://new-movies123.co/) | [](https://travis-ci.com/github/Xonshiz/123MoviesRIpper) [](https://github.com/Xonshiz/123MoviesRIpper/releases/) [](https://github.com/xonshiz/123MoviesRIpper/releases)
2 | Script to bypass resolution and IP Limit on new-movies123.co
3 |
4 | This script will download both an mp4 1080p (or the next highest available stream) and subtitles (.srt).
5 |
6 | Just download the proper binary and execute it. It'll ask for the URL, paste the URL and wait till the script downloads the series.
7 |
8 | **NOTE** : Must have youtube-dl installed and available in path.
9 |
10 | # Things To Know
11 | - It'll work for TV Series.
12 | - I have not yet tested this with Movies and I'm 90% sure that it'll break.
13 | - Requires "youtube-dl" to download the video streams.
14 | - It's best to provide URL of the 1st episode, because this script doesn't have proper validations and will throw errors if a file already exists.
15 | - If you can't find binary for your operating system, that means TravisCI had issues making a binary and you would need to install python and download the code from this repository and then run it yourself.
16 |
17 | # Running Python Code
18 | - Download the code from this repository.
19 | - Open terminal and go into 123MoviesRipper directory and run this command: `pip install -r requirements.txt`.
20 | - Then you can move into src directory in your terminal.
21 | - When you're into src directory, run this command: `python __main__.py`
22 |
23 |
24 | P.S: This is a script I hacked around last night, because amazon prime decided not to make season 3 of "Deutschland 83" available in most regions and this website was the only way to grab the episodes.
25 |
26 | Welp, use with caution. And as usual, no harm intended via this script.
27 |
28 | Please do use the script carefully, don't hog the website bandwidth.
29 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | bs4
2 | pathlib  # NOTE: part of the standard library since Python 3.4; this backport is only needed for the Python 2.7 build
3 | requests
--------------------------------------------------------------------------------
/src/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xonshiz/123MoviesRIpper/014fb35660ed34340b9d1a15b9f977e37fdf256e/src/.DS_Store
--------------------------------------------------------------------------------
/src/__init__.py:
--------------------------------------------------------------------------------
1 | # #!/usr/bin/env python
2 | # # -*- coding: utf-8 -*-
3 |
--------------------------------------------------------------------------------
/src/__main__.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""CLI entry point: forwards command-line arguments and the current working
directory to Movies, which drives the whole download flow."""

import os
import sys
# NOTE(review): movies.py is a sibling inside src/, so this parent-dir append
# looks unnecessary when run from src/ — confirm before removing.
sys.path.append("..")
from movies import *

if __name__ == "__main__":
    # Movies.__init__ does all the work (handles --build, prompts for a URL).
    Movies(sys.argv[1:], os.getcwd())
    sys.exit()
--------------------------------------------------------------------------------
/src/__version__.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Single source of truth for the package version (date-based: YYYY.MM.DD)."""

__version__ = "2021.01.13"
--------------------------------------------------------------------------------
/src/cust_utils/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from . import path_util
5 | from . import utils
6 | from . import browser_instance
7 |
--------------------------------------------------------------------------------
/src/cust_utils/browser_instance.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | import requests
4 | import json
5 | from random import random
6 | import logging
7 |
8 |
def get_user_agent():
    """Return the fixed desktop-Chrome User-Agent string sent with every request."""
    return (
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)'
        ' Chrome/56.0.2924.87 Safari/537.36'
    )
13 |
14 |
def get_request(url, text_only=False, **kwargs):
    """GET *url* and return the parsed JSON body.

    Keyword args:
        proxy: optional list of proxy URLs; one is picked at random.
        xml_http_request: when True, send the X-Requested-With header so the
            site serves its AJAX response.

    Returns the raw response body (bytes) when text_only is True, otherwise
    the JSON-decoded response; None when the server does not answer 200.
    """
    _proxy = kwargs.get("proxy")
    _rand_proxy = None
    headers = {
        'User-Agent': get_user_agent(),
        'Accept-Encoding': 'gzip, deflate',
    }
    if kwargs.get('xml_http_request', False):
        headers['X-Requested-With'] = 'XMLHttpRequest'

    if _proxy and len(_proxy) > 0:
        # BUG FIX: the module imports `from random import random`, so the old
        # `random.choice(_proxy)` raised AttributeError (the name `random` was
        # a function, not the module). Bind the real helper locally instead.
        from random import choice
        try:
            _rand_proxy = choice(_proxy)
        except IndexError as error:
            print("Proxy Failed : {0}".format(error))
            print("Continuing Without Proxy.")
            _rand_proxy = None

    # With no proxy selected both values are None, which requests treats as
    # "no proxy" — safe to pass unconditionally.
    proxy = {
        "http": _rand_proxy,
        "https": _rand_proxy
    }

    logging.debug('GET url: {0}'.format(url))
    logging.debug('GET proxy: {0}'.format(proxy))

    sess = requests.session()
    connection = sess.get(url, headers=headers, proxies=proxy)

    if connection.status_code != 200:
        print("Whoops! Seems like I can't connect to website.")
        print("It's showing : %s" % connection)
        print("Run this script with the --verbose argument and report the issue along with log file on Github.")
        print("Can't connect to website %s" % url)
        return None
    else:
        if text_only:
            # NOTE: despite the name, this returns the raw bytes body.
            return connection.content
        return json.loads(connection.text.encode("utf-8"))
54 |
55 |
def post_request(url, data, cookie_value, **kwargs):
    """POST *data* to *url* with a mandatory Cookie header; return parsed JSON.

    Raises:
        Warning: when cookie_value is falsy (the request cannot succeed without it).

    Keyword args:
        proxy: optional list of proxy URLs; one is picked at random.

    Returns the JSON-decoded response, or None when the server does not answer 200.
    """
    _proxy = kwargs.get("proxy")
    _rand_proxy = None
    if not cookie_value:
        raise Warning("No Cookie Value Provided. Exiting")
    headers = {
        'User-Agent': get_user_agent(),
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept': '*/*',
        'Content-Type': 'application/json',
        'Cookie': cookie_value
    }
    if _proxy and len(_proxy) > 0:
        # BUG FIX: module-level `from random import random` made the old
        # `random.choice(_proxy)` raise AttributeError; use the function directly.
        from random import choice
        try:
            _rand_proxy = choice(_proxy)
        except IndexError as error:
            print("Proxy Failed : {0}".format(error))
            print("Continuing Without Proxy.")
            _rand_proxy = None

    proxy = {
        "http": _rand_proxy,
        "https": _rand_proxy
    }
    logging.debug('POST url: {0}'.format(url))
    logging.debug('POST proxy: {0}'.format(proxy))
    sess = requests.session()
    connection = sess.post(url, data=data, headers=headers, proxies=proxy)

    if connection.status_code != 200:
        print("Whoops! Seems like I can't connect to website.")
        print("It's showing : %s" % connection)
        print("Run this script with the --verbose argument and report the issue along with log file on Github.")
        print("Can't connect to website %s" % url)
        return None
    else:
        return json.loads(connection.text.encode("utf-8"))
93 |
--------------------------------------------------------------------------------
/src/cust_utils/path_util.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | import os
4 | from pathlib import Path
5 |
6 |
def get_abs_path_name(file_path, file_name):
    """Concatenate *file_path* and *file_name* and return the absolute path.

    NOTE: callers pass file_name with a leading os.sep, so plain string
    concatenation (not os.path.join) is the intended joining rule here.
    """
    combined = '{0}{1}'.format(file_path, file_name)
    return os.path.abspath(combined)
9 |
10 |
def file_exists(file_path, file_name):
    """Return True when file_path+file_name names an existing regular file."""
    # Same joining rule as get_abs_path_name: plain concatenation, then abspath.
    return os.path.isfile(os.path.abspath(file_path + file_name))
13 |
14 |
def create_paths(directory):
    """Create *directory* (with parents) if missing and return its absolute path."""
    target = os.path.abspath(directory)
    Path(target).mkdir(parents=True, exist_ok=True)
    return target
18 |
--------------------------------------------------------------------------------
/src/cust_utils/utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | from . import path_util
4 | import subprocess
5 |
6 |
def create_file(file_path, file_name, data_to_write):
    """Write *data_to_write* as text to file_path+file_name.

    Returns True on success, False when the (stringified) data is empty.

    NOTE(review): a None argument becomes the literal string "None" and is
    written out — confirm callers never pass None expecting a skip.
    """
    if not isinstance(data_to_write, str):
        data_to_write = str(data_to_write)
    if not data_to_write or not str(data_to_write).strip():
        print("Empty data provided for {0}".format(file_name))
        return False
    file_location = path_util.get_abs_path_name(file_path, file_name)
    # BUG FIX: write as UTF-8 explicitly; the platform default encoding
    # (e.g. cp1252 on Windows) raised UnicodeEncodeError on non-ASCII text.
    with open(file_location, 'w', encoding='utf-8') as f:
        f.write(data_to_write)
        f.flush()
    return True
18 |
19 |
def create_file_binary_mode(file_path, file_name, data_to_write):
    """Write *data_to_write* (bytes) to file_path+file_name; True on success."""
    # Guard: reject empty payloads (empty bytes are falsy and caught here).
    if not data_to_write or not str(data_to_write).strip():
        print("Empty data provided for {0}".format(file_name))
        return False
    target_location = path_util.get_abs_path_name(file_path, file_name)
    with open(target_location, 'wb') as out_file:
        out_file.write(data_to_write)
        out_file.flush()
    return True
29 |
30 |
def read_file_data(file_path, file_name):
    """Read file_path+file_name as text; return its stripped content, or None if blank."""
    file_location = path_util.get_abs_path_name(file_path, file_name)
    content = None
    # BUG FIX: read as UTF-8 explicitly; the platform default encoding could
    # mis-decode (or raise on) non-ASCII content written elsewhere as UTF-8.
    with open(file_location, 'r', encoding='utf-8') as f:
        content = f.read().strip()
    return None if content == "" else content
37 |
38 |
def get_clean_path_name(path_name):
    """Return *path_name* with filesystem-unsafe characters replaced by ' -'."""
    unsafe_chars = '\\/*?:"<>|,;\''
    # One C-level pass instead of a chain of .replace() calls.
    translation = str.maketrans({ch: ' -' for ch in unsafe_chars})
    return path_name.translate(translation)
43 |
44 |
def get_youtube_dl_command(file_location, video_url):
    """Build the youtube-dl shell command that saves *video_url* to *file_location*."""
    return 'youtube-dl -i "{url}" -o "{out}"'.format(url=video_url, out=file_location)
48 |
49 |
def call_youtube_dl(youtube_dl_command):
    """Run the given youtube-dl command line and return its exit code.

    NOTE(security): the command runs through the shell; callers build it from
    page-derived URLs, so keep quoting in get_youtube_dl_command intact.
    """
    process = subprocess.Popen(youtube_dl_command, shell=True, stdout=subprocess.PIPE)
    # BUG FIX: wait() with stdout=PIPE can deadlock once the OS pipe buffer
    # fills with youtube-dl's progress output; communicate() drains stdout
    # while waiting for the process to exit.
    process.communicate()
    return process.returncode
54 |
--------------------------------------------------------------------------------
/src/movies.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | import sys
6 | import re
7 | import argparse
8 | import logging
9 | import platform
10 | from cust_utils import *
11 | from __version__ import __version__
12 | from movies_api import *
13 | from bs4 import BeautifulSoup
14 |
15 |
class Movies:
    """Drives the whole download flow: prompts for an episode URL, discovers the
    season's episode list from the page's JSON-LD metadata, downloads subtitles,
    and queues youtube-dl commands for the video streams.

    NOTE(review): this block was damaged in extraction — indentation was lost and
    HTML-tag strings were stripped (see the empty ``.replace('', '')`` calls and
    the truncated subtitles regex). Indentation below is reconstructed; verify
    against the original repository before relying on exact nesting.
    """

    def __init__(self, argv, cwd):
        # --build is a CI smoke check: import succeeded, exit immediately.
        if len(argv) > 0:
            print("Arguments: {0}".format(argv))
            if argv[0] == '--build':
                print("Got the build working check. Exiting.")
                sys.exit(0)
        # Queue of youtube-dl command strings, drained by get_episode_list.
        self.queue = []
        # NOTE(review): the locals below (related_episodes, file_name, etc.)
        # are never read in this constructor — leftovers from refactoring.
        related_episodes = []
        url = None
        file_name = None
        video_file_name = None
        base_page_content = None
        youtube_dl_command = None
        # url = 'https://new-movies123.co/tv-series/deutschland-89-season-1/UyTSbjuh/917zdf25/abfjn1jk-watch-online-for-free.html'
        resolution = '1080'
        while not url:
            url = input("Enter Movie URL : ").strip()
        # Always true after the loop above; kept as-is from the original.
        if url:
            self.get_episode_list(url=url)

    def single_episode(self, url, file_name, add_to_queue=False):
        """Download one episode's subtitles and queue (or run) its video download.

        url: episode page URL. file_name: subtitle file name, or None to derive
        it from page metadata. add_to_queue: queue the youtube-dl command
        instead of running it immediately. Returns 0.
        """
        # AJAX endpoint returns the stream descriptors; the literal string
        # "none" signals the site's per-IP rate limit.
        xml_request_data = get_xml_http_request(url)
        if xml_request_data and xml_request_data == "none":
            print("IP Limit Reached")
            sys.exit(1)
        if xml_request_data:
            xml_request_data = dict(xml_request_data[0])
            video_url = xml_request_data.get('src', None)
            max_resolution = xml_request_data.get('max', None)
            # Some responses use 'file' instead of 'src' for the stream URL.
            if not video_url:
                video_url = xml_request_data.get('file', None)
            if not video_url:
                print("Couldn't get Video Stream URL.")
                sys.exit(1)
            if video_url:
                print("Got Video Stream.")
                if max_resolution:
                    print("Will Be Downloading {0} Stream.".format(max_resolution))
                    # The default stream URL carries '/360?'; swap in the best
                    # advertised resolution to bypass the site's quality cap.
                    video_url = str(video_url).replace('/360?', '/{0}?'.format(max_resolution))
                else:
                    print("Couldn't Find Max Resolution. Going with default {0}.".format(
                        xml_request_data.get('label', '')))

        base_page_content = get_http_request(url, text_only=True)
        if not base_page_content:
            print("Can't Parse Basic Info")
            # ASK USER FOR FILE NAME
            while not file_name:
                file_name = input("Enter file name : ").strip()
        else:
            soup = BeautifulSoup(base_page_content, 'html.parser')

            # JSON-LD blocks: [0] is the season metadata, [1] the current episode.
            video_metadata = soup.find_all('script', type='application/ld+json')
            if not video_metadata:
                print("Can't find metadata")
            if len(video_metadata) > 1:
                # NOTE(review): these .replace('', '') calls originally stripped
                # the surrounding <script> tags; the tag strings were lost in
                # extraction — restore them from the original source.
                metadata_json = str(video_metadata[0]).replace('', '')
                season_metadata = dict(json.loads(str(metadata_json)))
                current__episode_metadata_json = str(video_metadata[1]).replace(
                    '', '')
                current_video_metadata = dict(json.loads(current__episode_metadata_json))
                current_episode_list = current_video_metadata.get('itemListElement')
                if current_episode_list and len(current_episode_list) > 0:
                    # The last breadcrumb item names the current episode.
                    episode_dict = dict(current_episode_list[-1])
                    episode_item = episode_dict.get('item')
                    current_episode_name = utils.get_clean_path_name(dict(episode_item).get('name'))
                    file_name = '{0}.srt'.format(current_episode_name)
                    video_file_name = '{0}.mp4'.format(current_episode_name)
        # NOTE(review): this regex looks truncated by extraction (a non-greedy
        # group at end-of-pattern matches the empty string) — confirm the
        # original terminator against the repository.
        subs_json = re.search(r'window.subtitles = (.*?)', str(base_page_content))
        if subs_json:
            # SECURITY: eval() on page-derived text executes untrusted input —
            # should be json.loads or ast.literal_eval; flagged, not changed here.
            subtitle_info_list = eval(subs_json.group(1))
            subtitle_info = dict(subtitle_info_list[0]).get('src')
            if subtitle_info:
                subtitle_src = str(subtitle_info).replace('\\', '')
                subtitle_content = browser_instance.get_request(subtitle_src, text_only=True)
                # URL path segment 4 is the series slug, used as the folder name.
                series_name = url.split('/')[4]
                if not path_util.file_exists('dist', os.sep + series_name):
                    path_created = path_util.create_paths('dist' + os.sep + series_name + os.sep + current_episode_name)
                    if path_created:
                        file_written = utils.create_file_binary_mode(path_created, os.sep + file_name, subtitle_content)
                        if file_written:
                            print("Downloaded : {0}".format(file_name))
        # NOTE(review): path_created / video_file_name / video_url may be unbound
        # here when earlier branches were skipped — a latent NameError.
        yt_command = utils.get_youtube_dl_command(file_location=path_created + os.sep + video_file_name, video_url=video_url)
        if add_to_queue:
            print("Added To Queue")
            self.queue.append(yt_command)
        else:
            print("Youtube-dl Command: {0}".format(yt_command))
            process_code = utils.call_youtube_dl(yt_command)
            print("Process Done: {0}".format(process_code))
        return 0

    def get_episode_list(self, url):
        """Collect every episode URL from the season's JSON-LD metadata, process
        each via single_episode (queuing its download), then drain the queue.
        Returns 0."""
        related_episodes = []
        base_page_content = get_http_request(url, text_only=True)
        soup = BeautifulSoup(base_page_content, 'html.parser')

        video_metadata = soup.find_all('script', type='application/ld+json')
        if not video_metadata:
            print("Can't find metadata")
        if len(video_metadata) > 1:
            # NOTE(review): tag-stripping string lost in extraction, as above.
            metadata_json = str(video_metadata[0]).replace('', '')
            season_metadata = dict(json.loads(str(metadata_json)))
            # current__episode_metadata_json = str(video_metadata[1]).replace('', '')
            # current_video_metadata = dict(json.loads(current__episode_metadata_json))
            episodes = season_metadata.get('episode', [])
            for episode in episodes:
                url = dict(episode).get('url', None)
                if url:
                    related_episodes.append(str(url))
        print("Total Episodes To Download: {0}".format(len(related_episodes)))
        for episode_url in related_episodes:
            self.single_episode(url=episode_url, file_name=None, add_to_queue=True)
        # Drain the queue sequentially, one youtube-dl process at a time.
        for current_command in self.queue:
            print(current_command)
            process_code = utils.call_youtube_dl(current_command)
            print("Process Done: {0}".format(process_code))
        return 0
137 |
138 |
--------------------------------------------------------------------------------
/src/movies_api.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import cust_utils
5 | import json
6 |
7 | BASE_URL = 'https://discover.hulu.com/content/v3/entity'
8 |
9 |
def get_xml_http_request(movie_url):
    """GET *movie_url* as an AJAX-style request and return the parsed JSON response."""
    return cust_utils.browser_instance.get_request(url=movie_url, xml_http_request=True)
13 |
14 |
def get_http_request(movie_url, text_only=False):
    """GET *movie_url*; return raw body bytes when text_only, else parsed JSON."""
    return cust_utils.browser_instance.get_request(url=movie_url, text_only=text_only)
18 |
--------------------------------------------------------------------------------