├── setup.sh
├── .gitignore
├── LICENSE
├── README.md
└── opensubtitleparser.py

/setup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

echo 'Downloading dataset...'

# Download the dataset first. The URL is quoted so the shell doesn't mangle
# the query string, and -O gives the download a predictable filename.
wget -O en.tar.gz 'http://opus.lingfil.uu.se/download.php?f=OpenSubtitles/en.tar.gz'

echo 'Unzipping dataset...'

# Assumes the user hasn't put any other tar files in this folder.
# Two extraction attempts because during testing the archive occasionally
# arrived as a bare .gz; tar auto-detects the compression either way.
tar -xvf *.tar
tar -xvf *.gz

echo 'Extracting dataset...'

# Extract all files in sub-directories.
find . -name '*.gz' -exec gunzip '{}' \;
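
# Optional sanity check, assuming the archive unpacks to OpenSubtitles/en/
# (the parser's default --rootXmlDir): fail early if that tree is missing.
if [ ! -d OpenSubtitles/en ]; then
    echo 'Expected OpenSubtitles/en/ after extraction; check the download.' >&2
    exit 1
fi
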
echo 'Running python preprocessor...'

# Run the python pre-processor.
python opensubtitleparser.py
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*.py~
# C extensions
*.so
.DS_Store
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
OpenSubtitles/
*.gz
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover

# Translations
*.mo
*.pot

# Django stuff:
*.log

# Sphinx documentation
docs/_build/

# PyBuilder
target/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
The MIT License (MIT)

Copyright (c) 2015 Dominik

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# opensubtitles-parser
This automates the process of downloading, extracting, and tokenizing all the text from the OpenSubtitles dataset into a tokenized corpus (one raw text file per movie). Each phrase is on its own line, with the tokens in a phrase separated by single spaces.

**Currently only works in Python 2.7.x**

## Attribution

This script uses the dataset found at: http://opus.lingfil.uu.se/OpenSubtitles.php

It was made possible by: Jörg Tiedemann, 2009, News from OPUS - A Collection of Multilingual Parallel Corpora with Tools and Interfaces. In N. Nicolov, K. Bontcheva, G. Angelova and R. Mitkov (eds.) Recent Advances in Natural Language Processing (vol. V), pages 237-248, John Benjamins, Amsterdam/Philadelphia.

## Usage

When using the script for the first time, run ``./setup.sh``. This will download the dataset and then parse it.

If you have already downloaded the dataset, run ``python opensubtitleparser.py``.

## Notes

Some words appear in the corpus files without a space between the tokens (e.g. "and the" -> "andthe"). I don't know whether this comes from the original dataset or from a problem with my script; it needs more investigation.
--------------------------------------------------------------------------------
/opensubtitleparser.py:
--------------------------------------------------------------------------------
'''
This script parses all of the subtitle XML data for source/target pairs,
assuming each line is the target of the previous line.
The text is stored in a tokenized format, written to pre-processed data files
meant to be parsed later by a deep learning framework.
'''
import xml.etree.ElementTree as ET
import argparse
import os
import re
import errno

raw_file = "raw.txt"  # output files are named <n>raw.txt, one per movie
inc = 0


def main():
    parser = argparse.ArgumentParser(description='Set parameters for the xml parser.')
    parser.add_argument('--rootXmlDir', default="OpenSubtitles/en/",
                        help='Path to the root directory of the xml files')
    parser.add_argument('--dataDir', default="data/",
                        help='Path to the directory where processed data will be saved.')
    args = parser.parse_args()
    processed_data_dir = args.dataDir
    raw_data_dir = args.rootXmlDir

    files = findXmlFiles(raw_data_dir)
    print("Have {} files to parse!".format(len(files)))
    # Set up the folder structure and data files
    mkdir_p(processed_data_dir)
    for f in files:
        try:
            extractTokenizedPhrases(f, processed_data_dir)
        except KeyboardInterrupt:
            print("Process stopped by user...")
            return 0
        except Exception as e:
            print(e)
            print("Error in " + f)
            continue


'''
Loops through folders recursively to find all xml files
'''


def findXmlFiles(directory):
    xmlFiles = []
    for f in os.listdir(directory):
        path = os.path.join(directory, f)
        if os.path.isdir(path):
            xmlFiles = xmlFiles + findXmlFiles(path + "/")
        else:
            xmlFiles.append(path)
    return xmlFiles


'''
The assumption is made (for now) that each node in the xml docs represents
a token, meaning everything has already been tokenized. At first observation
this appears to be an ok assumption.

This function has been modified to print to a single file for each movie.
This is for memory consideration when processing later down the pipeline.
'''
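
# Illustrative input shape (hypothetical ids, heavily trimmed): the function
# below only relies on <s> sentence elements containing one <w> element per
# token, e.g.
#
#   <s id="1">
#     <w id="1.1">Hello</w>
#     <w id="1.2">there</w>
#     <w id="1.3">.</w>
#   </s>
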
def extractTokenizedPhrases(xmlFilePath, dataDirFilePath):
    global inc
    inc += 1
    out_path = dataDirFilePath + str(inc) + raw_file
    mkfile(out_path)
    tree = ET.parse(xmlFilePath)
    root = tree.getroot()
    print("Processing {}...".format(xmlFilePath))
    with open(out_path, 'a') as f:
        for child in root.findall('s'):
            A = []
            for node in child.iter():
                # Keep only word nodes that have text; drop non-ascii
                # characters and stray hyphens from each token.
                if node.tag == 'w' and node.text:
                    A.append(node.text.encode('ascii', 'ignore').replace('-', ''))
            text = cleanText(" ".join(A))
            # Skip empty lines, stage directions ("[...]") and speaker
            # labels ending in ":".
            if text and text[0] != '[' and text[-1] != ':':
                f.write(text + "\n")


'''
This function removes funky things in the text.
There is probably a much better way to do it, but unless the token list gets
much bigger it shouldn't really matter how inefficient this is.

Illustrative example: cleanText('-Hello (laughs) there~') -> 'hello there'
'''


def cleanText(text):
    t = text.strip('-')
    t = t.lower()
    t = t.strip('"')
    # Drop parenthesised and braced asides such as "(laughs)" or "{...}",
    # then collapse the double spaces they leave behind.
    t = re.sub(r'\(.+?\)', '', t)
    t = re.sub(r'\{.+?\}', '', t)
    t = t.replace('  ', ' ')
    t = t.replace("~", "")
    t = t.strip(' ')
    return t


'''
Taken from http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
'''


def mkdir_p(path):
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise


def mkfile(path):
    try:
        with open(path, 'w+'):
            return 1
    except IOError:
        print("Data file open; ensure it is closed, and re-run!")
        return 0


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------