├── .github
│   └── workflows
│       └── manual.yml
├── .gitignore
├── CODEOWNERS
├── LICENSE
├── README.md
├── data
│   ├── small_vocab_en
│   └── small_vocab_fr
├── helper.py
├── images
│   ├── bidirectional.png
│   ├── embedding.png
│   └── rnn.png
├── machine_translation.ipynb
└── project_tests.py

/.github/workflows/manual.yml:
--------------------------------------------------------------------------------
1 | # Workflow to ensure whenever a Github PR is submitted,
2 | # a JIRA ticket gets created automatically.
3 | name: Manual Workflow
4 | 
5 | # Controls when the action will run. 
6 | on:
7 |   # Triggers the workflow on pull request events but only for the master branch
8 |   pull_request_target:
9 |     types: [opened, reopened]
10 | 
11 |   # Allows you to run this workflow manually from the Actions tab
12 |   workflow_dispatch:
13 | 
14 | jobs:
15 |   test-transition-issue:
16 |     name: Convert Github Issue to Jira Issue
17 |     runs-on: ubuntu-latest
18 |     steps:
19 |       - name: Checkout
20 |         uses: actions/checkout@master
21 | 
22 |       - name: Login
23 |         uses: atlassian/gajira-login@master
24 |         env:
25 |           JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
26 |           JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
27 |           JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
28 | 
29 |       - name: Create NEW JIRA ticket
30 |         id: create
31 |         uses: atlassian/gajira-create@master
32 |         with:
33 |           project: CONUPDATE
34 |           issuetype: Task
35 |           summary: |
36 |             Github PR [Assign the ND component] | Repo: ${{ github.repository }} | PR# ${{github.event.number}}
37 |           description: |
38 |             Repo link: https://github.com/${{ github.repository }}
39 |             PR no. ${{ github.event.pull_request.number }}
40 |             PR title: ${{ github.event.pull_request.title }}
41 |             PR description: ${{ github.event.pull_request.description }}
42 |             In addition, please resolve other issues, if any.
43 |           fields: '{"components": [{"name":"Github PR"}], "customfield_16449":"https://classroom.udacity.com/", "customfield_16450":"Resolve the PR", "labels": ["github"], "priority":{"id": "4"}}'
44 | 
45 |       - name: Log created issue
46 |         run: echo "Issue ${{ steps.create.outputs.issue }} was created"
47 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | 
6 | # C extensions
7 | *.so
8 | 
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | 
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 | 
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 | 
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | 
49 | # Translations
50 | *.mo
51 | *.pot
52 | 
53 | # Django stuff:
54 | *.log
55 | local_settings.py
56 | 
57 | # Flask stuff:
58 | instance/
59 | .webassets-cache
60 | 
61 | # Scrapy stuff:
62 | .scrapy
63 | 
64 | # Sphinx documentation
65 | docs/_build/
66 | 
67 | # PyBuilder
68 | target/
69 | 
70 | # Jupyter Notebook
71 | .ipynb_checkpoints
72 | 
73 | # pyenv
74 | .python-version
75 | 
76 | # celery beat schedule file
77 | celerybeat-schedule
78 | 
79 | # SageMath parsed files
80 | *.sage.py
81 | 
82 | # docker
83 | .dockerenv
84 | 
85 | # dotenv
86 | .env
87 | 
88 | # virtualenv
89 | .venv
90 | venv/
91 | ENV/
92 | 
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 | 
97 | # Rope project settings
98 | .ropeproject
99 | 
100 | # mkdocs documentation
101 | /site
102 | 
103 | # mypy
104 | .mypy_cache/
105 | 
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # This is a comment.
2 | # Each line is a file pattern followed by one or more owners.
3 | 
4 | # These owners will be the default owners for everything in
5 | # the repo.
6 | * @cgearhart @luisguiserrano
7 | 
8 | 
9 | * @udacity/active-public-content
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2017 Udacity, Inc.
2 | 
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 | 
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial portions of the Software.
12 | 
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Introduction
2 | In this notebook, you will build a deep neural network that functions as part of an end-to-end machine translation pipeline. Your completed pipeline will accept English text as input and return the French translation.
3 | 
4 | # Setup
5 | 
6 | This project requires GPU acceleration to run efficiently. You can use either of the following two methods to access GPU-enabled cloud computing resources.
7 | 
8 | ## Udacity Workspaces (Recommended)
9 | 
10 | Udacity Workspaces provide a remote connection to GPU-enabled instances right from the classroom. Refer to the classroom lesson for this project for an overview of navigating and using Jupyter notebook Workspaces.
11 | 
12 | ## Amazon Web Services (Optional)
13 | 
14 | Please refer to the project instructions in the classroom for setting up a GPU instance for this project. The recommended AMI should include compatible versions of all software and libraries required to complete the project. [link for AIND students](https://classroom.udacity.com/nanodegrees/nd889/parts/16cf5df5-73f0-4afa-93a9-de5974257236/modules/53b2a19e-4e29-4ae7-aaf2-33d195dbdeba/lessons/2df3b94c-4f09-476a-8397-e8841b147f84/project)
15 | 
16 | ## Install
17 | - Python 3
18 | - NumPy
19 | - TensorFlow 1.x
20 | - Keras 2.x
21 | 
22 | # Submission
23 | When you are ready to submit your project, complete the following steps:
24 | 1. Ensure you pass all points on the [rubric](https://review.udacity.com/#!/rubrics/1004/view).
25 | 2. Submit the following in a zip file:
26 |     - `helper.py`
27 |     - `machine_translation.ipynb`
28 |     - `machine_translation.html`
29 | 
30 | ## Converting to HTML
31 | 
32 | There are several ways to generate an HTML copy of the notebook:
33 | 
34 | - Running the last cell of the notebook will export an HTML copy
35 | 
36 | - Navigating to **File -> Download as -> HTML (.html)** within the notebook
37 | 
38 | - Using `nbconvert` from the command line
39 | 
40 |       $ pip install nbconvert
41 |       $ jupyter nbconvert --to html machine_translation.ipynb
42 | 
--------------------------------------------------------------------------------
/helper.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | 
4 | def load_data(path):
5 |     """
6 |     Load dataset
7 |     """
8 |     input_file = os.path.join(path)
9 |     with open(input_file, "r") as f:
10 |         data = f.read()
11 | 
12 |     return data.split('\n')
13 | 
--------------------------------------------------------------------------------
/images/bidirectional.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/udacity/aind2-nlp-capstone/194b4046a8a910addb149fb10db33fab3cfb7862/images/bidirectional.png
--------------------------------------------------------------------------------
/images/embedding.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/udacity/aind2-nlp-capstone/194b4046a8a910addb149fb10db33fab3cfb7862/images/embedding.png
--------------------------------------------------------------------------------
/images/rnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/udacity/aind2-nlp-capstone/194b4046a8a910addb149fb10db33fab3cfb7862/images/rnn.png
--------------------------------------------------------------------------------
/machine_translation.ipynb:
--------------------------------------------------------------------------------
1 | {
2 |  "cells": [
3 |   {
4 |    "cell_type": "markdown",
5 |    "metadata": {
6 |     "collapsed": true
7 |    },
8 |    "source": [
9 |     "# Artificial Intelligence Nanodegree\n",
10 |     "## Machine Translation Project\n",
11 |     "In this notebook, sections that end with **'(IMPLEMENTATION)'** in the header indicate that the following blocks of code will require additional functionality which you must provide.
Please be sure to read the instructions carefully!\n", 12 | "\n", 13 | "## Introduction\n", 14 | "In this notebook, you will build a deep neural network that functions as part of an end-to-end machine translation pipeline. Your completed pipeline will accept English text as input and return the French translation.\n", 15 | "\n", 16 | "- **Preprocess** - You'll convert text to sequence of integers.\n", 17 | "- **Models** Create models which accepts a sequence of integers as input and returns a probability distribution over possible translations. After learning about the basic types of neural networks that are often used for machine translation, you will engage in your own investigations, to design your own model!\n", 18 | "- **Prediction** Run the model on English text." 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": null, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "%load_ext autoreload\n", 28 | "%aimport helper, tests\n", 29 | "%autoreload 1" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "import collections\n", 39 | "\n", 40 | "import helper\n", 41 | "import numpy as np\n", 42 | "import project_tests as tests\n", 43 | "\n", 44 | "from keras.preprocessing.text import Tokenizer\n", 45 | "from keras.preprocessing.sequence import pad_sequences\n", 46 | "from keras.models import Model\n", 47 | "from keras.layers import GRU, Input, Dense, TimeDistributed, Activation, RepeatVector, Bidirectional\n", 48 | "from keras.layers.embeddings import Embedding\n", 49 | "from keras.optimizers import Adam\n", 50 | "from keras.losses import sparse_categorical_crossentropy" 51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [ 57 | "### Verify access to the GPU\n", 58 | "The following test applies only if you expect to be using a GPU, e.g., while running in a Udacity Workspace or using an AWS instance with GPU support. Run the next cell, and verify that the device_type is \"GPU\".\n", 59 | "- If the device is not GPU & you are running from a Udacity Workspace, then save your workspace with the icon at the top, then click \"enable\" at the bottom of the workspace.\n", 60 | "- If the device is not GPU & you are running from an AWS instance, then refer to the cloud computing instructions in the classroom to verify your setup steps." 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "metadata": {}, 67 | "outputs": [], 68 | "source": [ 69 | "from tensorflow.python.client import device_lib\n", 70 | "print(device_lib.list_local_devices())" 71 | ] 72 | }, 73 | { 74 | "cell_type": "markdown", 75 | "metadata": {}, 76 | "source": [ 77 | "## Dataset\n", 78 | "We begin by investigating the dataset that will be used to train and evaluate your pipeline. The most common datasets used for machine translation are from [WMT](http://www.statmt.org/). However, that will take a long time to train a neural network on. We'll be using a dataset we created for this project that contains a small vocabulary. You'll be able to train your model in a reasonable time with this dataset.\n", 79 | "### Load Data\n", 80 | "The data is located in `data/small_vocab_en` and `data/small_vocab_fr`. The `small_vocab_en` file contains English sentences with their French translations in the `small_vocab_fr` file. Load the English and French data from these files from running the cell below." 
81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": null, 86 | "metadata": {}, 87 | "outputs": [], 88 | "source": [ 89 | "# Load English data\n", 90 | "english_sentences = helper.load_data('data/small_vocab_en')\n", 91 | "# Load French data\n", 92 | "french_sentences = helper.load_data('data/small_vocab_fr')\n", 93 | "\n", 94 | "print('Dataset Loaded')" 95 | ] 96 | }, 97 | { 98 | "cell_type": "markdown", 99 | "metadata": {}, 100 | "source": [ 101 | "### Files\n", 102 | "Each line in `small_vocab_en` contains an English sentence with the respective translation in each line of `small_vocab_fr`. View the first two lines from each file." 103 | ] 104 | }, 105 | { 106 | "cell_type": "code", 107 | "execution_count": null, 108 | "metadata": {}, 109 | "outputs": [], 110 | "source": [ 111 | "for sample_i in range(2):\n", 112 | " print('small_vocab_en Line {}: {}'.format(sample_i + 1, english_sentences[sample_i]))\n", 113 | " print('small_vocab_fr Line {}: {}'.format(sample_i + 1, french_sentences[sample_i]))" 114 | ] 115 | }, 116 | { 117 | "cell_type": "markdown", 118 | "metadata": {}, 119 | "source": [ 120 | "From looking at the sentences, you can see they have been preprocessed already. The puncuations have been delimited using spaces. All the text have been converted to lowercase. This should save you some time, but the text requires more preprocessing.\n", 121 | "### Vocabulary\n", 122 | "The complexity of the problem is determined by the complexity of the vocabulary. A more complex vocabulary is a more complex problem. Let's look at the complexity of the dataset we'll be working with." 123 | ] 124 | }, 125 | { 126 | "cell_type": "code", 127 | "execution_count": null, 128 | "metadata": {}, 129 | "outputs": [], 130 | "source": [ 131 | "english_words_counter = collections.Counter([word for sentence in english_sentences for word in sentence.split()])\n", 132 | "french_words_counter = collections.Counter([word for sentence in french_sentences for word in sentence.split()])\n", 133 | "\n", 134 | "print('{} English words.'.format(len([word for sentence in english_sentences for word in sentence.split()])))\n", 135 | "print('{} unique English words.'.format(len(english_words_counter)))\n", 136 | "print('10 Most common words in the English dataset:')\n", 137 | "print('\"' + '\" \"'.join(list(zip(*english_words_counter.most_common(10)))[0]) + '\"')\n", 138 | "print()\n", 139 | "print('{} French words.'.format(len([word for sentence in french_sentences for word in sentence.split()])))\n", 140 | "print('{} unique French words.'.format(len(french_words_counter)))\n", 141 | "print('10 Most common words in the French dataset:')\n", 142 | "print('\"' + '\" \"'.join(list(zip(*french_words_counter.most_common(10)))[0]) + '\"')" 143 | ] 144 | }, 145 | { 146 | "cell_type": "markdown", 147 | "metadata": {}, 148 | "source": [ 149 | "For comparison, _Alice's Adventures in Wonderland_ contains 2,766 unique words of a total of 15,500 words.\n", 150 | "## Preprocess\n", 151 | "For this project, you won't use text data as input to your model. Instead, you'll convert the text into sequences of integers using the following preprocess methods:\n", 152 | "1. Tokenize the words into ids\n", 153 | "2. Add padding to make all the sequences the same length.\n", 154 | "\n", 155 | "Time to start preprocessing the data...\n", 156 | "### Tokenize (IMPLEMENTATION)\n", 157 | "For a neural network to predict on text data, it first has to be turned into data it can understand. 
Text data like \"dog\" is a sequence of ASCII character encodings. Since a neural network is a series of multiplication and addition operations, the input data needs to be number(s).\n", 158 | "\n", 159 | "We can turn each character into a number or each word into a number. These are called character and word ids, respectively. Character ids are used for character level models that generate text predictions for each character. A word level model uses word ids that generate text predictions for each word. Word level models tend to learn better, since they are lower in complexity, so we'll use those.\n", 160 | "\n", 161 | "Turn each sentence into a sequence of words ids using Keras's [`Tokenizer`](https://keras.io/preprocessing/text/#tokenizer) function. Use this function to tokenize `english_sentences` and `french_sentences` in the cell below.\n", 162 | "\n", 163 | "Running the cell will run `tokenize` on sample data and show output for debugging." 164 | ] 165 | }, 166 | { 167 | "cell_type": "code", 168 | "execution_count": null, 169 | "metadata": {}, 170 | "outputs": [], 171 | "source": [ 172 | "def tokenize(x):\n", 173 | " \"\"\"\n", 174 | " Tokenize x\n", 175 | " :param x: List of sentences/strings to be tokenized\n", 176 | " :return: Tuple of (tokenized x data, tokenizer used to tokenize x)\n", 177 | " \"\"\"\n", 178 | " # TODO: Implement\n", 179 | " return None, None\n", 180 | "tests.test_tokenize(tokenize)\n", 181 | "\n", 182 | "# Tokenize Example output\n", 183 | "text_sentences = [\n", 184 | " 'The quick brown fox jumps over the lazy dog .',\n", 185 | " 'By Jove , my quick study of lexicography won a prize .',\n", 186 | " 'This is a short sentence .']\n", 187 | "text_tokenized, text_tokenizer = tokenize(text_sentences)\n", 188 | "print(text_tokenizer.word_index)\n", 189 | "print()\n", 190 | "for sample_i, (sent, token_sent) in enumerate(zip(text_sentences, text_tokenized)):\n", 191 | " print('Sequence {} in x'.format(sample_i + 1))\n", 192 | " print(' Input: {}'.format(sent))\n", 193 | " print(' Output: {}'.format(token_sent))" 194 | ] 195 | }, 196 | { 197 | "cell_type": "markdown", 198 | "metadata": {}, 199 | "source": [ 200 | "### Padding (IMPLEMENTATION)\n", 201 | "When batching the sequence of word ids together, each sequence needs to be the same length. Since sentences are dynamic in length, we can add padding to the end of the sequences to make them the same length.\n", 202 | "\n", 203 | "Make sure all the English sequences have the same length and all the French sequences have the same length by adding padding to the **end** of each sequence using Keras's [`pad_sequences`](https://keras.io/preprocessing/sequence/#pad_sequences) function." 204 | ] 205 | }, 206 | { 207 | "cell_type": "code", 208 | "execution_count": null, 209 | "metadata": {}, 210 | "outputs": [], 211 | "source": [ 212 | "def pad(x, length=None):\n", 213 | " \"\"\"\n", 214 | " Pad x\n", 215 | " :param x: List of sequences.\n", 216 | " :param length: Length to pad the sequence to. 
If None, use length of longest sequence in x.\n", 217 | " :return: Padded numpy array of sequences\n", 218 | " \"\"\"\n", 219 | " # TODO: Implement\n", 220 | " return None\n", 221 | "tests.test_pad(pad)\n", 222 | "\n", 223 | "# Pad Tokenized output\n", 224 | "test_pad = pad(text_tokenized)\n", 225 | "for sample_i, (token_sent, pad_sent) in enumerate(zip(text_tokenized, test_pad)):\n", 226 | " print('Sequence {} in x'.format(sample_i + 1))\n", 227 | " print(' Input: {}'.format(np.array(token_sent)))\n", 228 | " print(' Output: {}'.format(pad_sent))" 229 | ] 230 | }, 231 | { 232 | "cell_type": "markdown", 233 | "metadata": {}, 234 | "source": [ 235 | "### Preprocess Pipeline\n", 236 | "Your focus for this project is to build neural network architecture, so we won't ask you to create a preprocess pipeline. Instead, we've provided you with the implementation of the `preprocess` function." 237 | ] 238 | }, 239 | { 240 | "cell_type": "code", 241 | "execution_count": null, 242 | "metadata": {}, 243 | "outputs": [], 244 | "source": [ 245 | "def preprocess(x, y):\n", 246 | " \"\"\"\n", 247 | " Preprocess x and y\n", 248 | " :param x: Feature List of sentences\n", 249 | " :param y: Label List of sentences\n", 250 | " :return: Tuple of (Preprocessed x, Preprocessed y, x tokenizer, y tokenizer)\n", 251 | " \"\"\"\n", 252 | " preprocess_x, x_tk = tokenize(x)\n", 253 | " preprocess_y, y_tk = tokenize(y)\n", 254 | "\n", 255 | " preprocess_x = pad(preprocess_x)\n", 256 | " preprocess_y = pad(preprocess_y)\n", 257 | "\n", 258 | " # Keras's sparse_categorical_crossentropy function requires the labels to be in 3 dimensions\n", 259 | " preprocess_y = preprocess_y.reshape(*preprocess_y.shape, 1)\n", 260 | "\n", 261 | " return preprocess_x, preprocess_y, x_tk, y_tk\n", 262 | "\n", 263 | "preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer =\\\n", 264 | " preprocess(english_sentences, french_sentences)\n", 265 | " \n", 266 | "max_english_sequence_length = preproc_english_sentences.shape[1]\n", 267 | "max_french_sequence_length = preproc_french_sentences.shape[1]\n", 268 | "english_vocab_size = len(english_tokenizer.word_index)\n", 269 | "french_vocab_size = len(french_tokenizer.word_index)\n", 270 | "\n", 271 | "print('Data Preprocessed')\n", 272 | "print(\"Max English sentence length:\", max_english_sequence_length)\n", 273 | "print(\"Max French sentence length:\", max_french_sequence_length)\n", 274 | "print(\"English vocabulary size:\", english_vocab_size)\n", 275 | "print(\"French vocabulary size:\", french_vocab_size)" 276 | ] 277 | }, 278 | { 279 | "cell_type": "markdown", 280 | "metadata": {}, 281 | "source": [ 282 | "## Models\n", 283 | "In this section, you will experiment with various neural network architectures.\n", 284 | "You will begin by training four relatively simple architectures.\n", 285 | "- Model 1 is a simple RNN\n", 286 | "- Model 2 is a RNN with Embedding\n", 287 | "- Model 3 is a Bidirectional RNN\n", 288 | "- Model 4 is an optional Encoder-Decoder RNN\n", 289 | "\n", 290 | "After experimenting with the four simple architectures, you will construct a deeper architecture that is designed to outperform all four models.\n", 291 | "### Ids Back to Text\n", 292 | "The neural network will be translating the input to words ids, which isn't the final form we want. We want the French translation. The function `logits_to_text` will bridge the gap between the logits from the neural network to the French translation. 
You'll be using this function to better understand the output of the neural network." 293 | ] 294 | }, 295 | { 296 | "cell_type": "code", 297 | "execution_count": null, 298 | "metadata": {}, 299 | "outputs": [], 300 | "source": [ 301 | "def logits_to_text(logits, tokenizer):\n", 302 | " \"\"\"\n", 303 | " Turn logits from a neural network into text using the tokenizer\n", 304 | " :param logits: Logits from a neural network\n", 305 | " :param tokenizer: Keras Tokenizer fit on the labels\n", 306 | " :return: String that represents the text of the logits\n", 307 | " \"\"\"\n", 308 | " index_to_words = {id: word for word, id in tokenizer.word_index.items()}\n", 309 | " index_to_words[0] = ''\n", 310 | "\n", 311 | " return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])\n", 312 | "\n", 313 | "print('`logits_to_text` function loaded.')" 314 | ] 315 | }, 316 | { 317 | "cell_type": "markdown", 318 | "metadata": {}, 319 | "source": [ 320 | "### Model 1: RNN (IMPLEMENTATION)\n", 321 | "![RNN](images/rnn.png)\n", 322 | "A basic RNN model is a good baseline for sequence data. In this model, you'll build a RNN that translates English to French." 323 | ] 324 | }, 325 | { 326 | "cell_type": "code", 327 | "execution_count": null, 328 | "metadata": {}, 329 | "outputs": [], 330 | "source": [ 331 | "def simple_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):\n", 332 | " \"\"\"\n", 333 | " Build and train a basic RNN on x and y\n", 334 | " :param input_shape: Tuple of input shape\n", 335 | " :param output_sequence_length: Length of output sequence\n", 336 | " :param english_vocab_size: Number of unique English words in the dataset\n", 337 | " :param french_vocab_size: Number of unique French words in the dataset\n", 338 | " :return: Keras model built, but not trained\n", 339 | " \"\"\"\n", 340 | " # TODO: Build the layers\n", 341 | " model = None\n", 342 | " model.compile(loss=sparse_categorical_crossentropy,\n", 343 | " optimizer=Adam(learning_rate),\n", 344 | " metrics=['accuracy'])\n", 345 | " return model\n", 346 | "tests.test_simple_model(simple_model)\n", 347 | "\n", 348 | "# Reshaping the input to work with a basic RNN\n", 349 | "tmp_x = pad(preproc_english_sentences, max_french_sequence_length)\n", 350 | "tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2], 1))\n", 351 | "\n", 352 | "# Train the neural network\n", 353 | "simple_rnn_model = simple_model(\n", 354 | " tmp_x.shape,\n", 355 | " max_french_sequence_length,\n", 356 | " english_vocab_size,\n", 357 | " french_vocab_size)\n", 358 | "simple_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=1024, epochs=10, validation_split=0.2)\n", 359 | "\n", 360 | "# Print prediction(s)\n", 361 | "print(logits_to_text(simple_rnn_model.predict(tmp_x[:1])[0], french_tokenizer))" 362 | ] 363 | }, 364 | { 365 | "cell_type": "markdown", 366 | "metadata": {}, 367 | "source": [ 368 | "### Model 2: Embedding (IMPLEMENTATION)\n", 369 | "![RNN](images/embedding.png)\n", 370 | "You've turned the words into ids, but there's a better representation of a word. This is called word embeddings. An embedding is a vector representation of the word that is close to similar words in n-dimensional space, where the n represents the size of the embedding vectors.\n", 371 | "\n", 372 | "In this model, you'll create a RNN model using embedding." 
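One possible shape for such a model, shown here as a minimal sketch for illustration only (it is not part of the starter code, and the TODO cell that follows is still yours to fill in). The 256-unit layers, the 0.005 learning rate, and the choice of GRU are assumptions, not requirements; the layers are re-imported so the sketch stands alone.

# Illustrative sketch of an embedding-based model -- sizes and learning rate are assumptions.
from keras.models import Sequential
from keras.layers import GRU, TimeDistributed, Dense
from keras.layers.embeddings import Embedding
from keras.optimizers import Adam
from keras.losses import sparse_categorical_crossentropy

def embed_model_sketch(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
    learning_rate = 0.005  # assumption: a reasonable starting point, tune as needed
    model = Sequential()
    # Map each word id to a dense 256-dimensional vector; +1 leaves room for the
    # padding id 0, since Tokenizer assigns word ids starting at 1.
    model.add(Embedding(english_vocab_size + 1, 256, input_length=input_shape[1]))
    # Return the full sequence so a prediction is made at every time step.
    model.add(GRU(256, return_sequences=True))
    # Predict a probability distribution over the French vocabulary at each time step.
    # Depending on how you count the vocabulary (whether the padding id is included),
    # french_vocab_size + 1 units may be needed so every label id has a class.
    model.add(TimeDistributed(Dense(french_vocab_size, activation='softmax')))
    model.compile(loss=sparse_categorical_crossentropy,
                  optimizer=Adam(learning_rate),
                  metrics=['accuracy'])
    return model

Note that an embedding model takes the 2-D array of word ids directly, so the English input only needs to be padded to the French sequence length; there is no trailing unit dimension as in the reshaped input for Model 1.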
373 | ] 374 | }, 375 | { 376 | "cell_type": "code", 377 | "execution_count": null, 378 | "metadata": {}, 379 | "outputs": [], 380 | "source": [ 381 | "def embed_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):\n", 382 | " \"\"\"\n", 383 | " Build and train a RNN model using word embedding on x and y\n", 384 | " :param input_shape: Tuple of input shape\n", 385 | " :param output_sequence_length: Length of output sequence\n", 386 | " :param english_vocab_size: Number of unique English words in the dataset\n", 387 | " :param french_vocab_size: Number of unique French words in the dataset\n", 388 | " :return: Keras model built, but not trained\n", 389 | " \"\"\"\n", 390 | " # TODO: Implement\n", 391 | " return None\n", 392 | "tests.test_embed_model(embed_model)\n", 393 | "\n", 394 | "\n", 395 | "# TODO: Reshape the input\n", 396 | "\n", 397 | "# TODO: Train the neural network\n", 398 | "\n", 399 | "# TODO: Print prediction(s)" 400 | ] 401 | }, 402 | { 403 | "cell_type": "markdown", 404 | "metadata": {}, 405 | "source": [ 406 | "### Model 3: Bidirectional RNNs (IMPLEMENTATION)\n", 407 | "![RNN](images/bidirectional.png)\n", 408 | "One restriction of a RNN is that it can't see the future input, only the past. This is where bidirectional recurrent neural networks come in. They are able to see the future data." 409 | ] 410 | }, 411 | { 412 | "cell_type": "code", 413 | "execution_count": null, 414 | "metadata": {}, 415 | "outputs": [], 416 | "source": [ 417 | "def bd_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):\n", 418 | " \"\"\"\n", 419 | " Build and train a bidirectional RNN model on x and y\n", 420 | " :param input_shape: Tuple of input shape\n", 421 | " :param output_sequence_length: Length of output sequence\n", 422 | " :param english_vocab_size: Number of unique English words in the dataset\n", 423 | " :param french_vocab_size: Number of unique French words in the dataset\n", 424 | " :return: Keras model built, but not trained\n", 425 | " \"\"\"\n", 426 | " # TODO: Implement\n", 427 | " return None\n", 428 | "tests.test_bd_model(bd_model)\n", 429 | "\n", 430 | "\n", 431 | "# TODO: Train and Print prediction(s)" 432 | ] 433 | }, 434 | { 435 | "cell_type": "markdown", 436 | "metadata": {}, 437 | "source": [ 438 | "### Model 4: Encoder-Decoder (OPTIONAL)\n", 439 | "Time to look at encoder-decoder models. This model is made up of an encoder and decoder. The encoder creates a matrix representation of the sentence. The decoder takes this matrix as input and predicts the translation as output.\n", 440 | "\n", 441 | "Create an encoder-decoder model in the cell below." 
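A minimal sketch of one such encoder-decoder, for illustration only: the encoder compresses the English sentence into a single state vector and RepeatVector hands that state to the decoder at every output step (no attention). The 256-unit GRUs and the learning rate are arbitrary choices.

# Illustrative encoder-decoder sketch -- layer sizes and learning rate are assumptions.
from keras.models import Sequential
from keras.layers import GRU, RepeatVector, TimeDistributed, Dense
from keras.optimizers import Adam
from keras.losses import sparse_categorical_crossentropy

def encdec_model_sketch(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
    learning_rate = 0.005  # assumption: starting point only
    model = Sequential()
    # Encoder: read the English sequence and summarize it as one fixed-size state vector.
    model.add(GRU(256, input_shape=input_shape[1:]))
    # Feed that summary to the decoder once per French output time step.
    model.add(RepeatVector(output_sequence_length))
    # Decoder: unroll the summary into a sequence of the French output length.
    model.add(GRU(256, return_sequences=True))
    # Distribution over the French vocabulary at each output position.
    model.add(TimeDistributed(Dense(french_vocab_size, activation='softmax')))
    model.compile(loss=sparse_categorical_crossentropy,
                  optimizer=Adam(learning_rate),
                  metrics=['accuracy'])
    return model

The same idea extends naturally to Model 5: swap the raw-id encoder input for an Embedding layer and wrap the encoder GRU in Bidirectional.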
442 | ] 443 | }, 444 | { 445 | "cell_type": "code", 446 | "execution_count": null, 447 | "metadata": {}, 448 | "outputs": [], 449 | "source": [ 450 | "def encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):\n", 451 | " \"\"\"\n", 452 | " Build and train an encoder-decoder model on x and y\n", 453 | " :param input_shape: Tuple of input shape\n", 454 | " :param output_sequence_length: Length of output sequence\n", 455 | " :param english_vocab_size: Number of unique English words in the dataset\n", 456 | " :param french_vocab_size: Number of unique French words in the dataset\n", 457 | " :return: Keras model built, but not trained\n", 458 | " \"\"\"\n", 459 | " # OPTIONAL: Implement\n", 460 | " return None\n", 461 | "tests.test_encdec_model(encdec_model)\n", 462 | "\n", 463 | "\n", 464 | "# OPTIONAL: Train and Print prediction(s)" 465 | ] 466 | }, 467 | { 468 | "cell_type": "markdown", 469 | "metadata": {}, 470 | "source": [ 471 | "### Model 5: Custom (IMPLEMENTATION)\n", 472 | "Use everything you learned from the previous models to create a model that incorporates embedding and a bidirectional rnn into one model." 473 | ] 474 | }, 475 | { 476 | "cell_type": "code", 477 | "execution_count": null, 478 | "metadata": {}, 479 | "outputs": [], 480 | "source": [ 481 | "def model_final(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):\n", 482 | " \"\"\"\n", 483 | " Build and train a model that incorporates embedding, encoder-decoder, and bidirectional RNN on x and y\n", 484 | " :param input_shape: Tuple of input shape\n", 485 | " :param output_sequence_length: Length of output sequence\n", 486 | " :param english_vocab_size: Number of unique English words in the dataset\n", 487 | " :param french_vocab_size: Number of unique French words in the dataset\n", 488 | " :return: Keras model built, but not trained\n", 489 | " \"\"\"\n", 490 | " # TODO: Implement\n", 491 | " return None\n", 492 | "tests.test_model_final(model_final)\n", 493 | "\n", 494 | "\n", 495 | "print('Final Model Loaded')\n", 496 | "# TODO: Train the final model" 497 | ] 498 | }, 499 | { 500 | "cell_type": "markdown", 501 | "metadata": {}, 502 | "source": [ 503 | "## Prediction (IMPLEMENTATION)" 504 | ] 505 | }, 506 | { 507 | "cell_type": "code", 508 | "execution_count": null, 509 | "metadata": {}, 510 | "outputs": [], 511 | "source": [ 512 | "def final_predictions(x, y, x_tk, y_tk):\n", 513 | " \"\"\"\n", 514 | " Gets predictions using the final model\n", 515 | " :param x: Preprocessed English data\n", 516 | " :param y: Preprocessed French data\n", 517 | " :param x_tk: English tokenizer\n", 518 | " :param y_tk: French tokenizer\n", 519 | " \"\"\"\n", 520 | " # TODO: Train neural network using model_final\n", 521 | " model = None\n", 522 | "\n", 523 | " \n", 524 | " ## DON'T EDIT ANYTHING BELOW THIS LINE\n", 525 | " y_id_to_word = {value: key for key, value in y_tk.word_index.items()}\n", 526 | " y_id_to_word[0] = ''\n", 527 | "\n", 528 | " sentence = 'he saw a old yellow truck'\n", 529 | " sentence = [x_tk.word_index[word] for word in sentence.split()]\n", 530 | " sentence = pad_sequences([sentence], maxlen=x.shape[-1], padding='post')\n", 531 | " sentences = np.array([sentence[0], x[0]])\n", 532 | " predictions = model.predict(sentences, len(sentences))\n", 533 | "\n", 534 | " print('Sample 1:')\n", 535 | " print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[0]]))\n", 536 | " print('Il a vu un vieux camion jaune')\n", 537 | " print('Sample 2:')\n", 538 | " 
print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[1]]))\n", 539 | " print(' '.join([y_id_to_word[np.max(x)] for x in y[0]]))\n", 540 | "\n", 541 | "\n", 542 | "final_predictions(preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer)" 543 | ] 544 | }, 545 | { 546 | "cell_type": "markdown", 547 | "metadata": {}, 548 | "source": [ 549 | "## Submission\n", 550 | "When you're ready to submit, complete the following steps:\n", 551 | "1. Review the [rubric](https://review.udacity.com/#!/rubrics/1004/view) to ensure your submission meets all requirements to pass\n", 552 | "2. Generate an HTML version of this notebook\n", 553 | "\n", 554 | " - Run the next cell to attempt automatic generation (this is the recommended method in Workspaces)\n", 555 | " - Navigate to **FILE -> Download as -> HTML (.html)**\n", 556 | " - Manually generate a copy using `nbconvert` from your shell terminal\n", 557 | "```\n", 558 | "$ pip install nbconvert\n", 559 | "$ python -m nbconvert machine_translation.ipynb\n", 560 | "```\n", 561 | " \n", 562 | "3. Submit the project\n", 563 | "\n", 564 | " - If you are in a Workspace, simply click the \"Submit Project\" button (bottom towards the right)\n", 565 | " \n", 566 | " - Otherwise, add the following files into a zip archive and submit them \n", 567 | " - `helper.py`\n", 568 | " - `machine_translation.ipynb`\n", 569 | " - `machine_translation.html`\n", 570 | " - You can export the notebook by navigating to **File -> Download as -> HTML (.html)**." 571 | ] 572 | }, 573 | { 574 | "cell_type": "code", 575 | "execution_count": null, 576 | "metadata": {}, 577 | "outputs": [], 578 | "source": [ 579 | "!!python -m nbconvert *.ipynb" 580 | ] 581 | }, 582 | { 583 | "cell_type": "markdown", 584 | "metadata": {}, 585 | "source": [ 586 | "## Optional Enhancements\n", 587 | "\n", 588 | "This project focuses on learning various network architectures for machine translation, but we don't evaluate the models according to best practices by splitting the data into separate test & training sets -- so the model accuracy is overstated. Use the [`sklearn.model_selection.train_test_split()`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) function to create separate training & test datasets, then retrain each of the models using only the training set and evaluate the prediction accuracy using the hold out test set. Does the \"best\" model change?" 
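A minimal sketch of that experiment, assuming the preprocessed arrays defined earlier in the notebook; the 20% test fraction and the random seed are arbitrary choices, and the commented-out lines depend on how your `model_final` expects its input to be shaped.

# Illustrative sketch -- split fraction and random_state are arbitrary choices.
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(
    preproc_english_sentences, preproc_french_sentences,
    test_size=0.2, random_state=42)

# Retrain using only the training split (the exact call depends on your model_final):
# model = model_final(x_train.shape, y_train.shape[1], english_vocab_size, french_vocab_size)
# model.fit(x_train, y_train, batch_size=1024, epochs=10, validation_split=0.1)

# Then report accuracy on the held-out test set for an honest estimate:
# test_loss, test_accuracy = model.evaluate(x_test, y_test)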
589 | ] 590 | } 591 | ], 592 | "metadata": { 593 | "kernelspec": { 594 | "display_name": "Python 3", 595 | "language": "python", 596 | "name": "python3" 597 | }, 598 | "language_info": { 599 | "codemirror_mode": { 600 | "name": "ipython", 601 | "version": 3 602 | }, 603 | "file_extension": ".py", 604 | "mimetype": "text/x-python", 605 | "name": "python", 606 | "nbconvert_exporter": "python", 607 | "pygments_lexer": "ipython3", 608 | "version": "3.6.3" 609 | } 610 | }, 611 | "nbformat": 4, 612 | "nbformat_minor": 1 613 | } 614 | -------------------------------------------------------------------------------- /project_tests.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from keras.losses import sparse_categorical_crossentropy 3 | from keras.models import Sequential 4 | from keras.preprocessing.text import Tokenizer 5 | from keras.utils import to_categorical 6 | 7 | 8 | def _test_model(model, input_shape, output_sequence_length, french_vocab_size): 9 | if isinstance(model, Sequential): 10 | model = model.model 11 | 12 | assert model.input_shape == (None, *input_shape[1:]),\ 13 | 'Wrong input shape. Found input shape {} using parameter input_shape={}'.format(model.input_shape, input_shape) 14 | 15 | assert model.output_shape == (None, output_sequence_length, french_vocab_size),\ 16 | 'Wrong output shape. Found output shape {} using parameters output_sequence_length={} and french_vocab_size={}'\ 17 | .format(model.output_shape, output_sequence_length, french_vocab_size) 18 | 19 | assert len(model.loss_functions) > 0,\ 20 | 'No loss function set. Apply the `compile` function to the model.' 21 | 22 | assert sparse_categorical_crossentropy in model.loss_functions,\ 23 | 'Not using `sparse_categorical_crossentropy` function for loss.' 24 | 25 | 26 | def test_tokenize(tokenize): 27 | sentences = [ 28 | 'The quick brown fox jumps over the lazy dog .', 29 | 'By Jove , my quick study of lexicography won a prize .', 30 | 'This is a short sentence .'] 31 | tokenized_sentences, tokenizer = tokenize(sentences) 32 | assert tokenized_sentences == tokenizer.texts_to_sequences(sentences),\ 33 | 'Tokenizer returned and doesn\'t generate the same sentences as the tokenized sentences returned. ' 34 | 35 | 36 | def test_pad(pad): 37 | tokens = [ 38 | [i for i in range(4)], 39 | [i for i in range(6)], 40 | [i for i in range(3)]] 41 | padded_tokens = pad(tokens) 42 | padding_id = padded_tokens[0][-1] 43 | true_padded_tokens = np.array([ 44 | [i for i in range(4)] + [padding_id]*2, 45 | [i for i in range(6)], 46 | [i for i in range(3)] + [padding_id]*3]) 47 | assert isinstance(padded_tokens, np.ndarray),\ 48 | 'Pad returned the wrong type. Found {} type, expected numpy array type.' 49 | assert np.all(padded_tokens == true_padded_tokens), 'Pad returned the wrong results.' 
50 | 51 | padded_tokens_using_length = pad(tokens, 9) 52 | assert np.all(padded_tokens_using_length == np.concatenate((true_padded_tokens, np.full((3, 3), padding_id)), axis=1)),\ 53 | 'Using length argument return incorrect results' 54 | 55 | 56 | def test_simple_model(simple_model): 57 | input_shape = (137861, 21, 1) 58 | output_sequence_length = 21 59 | english_vocab_size = 199 60 | french_vocab_size = 344 61 | 62 | model = simple_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size) 63 | _test_model(model, input_shape, output_sequence_length, french_vocab_size) 64 | 65 | 66 | def test_embed_model(embed_model): 67 | input_shape = (137861, 21) 68 | output_sequence_length = 21 69 | english_vocab_size = 199 70 | french_vocab_size = 344 71 | 72 | model = embed_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size) 73 | _test_model(model, input_shape, output_sequence_length, french_vocab_size) 74 | 75 | 76 | def test_encdec_model(encdec_model): 77 | input_shape = (137861, 15, 1) 78 | output_sequence_length = 21 79 | english_vocab_size = 199 80 | french_vocab_size = 344 81 | 82 | model = encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size) 83 | _test_model(model, input_shape, output_sequence_length, french_vocab_size) 84 | 85 | 86 | def test_bd_model(bd_model): 87 | input_shape = (137861, 21, 1) 88 | output_sequence_length = 21 89 | english_vocab_size = 199 90 | french_vocab_size = 344 91 | 92 | model = bd_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size) 93 | _test_model(model, input_shape, output_sequence_length, french_vocab_size) 94 | 95 | 96 | def test_model_final(model_final): 97 | input_shape = (137861, 15) 98 | output_sequence_length = 21 99 | english_vocab_size = 199 100 | french_vocab_size = 344 101 | 102 | model = model_final(input_shape, output_sequence_length, english_vocab_size, french_vocab_size) 103 | _test_model(model, input_shape, output_sequence_length, french_vocab_size) 104 | --------------------------------------------------------------------------------
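For reference, the `tokenize` and `pad` helpers exercised by `test_tokenize` and `test_pad` above are left as TODOs in the notebook. A minimal sketch that satisfies these tests, assuming the documented behaviour of Keras's `Tokenizer` and `pad_sequences` (one possible implementation, not the only one):

# Illustrative sketch -- one way to satisfy test_tokenize and test_pad.
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

def tokenize(x):
    # Fit a word-level tokenizer on the sentences, then map each sentence to a list of word ids.
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(x)
    return tokenizer.texts_to_sequences(x), tokenizer

def pad(x, length=None):
    # Pad every sequence with trailing zeros (padding='post') to a common length;
    # default to the longest sequence in x when no explicit length is given.
    if length is None:
        length = max(len(sequence) for sequence in x)
    return pad_sequences(x, maxlen=length, padding='post')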