├── .gitignore ├── README.md ├── data ├── __init__.py ├── data_utils.py ├── dataset.py ├── newstest2013.de └── newstest2013.en ├── preprocess.py ├── train.py ├── transformer ├── __init__.py ├── beam.py ├── layers.py ├── models.py ├── modules.py ├── optimizer.py ├── sublayers.py └── translator.py └── translate.py /.gitignore: -------------------------------------------------------------------------------- 1 | models/ 2 | model_* 3 | *.pyc 4 | *.ipynb 5 | ipynb/.* 6 | .DS_Store 7 | .idea 8 | data/newstest2013-train.t7 9 | data/newstest2013.src.dict 10 | data/newstest2013.tgt.dict 11 | data/newstest2013.en.trans.* 12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## A PyTorch Implementation of the Transformer Network 2 | This repository includes PyTorch implementations of ["Attention is All You Need"](https://papers.nips.cc/paper/7181-attention-is-all-you-need.pdf) (Vaswani et al., NIPS 2017) and 3 | ["Weighted Transformer Network for Machine Translation"](https://arxiv.org/pdf/1711.02132.pdf) (Ahmed et al., arXiv 2017) 4 | 5 | ## Reference 6 | **Paper** 7 | - Vaswani et al., "Attention is All You Need", NIPS 2017 8 | - Ahmed et al., "Weighted Transformer Network for Machine Translation", arXiv 2017 9 | 10 | **Code** 11 | - [jadore801120/attention-is-all-you-need](https://github.com/jadore801120/attention-is-all-you-need-pytorch) 12 | - [OpenNMT/OpenNMT-py](https://github.com/OpenNMT/OpenNMT-py) 13 | - [The Annotated Transformer](http://nlp.seas.harvard.edu/2018/04/03/attention.html) 14 | -------------------------------------------------------------------------------- /data/__init__.py: -------------------------------------------------------------------------------- 1 | import data.data_utils 2 | import data.dataset 3 | 4 | __all__ = ['data_utils', 'dataset'] 5 | -------------------------------------------------------------------------------- /data/data_utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import torch 3 | import torchtext.data as data 4 | from torchtext.data import Field, Iterator, BucketIterator 5 | from data.dataset import ParallelDataset 6 | from collections import Counter 7 | 8 | # Extra vocabulary symbols 9 | pad_token = "<pad>" 10 | unk_token = "<unk>" 11 | bos_token = "<s>" 12 | eos_token = "</s>" 13 | 14 | extra_tokens = [pad_token, unk_token, bos_token, eos_token] 15 | 16 | PAD = extra_tokens.index(pad_token) 17 | UNK = extra_tokens.index(unk_token) 18 | BOS = extra_tokens.index(bos_token) 19 | EOS = extra_tokens.index(eos_token) 20 | 21 | 22 | def convert_text2idx(examples, word2idx): 23 | return [[word2idx[w] if w in word2idx else UNK 24 | for w in sent] for sent in examples] 25 | 26 | 27 | def convert_idx2text(example, idx2word): 28 | words = [] 29 | for i in example: 30 | if i == EOS: 31 | break 32 | words.append(idx2word[i]) 33 | return ' '.join(words) 34 | 35 | 36 | def read_corpus(src_path, max_len, lower_case=False): 37 | print('Reading examples from {}..'.format(src_path)) 38 | src_sents = [] 39 | empty_lines, exceed_lines = 0, 0 40 | with open(src_path) as src_file: 41 | for idx, src_line in enumerate(src_file): 42 | if idx % 10000 == 0: 43 | print(' reading {} lines..'.format(idx)) 44 | if src_line.strip() == '': # remove empty lines 45 | empty_lines += 1 46 | continue 47 | if lower_case: # check lower_case 48 | src_line = src_line.lower() 49 | 50 | src_words = 
src_line.strip().split() 51 | if max_len is not None and len(src_words) > max_len: 52 | exceed_lines += 1 53 | continue 54 | src_sents.append(src_words) 55 | 56 | print('Removed {} empty lines'.format(empty_lines), 57 | 'and {} lines exceeding the length {}'.format(exceed_lines, max_len)) 58 | print('Result: {} lines remained'.format(len(src_sents))) 59 | return src_sents 60 | 61 | 62 | def read_parallel_corpus(src_path, tgt_path, max_len, lower_case=False): 63 | print ('Reading examples from {} and {}..'.format(src_path, tgt_path)) 64 | src_sents, tgt_sents = [], [] 65 | empty_lines, exceed_lines = 0, 0 66 | with open(src_path) as src_file, open(tgt_path) as tgt_file: 67 | for idx, (src_line, tgt_line) in enumerate(zip(src_file, tgt_file)): 68 | if idx % 10000 == 0: 69 | print(' reading {} lines..'.format(idx)) 70 | if src_line.strip() == '' or tgt_line.strip() == '': # remove empty lines 71 | empty_lines += 1 72 | continue 73 | if lower_case: # check lower_case 74 | src_line = src_line.lower() 75 | tgt_line = tgt_line.lower() 76 | 77 | src_words = src_line.strip().split() 78 | tgt_words = tgt_line.strip().split() 79 | if max_len is not None and (len(src_words) > max_len or len(tgt_words) > max_len): 80 | exceed_lines += 1 81 | continue 82 | src_sents.append(src_words) 83 | tgt_sents.append(tgt_words) 84 | 85 | print ('Filtered {} empty lines'.format(empty_lines), 86 | 'and {} lines exceeding the length {}'.format(exceed_lines, max_len)) 87 | print ('Result: {} lines remained'.format(len(src_sents))) 88 | return src_sents, tgt_sents 89 | 90 | 91 | def build_vocab(examples, max_size, min_freq, extra_tokens): 92 | print ('Creating vocabulary with max limit {}..'.format(max_size)) 93 | counter = Counter() 94 | word2idx, idx2word = {}, [] 95 | if extra_tokens: 96 | idx2word += extra_tokens 97 | word2idx = {word: idx for idx, word in enumerate(extra_tokens)} 98 | min_freq = max(min_freq, 1) 99 | max_size = max_size + len(idx2word) if max_size else None 100 | for sent in examples: 101 | for w in sent: 102 | counter.update([w]) 103 | # first sort items in alphabetical order and then by frequency 104 | sorted_counter = sorted(counter.items(), key=lambda tup: tup[0]) 105 | sorted_counter.sort(key=lambda tup: tup[1], reverse=True) 106 | 107 | for word, freq in sorted_counter: 108 | if freq < min_freq or (max_size and len(idx2word) == max_size): 109 | break 110 | idx2word.append(word) 111 | word2idx[word] = len(idx2word) - 1 112 | 113 | print ('Vocabulary of size {} has been created'.format(len(idx2word))) 114 | return counter, word2idx, idx2word 115 | 116 | 117 | def load_train_data(data_path, batch_size, max_src_len, max_trg_len, use_cuda=False): 118 | # Note: sequential=False, use_vocab=False, since we use preprocessed inputs. 
119 | src_field = Field(sequential=True, use_vocab=False, include_lengths=True, batch_first=True, 120 | pad_token=PAD, unk_token=UNK, init_token=None, eos_token=None,) 121 | trg_field = Field(sequential=True, use_vocab=False, include_lengths=True, batch_first=True, 122 | pad_token=PAD, unk_token=UNK, init_token=BOS, eos_token=EOS,) 123 | fields = (src_field, trg_field) 124 | device = None if use_cuda else -1 125 | 126 | def filter_pred(example): 127 | if len(example.src) <= max_src_len and len(example.trg) <= max_trg_len: 128 | return True 129 | return False 130 | 131 | dataset = torch.load(data_path) 132 | train_src, train_tgt = dataset['train_src'], dataset['train_tgt'] 133 | dev_src, dev_tgt = dataset['dev_src'], dataset['dev_tgt'] 134 | 135 | train_data = ParallelDataset(train_src, train_tgt, fields=fields, filter_pred=filter_pred,) 136 | train_iter = Iterator(dataset=train_data, batch_size=batch_size, train=True, # Variable(volatile=False) 137 | sort_key=lambda x: data.interleave_keys(len(x.src), len(x.trg)), 138 | repeat=False, shuffle=True, device=device) 139 | dev_data = ParallelDataset(dev_src, dev_tgt, fields=fields,) 140 | dev_iter = Iterator(dataset=dev_data, batch_size=batch_size, train=False, # Variable(volatile=True) 141 | repeat=False, device=device, shuffle=False, sort=False,) 142 | 143 | return src_field, trg_field, train_iter, dev_iter 144 | 145 | 146 | def load_test_data(data_path, vocab_path, batch_size, use_cuda=False): 147 | # Note: sequential=False, use_vocab=False, since we use preprocessed inputs. 148 | src_field = Field(sequential=True, use_vocab=False, include_lengths=True, batch_first=True, 149 | pad_token=PAD, unk_token=UNK, init_token=None, eos_token=None,) 150 | fields = (src_field, None) 151 | device = None if use_cuda else -1 152 | 153 | vocab = torch.load(vocab_path) 154 | _, src_word2idx, _ = vocab['src_dict'] 155 | lower_case = vocab['lower_case'] 156 | 157 | test_src = convert_text2idx(read_corpus(data_path, None, lower_case), src_word2idx) 158 | test_data = ParallelDataset(test_src, None, fields=fields,) 159 | test_iter = Iterator(dataset=test_data, batch_size=batch_size, train=False, # Variable(volatile=True) 160 | repeat=False, device=device, shuffle=False, sort=False) 161 | 162 | return src_field, test_iter 163 | 164 | -------------------------------------------------------------------------------- /data/dataset.py: -------------------------------------------------------------------------------- 1 | 2 | import torchtext.data as data 3 | 4 | 5 | class ParallelDataset(data.Dataset): 6 | """Defines a custom dataset for machine translation.""" 7 | @staticmethod 8 | def sort_key(ex): 9 | return data.interleave_keys(len(ex.src), len(ex.trg)) 10 | 11 | def __init__(self, src_examples, trg_examples, fields, **kwargs): 12 | """Create a Translation Dataset given paths and fields. 13 | 14 | Arguments: 15 | path: Path to the data preprocessed with preprocess.py 16 | category: Whether the Dataset is for training or development 17 | fields: A tuple containing the fields that will be used for data 18 | in each language. 19 | Remaining keyword arguments: Passed to the constructor of 20 | data.Dataset. 
21 | """ 22 | if not isinstance(fields[0], (tuple, list)): 23 | if trg_examples is None: 24 | fields = [('src', fields[0])] 25 | else: 26 | fields = [('src', fields[0]), ('trg', fields[1])] 27 | 28 | examples = [] 29 | if trg_examples is None: 30 | for src_line in src_examples: 31 | examples.append(data.Example.fromlist( 32 | [src_line], fields)) 33 | else: 34 | for src_line, trg_line in zip(src_examples, trg_examples): 35 | examples.append(data.Example.fromlist( 36 | [src_line, trg_line], fields)) 37 | 38 | super(ParallelDataset, self).__init__(examples, fields, **kwargs) -------------------------------------------------------------------------------- /data/newstest2013.de: -------------------------------------------------------------------------------- 1 | Eine republikanische Strategie, um der Wiederwahl von Obama entgegenzutreten 2 | Die Führungskräfte der Republikaner rechtfertigen ihre Politik mit der Notwendigkeit, den Wahlbetrug zu bekämpfen. 3 | Allerdings hält das Brennan Center letzteres für einen Mythos, indem es bekräftigt, dass der Wahlbetrug in den USA seltener ist als die Anzahl der vom Blitzschlag getöteten Menschen. 4 | Die Rechtsanwälte der Republikaner haben in 10 Jahren in den USA übrigens nur 300 Fälle von Wahlbetrug verzeichnet. 5 | Eins ist sicher: diese neuen Bestimmungen werden sich negativ auf die Wahlbeteiligung auswirken. 6 | In diesem Sinne untergraben diese Maßnahmen teilweise das demokratische System der USA. 7 | Im Gegensatz zu Kanada sind die US-Bundesstaaten für die Durchführung der Wahlen in den einzelnen Staaten verantwortlich. 8 | In diesem Sinne hat die Mehrheit der amerikanischen Regierungen seit 2009 neue Gesetze verkündet, die das Verfahren für die Registrierung oder den Urnengang erschweren. 9 | Dieses Phänomen hat nach den Wahlen vom November 2010 an Bedeutung gewonnen, bei denen 675 neue republikanische Vertreter in 26 Staaten verzeichnet werden konnten. 10 | Infolgedessen wurden 180 Gesetzesentwürfe allein im Jahr 2011 eingeführt, die die Ausübung des Wahlrechts in 41 Staaten einschränken. 11 | Die neuen Wahlgesetze verlangen, dass die Wähler einen Lichtbildausweis und einen Nachweis der US-Staatsbürgerschaft vorlegen. 12 | Darüber hinaus werden durch diese Gesetze ebenfalls die Zeiträume für die vorzeitige Stimmabgabe verkürzt, das Recht für ungültig erklärt, sich am Wahltag als Wähler zu registrieren, und Staatsbürgern das Wahlrecht abgesprochen, für die eine Gerichtsakte vorliegt. 13 | Vor den Wahlen von 2006 hat kein US-Staat von den Wählern verlangt, einen Lichtbildausweis vorzuzeigen. 14 | Indiana war der erste Bundesstaat, der eine derartige Forderung erhoben hat. 15 | Der Oberste Gerichtshof der Vereinigten Staaten bestätigte im Jahr 2008 die Verfassungsmäßigkeit des Gesetzes von Indiana. 16 | Die republikanischen Behörden beeilten sich, diese Praxis auf andere Staaten auszudehnen. 17 | Im Laufe der letzten beiden Jahre förderten sie in 34 Bundesstaaten Gesetzesentwürfe, mit denen die Wähler gezwungen werden, einen Lichtbildausweis vorzuzeigen. 18 | Es ist wichtig anzumerken, dass die US-Bürger im Gegensatz zu Quebec nicht über eine universelle Ausweiskarte wie die Krankenversicherungskarte verfügen. 19 | In der Tat besitzen 11% der US-Bürger, d. h. 21 Millionen wahlberechtigte Personen keinen von einer Regierungsbehörde ihres Staates ausgestellten Lichtbildausweis. 20 | Darüber hinaus verfügen fünf Millionen Neuwähler im Jahr 2012 nicht über einen derartigen Ausweis. 
21 | Allerdings sind häufig mehr als hundert Dollar zu zahlen, um den erforderlichen Ausweis zu erhalten. 22 | Von den neuen Einschränkungen sind junge Menschen, Minderheiten und Menschen mit niedrigem Einkommen unverhältnismäßig stark betroffen. 23 | In der Tat verfügen 25% der Afro-Amerikaner, 15% der Personen mit einem Einkommen von weniger als $35.000, 18% der Bürger über 65 Jahre und 20% der Wähler zwischen 18 und 29 Jahren nicht über den erforderlichen Lichtbildausweis. 24 | Dies reicht noch weiter. 25 | Den Studenten, die als Wähler angesehen werden, die ihre Stimme vermehrt für demokratische Kandidaten abgeben, ist es in mehreren Staaten nicht erlaubt, ihren von ihrer Einrichtung ausgestellten Lichtbildausweis zu benutzen. 26 | Andererseits erlauben dieselben Staaten es Mitgliedern von Angel- oder Jagdvereinen, die stärker für die republikanische Seite wählen, die von diesen Vereinen ausgestellten Karten für Wahlen zu benutzen. 27 | Vor 2004 hat kein Bundesstaat für das Wählen einen Nachweis der Staatsbürgerschaft gefordert. 28 | Arizona war der erste Bundesstaat, der eine derartige Forderung einführte. 29 | Seit 2011 haben ein Dutzend Staaten Gesetze verabschiedet, die von den Wählern fordern, dass sie ihre US-Staatsbürgerschaft nachweisen. 30 | Diese Maßnahmen zielen deutlich darauf ab, die hispanischen Stimmen zu begrenzen. 31 | Denn es liegt auf der Hand, dass zwei von drei hispanischen Wählern, die demokratische Partei bevorzugen. 32 | Darüber hinaus haben republikanische Gesetzgeber 2011 in acht Bundesstaaten Gesetze gefördert, mit denen die Registrierung von Wählern am Wahltag abgeschafft wurde. 33 | Darüber hinaus haben Sie das Recht von Einzelpersonen und Gruppen beschränkt, jenen Wählern Hilfestellung zu leisten, die sich registrieren möchten. 34 | Diese Einschränkungen sind nicht ohne Folgen. 35 | Beispielsweise haben die Kampagnen zur Wählerregistrierung bei den allgemeinen Wahlen 2004 dazu beigetragen, ca. 10 Mio. Staatsbürger zu registrieren. 36 | Denn durch die seit 2009 verabschiedeten Maßnahmen hat sich der Anteil der Registrierung von Neuwählern 2010 im Vergleich zu 2006 um 17% verringert. 37 | Außerdem haben die republikanischen Gesetzgeber in fünf weiteren Bundesstaaten Gesetze verabschiedet, die darauf abzielen, den Zeitraum für die vorzeitige Stimmabgabe zu verkürzen. 38 | Beispielsweise waren bei den allgemeinen Wahlen 2008 in Florida 33% der Wähler, die im Voraus gewählt haben, Afro-Amerikaner, obwohl sie nur 13% der Wähler des Bundesstaates ausmachen. 39 | Gleiches gilt für die hispanische Bevölkerung. 40 | Letztere machte nur 11% der Wähler aus, aber 24% der Bürger, die im Voraus gewählt haben. 41 | Im Gegensatz dazu machten die Weißen, die 76% der Wähler darstellten, nur 46% der Wähler aus, die im Voraus wählten. 42 | Natürlich haben sich die demokratischen Gesetzgeber und ihre Anhänger energisch der Verabschiedung von Gesetzen entgegengestellt, die die Registrierung der Wähler einschränken. 43 | Mehrere Gesetzesentwürfe wurden durch die Vetos der demokratischen Gouverneure blockiert. 44 | Der Generalanwalt der USA hat eingegriffen, um die umstrittensten Gesetze auszusetzen. 45 | Sie konnten die Schäden teilweise begrenzen. 46 | Beispielsweise haben nur 16 der 34 Bundesstaaten Gesetze verabschiedet, die das Vorzeigen eines Lichtbildausweises verlangen. 47 | Allerdings werden die neu eingeführten Regeln im Jahr 2012 zweifellos die Ausübung des Wahlrechts erschweren. 
48 | Die demokratischen Kritiker prangern die parteipolitische Natur der verabschiedeten Gesetze an und sehen in ihnen als klares Ziel die Beeinflussung der Ergebnisse in den für 2012 ausschlaggebenden Staaten. 49 | Ein Bericht des Brennan Centers von 2011 zeigt, dass diejenigen Staaten, die diese Gesetze verabschiedet haben, 171 der 270 Stimmen ausmachen, die im Wahlmännergremium erforderlich sind, um die Präsidentschaft zu gewinnen. 50 | Es ist zu früh, um mit Gewissheit zu bestätigen, dass diese Änderungen der Rechtsvorschriften am Wahlsystem erhebliche Auswirkungen auf das Ergebnis der Präsidentschaftswahlen im Jahr 2012 haben werden. 51 | Aber eins ist sicher: diese neuen Bestimmungen werden sich negativ auf die Wahlbeteiligung auswirken. 52 | In diesem Sinne untergraben diese Maßnahmen teilweise das demokratische System der USA. 53 | Prostatakrebsfrüherkennung: sollte der Test gemacht werden oder nicht? 54 | Der PSA-Test weise in der Tat manchmal fehlerhafte Ergebnisse auf, mit falschen negativen oder aber auch falschen positiven Ergebnissen, die zu unnötigen medizinischen Eingriffen führen. 55 | Dies führt zu stärkerem Zögern der bereits unentschlossenen Männer für die Durchführung von Früherkennungstests. 56 | Sollte der Test gemacht werden oder nicht? 57 | Wir haben die Meinung von zwei Fachärzten eingeholt. 58 | Bei den in den USA durchgeführten Studien bestanden zahlreiche Störfaktoren zwischen den Kontrollgruppen, sodass es schwierig ist, diese Daten zu interpretieren und eindeutige Empfehlungen zu geben. 59 | Eine weitere Studie, dieses Mal aus Europa, kam zu der Schlussfolgerung, dass ein Unterschied bei der Sterblichkeit von Patienten mit und ohne Früherkennung besteht. 60 | Diese Studie hat ebenfalls gezeigt, dass bei einer Nachbeobachtung nach 12 Jahren die Wahrscheinlichkeit der Bildung von Metastasen um 30 bis 40% höher ist, wenn keine Früherkennung durchgeführt wurde. 61 | Deshalb empfehle ich den Test ab einem Alter von 50 Jahren bzw. 40 Jahren, wenn man einen direkten Verwandten hat, der bereits an Prostatakrebs erkrankt war. 62 | Bei Männern afro-amerikanischer Herkunft besteht ebenfalls ein höheres Risiko. 63 | Das Wichtigste ist, die richtige Entscheidung zu treffen, sobald eine Krebserkrankung erkannt wurde. 64 | Es gibt Krebsarten, die aggressiv und andere, die indolent sind. 65 | Dem Patienten muss das Ausmaß des Risikos seiner Krebserkrankung wirklich verständlich gemacht werden, indem ihm die möglichen Optionen aufgezeigt werden, bei denen Prostatakrebse unbehandelt bleiben kann, wenn er die Lebensqualität auf lange Sicht nicht beeinträchtigt, und man sich in diesen Fällen eher für eine aktive Überwachung der Krankheit entscheidet. 66 | Heute findet bei vielen Männern, bei denen man Krebs erkannt hat, keine Behandlung statt, da ihr Krebs nicht aggressiv ist und keine Bedrohung für ihr Leben darstellt. 67 | Man schlägt ihnen eine aktive Überwachung vor und bietet ihnen bei Fortschreiten der Krankheit eine Behandlung an. 68 | Man bestimmt mit zunehmender Genauigkeit die Kriterien für eine Entscheidung, wer behandelt werden sollte und wer nicht. 69 | Deshalb empfehle ich trotzdem die Durchführung des Tests. 70 | Am wichtigsten ist es aber, mit seinem Arzt zu sprechen, um zu bestimmen, ob er durchgeführt werden sollte oder nicht. 71 | In Zusammenarbeit mit der internationalen Gesellschaft für Urologie hat Movember ein Instrument entwickelt, mit dem es möglich ist, die Vor- und Nachteile des PSA-Tests zu bewerten. 
72 | Dieses Dokument (derzeit nur auf Englisch, aber eine Übersetzung wird in Kürze verfügbar sein) kann unter folgender Adresse heruntergeladen werden: http://ca.movember.com/fr/mens-health/prostate-cancer-screening 73 | Vorbeugen der Krankheit 74 | Leider gibt es kein Wundermittel zur Vorbeugung von Krebs. 75 | Trotz der Fortschritte in der Forschung bleibt die Annahme einer gesunden Lebensweise der beste Weg, um das Risiko zu verringern, an ihm zu erkranken. 76 | Es wird geschätzt, dass 30% der Krebserkrankungen verhindert werden könnten, wenn alle gut essen und sich ausreichend bewegen würden. 77 | "Wenn es keine Raucher mehr gäbe, würde dieser Wert mindestens auf 50% ansteigen", betont André Beaulieu, Sprecher der kanadischen Krebsgesellschaft. 78 | Allerdings wird davon ausgegangen, dass etwa 10% der Krebserkrankungen durch Vererbung verursacht werden. 79 | Mehrere bleiben auch völlig ungeklärt. 80 | Für die kanadische Krebsgesellschaft bleibt der Kampf gegen die Tabakabhängigkeit eine Priorität, trotz des Rückgangs der Zahl der Raucher. 81 | Das Rauchen ist verantwortlich für 85% der Fälle von Lungenkrebs. 82 | Es ist auch ein Risikofaktor für mehrere andere Krebsarten. 83 | Es schädigt stark die Gesundheit der Menschen. 84 | "Heute noch gibt es in Quebec 1,5 Mio. Raucher", bedauert der Sprecher André Beaulieu. 85 | Ermutigende Daten: 10 Jahre nach dem Verzicht auf die Zigarette, verringert sich das Risiko an Krebs zu sterben um die Hälfte. 86 | Das Gewicht 87 | Gemäß der kanadischen Krebsgesellschaft fördern Übergewicht und Adipositas ebenfalls den Ausbruch der Krankheit. 88 | Sie scheinen das Risiko für Brust-, Grimmdarm-, Mastdarm-, Speiseröhren-, Bauchspeicheldrüsen- und Gebärmutterkrebs zu erhöhen. 89 | "Die Forschung zeigt, dass eine regelmäßige körperliche Aktivität während des gesamten Lebens gegen Grimmdarmkrebs schützt", fügt er hinzu. 90 | Die Ernährung 91 | Die Organisation empfiehlt ebenfalls, den Verzehr von rotem Fleisch zu begrenzen. 92 | In zu großer Menge erhöht es das Risiko, an einem Grimm- bzw. Mastdarmkrebs zu erkranken. 93 | Wurstwaren sollten ebenfalls gemieden werden. 94 | Die Konservierung von Fleisch durch Räuchern, Trocknen oder Salzen kann zur Bildung von Karzinogenen führen. 95 | "Letztere können die Körperzellen schädigen und zur Entwicklung von Krebs führen", erklärt er. 96 | Vitamine 97 | In den letzten Jahren haben mehrere Wissenschaftler den Zusammenhang zwischen Vitaminzusätzen und Krebs untersucht. 98 | Ihre Untersuchungen sind aber derzeit nicht schlüssig. 99 | Nach der kanadischen Krebsgesellschaft sind die Studien zu Vitamin E widersprüchlich. 100 | Während eine Studie die Verringerung des Risikos von Prostatakrebs herausgefunden hat, zeigte eine andere eher eine Erhöhung. 101 | -------------------------------------------------------------------------------- /data/newstest2013.en: -------------------------------------------------------------------------------- 1 | A Republican strategy to counter the re-election of Obama 2 | Republican leaders justified their policy by the need to combat electoral fraud. 3 | However, the Brennan Centre considers this a myth, stating that electoral fraud is rarer in the United States than the number of people killed by lightning. 4 | Indeed, Republican lawyers identified only 300 cases of electoral fraud in the United States in a decade. 5 | One thing is certain: these new provisions will have a negative impact on voter turn-out. 
6 | In this sense, the measures will partially undermine the American democratic system. 7 | Unlike in Canada, the American States are responsible for the organisation of federal elections in the United States. 8 | It is in this spirit that a majority of American governments have passed new laws since 2009 making the registration or voting process more difficult. 9 | This phenomenon gained momentum following the November 2010 elections, which saw 675 new Republican representatives added in 26 States. 10 | As a result, 180 bills restricting the exercise of the right to vote in 41 States were introduced in 2011 alone. 11 | The new election laws require voters to show a photo ID card and proof of US citizenship. 12 | Furthermore, these laws also reduce early voting periods, invalidate the right to register as a voter on election day and withdraw the right to vote of citizens with a criminal record. 13 | Before the 2006 elections, no US State required voters to show a photo ID card. 14 | Indiana was the first State to impose such a requirement. 15 | In 2008, the Supreme Court of the United States upheld the constitutionality of the Indiana law. 16 | The Republican authorities were quick to extend this practice to other States. 17 | Over the past two years, they sponsored bills in 34 States to force voters to show a photo ID card. 18 | It is important to note that, unlike Quebec, American citizens do not have a universal ID card such as the health insurance card. 19 | In fact, 11% of American citizens, i.e. 21 million people of voting age, do not possess a photo ID card issued by a government agency of their State. 20 | In addition, five million new voters in 2012 do not have such identification. 21 | And it often costs over a hundred dollars to obtain the required identity card. 22 | The new restrictions disproportionately affect young people, minorities and people with low incomes. 23 | In fact, 25% of African Americans, 15% of those earning less than $35,000; 18% of citizens over 65 and 20% of voters 18 to 29 years old do not have the required photo ID card. 24 | And that's not all. 25 | Students, voters considered to be voting more for Democratic candidates, are not allowed in several States to use the photo ID card issued by their institution. 26 | On the other hand, these same States allow fishing or hunting club members, who vote more Republican, to use the cards issued by these clubs when they vote. 27 | Prior to 2004, no State required proof of citizenship to vote. 28 | Arizona was the first to introduce such a requirement. 29 | Since 2011, a dozen States have adopted laws requiring voters to prove they are American citizens. 30 | These measures are clearly intended to limit the Hispanic vote. 31 | However, it appears that two out of three Hispanic voters favour the Democratic party. 32 | What is more, in 2011 Republican legislators sponsored laws abolishing the registration of voters on election day in eight States. 33 | In addition, they limited the right of individuals and groups to provide assistance to voters wishing to register. 34 | These restrictions are not without consequence. 35 | For example, during the 2004 general election, voter registration campaigns contributed to registering around 10 million citizens. 36 | However, the measures adopted since 2009 have led to a 17% drop in the registration rate of new voters in 2010 compared to 2006. 37 | In addition, Republican legislators have enacted laws in five other States aimed at reducing the early voting period. 
38 | For example, during the 2008 general election in Florida, 33% of early voters were African-Americans, who accounted however for only 13% of voters in the State. 39 | The same applied to Hispanics. 40 | These represented only 11% of voters, but 24% of citizens who voted early. 41 | On the other hand, 76% of voters were white but these represented only 46% of early voters. 42 | Of course, Democratic legislators and their supporters vigorously opposed the adoption of laws restricting voter registration. 43 | Several bills were blocked by vetoes of Democratic governors. 44 | The United States Attorney General intervened to suspend the most controversial laws. 45 | They were able to partially limit the damage. 46 | For example, only 16 out of 34 States have adopted laws requiring the presentation of a photo ID card. 47 | However, the new rules put in place will undoubtedly make it more difficult to exercise the right to vote in 2012. 48 | Democratic critics denounce the partisan character of the laws that have been passed and they see a clear objective of influencing the 2012 results in key States. 49 | A 2011 Brennan Centre report shows that the States that have adopted these laws represent 171 of the 270 votes needed in the electoral college to win the Presidency. 50 | It is too early to say with certainty that these legislative changes in the electoral system will have significant impacts on the outcome of the 2012 presidential elections. 51 | But one thing is certain: these new provisions will have a negative impact on the turn-out. 52 | In this sense, the measures will partially undermine the American democratic system. 53 | Prostate cancer screening: take the test or not? 54 | Indeed, the PSA test sometimes shows erroneous results with false negative or even false positive results, which involve unnecessary medical interventions. 55 | Enough to make already reluctant men hesitate to take screening tests. 56 | Take the test or not? 57 | We asked two specialists for their opinion. 58 | In studies conducted in the United States, there was a lot of contamination between control groups, so it is difficult to interpret the data and make firm recommendations. 59 | Another study, this time a European one, concluded that there was a difference in mortality between patients who were screened and those who were not. 60 | This study also showed, with a follow-up after 12 years, that it is between 30 and 40% more likely for metastases to occur in the absence of screening. 61 | I therefore recommend the test from age 50, or 40 if you have a direct relative who previously had prostate cancer. 62 | African-American men are also more at risk. 63 | The key is to make the right decision once cancer has been detected. 64 | There are aggressive cancers and others that are indolent. 65 | The patient really needs to be made to understand the degree of risk of his cancer, by offering him the options available, not necessarily treating prostate cancers that are not long-term life threatening, and opting instead, in such cases, for active monitoring of the disease. 66 | Today, many men in whom cancer has been detected will not be treated because their cancer is not aggressive and is not life threatening. 67 | Active monitoring will be suggested, and if the disease progresses, they will be offered treatment. 68 | More and more, specific criteria are being determined in order to decide who should or should not be treated. 69 | Therefore I recommend taking the test. 
70 | But the important thing is to have a discussion with your doctor to determine whether or not to take it. 71 | In collaboration with the Société internationale d'urologie [SIU], Movember has created a tool that makes it possible to evaluate the pros and cons of the PSA test. 72 | You can download the document (in English for the time being, a [French] translation will be available shortly) at this address: http://ca.movember.com/fr/mens-health/prostate-cancer-screening 73 | Preventing the disease 74 | Unfortunately, there is no miracle recipe for preventing cancer. 75 | Despite the progress in research, the adoption of healthy living habits remains the best way to reduce the risk of suffering from it. 76 | It is estimated that if everyone ate well and exercised enough, 30% of cancers could be prevented. 77 | "If no more people smoked, this rate would increase to at least 50%," says André Beaulieu, spokesman for the Canadian Cancer Society. 78 | On the other hand, it is estimated that roughly 10% of cancers are hereditary. 79 | Some are also completely unexplained. 80 | For the Canadian Cancer Society, the fight against tobacco remains a priority, despite the decrease in the number of smokers. 81 | Cigarettes are linked to 85% of lung cancer cases. 82 | It is also a risk factor for a number of others. 83 | This massively damages people's health. 84 | "Even today, there are 1.5 million smokers in Quebec" deplores spokesperson André Beaulieu. 85 | Encouraging data: 10 years after giving up smoking, the risk of dying from cancer drops by half. 86 | Weight 87 | Overweight and obesity are also conducive to the onset of the disease, according to the SCC. 88 | They can increase the risks of cancer of the breast, colon and rectum, oesophagus, pancreas and uterus. 89 | "Research shows that the regular practice of physical activity throughout your life protects against colon cancer" it is also said. 90 | Diet 91 | The organisation also recommends limiting your consumption of red meat. 92 | In large amounts, it increases the risks of developing colo-rectal cancer. 93 | Likewise, so do cured meat products, and these should be avoided. 94 | The conservation of meat by smoking, drying or curing can cause the formation of carcinogens. 95 | "They can damage cells in the body and lead to the development of cancer" it is explained. 96 | Vitamins 97 | In recent years, a number of scientists have studied the links between vitamin supplements and cancer. 98 | For the time being however their research is inconclusive. 99 | Studies on vitamin E are contradictory, according to the SCC. 100 | While one study noted a decrease in the risk of prostate cancer, another noted an increase. 
101 | -------------------------------------------------------------------------------- /preprocess.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from __future__ import print_function 4 | import torch 5 | import argparse 6 | 7 | from data import data_utils 8 | from data.data_utils import read_parallel_corpus 9 | from data.data_utils import build_vocab 10 | from data.data_utils import convert_text2idx 11 | 12 | 13 | def main(opt): 14 | train_src, train_tgt = read_parallel_corpus(opt.train_src, opt.train_tgt, opt.max_len, opt.lower_case) 15 | dev_src, dev_tgt = read_parallel_corpus(opt.dev_src, opt.dev_tgt, None, opt.lower_case) 16 | 17 | if opt.vocab: 18 | src_counter, src_word2idx, src_idx2word, = torch.load(opt.vocab)['src_dict'] 19 | tgt_counter, tgt_word2idx, tgt_idx2word, = torch.load(opt.vocab)['tgt_dict'] 20 | else: 21 | if opt.share_vocab: 22 | print('Building shared vocabulary') 23 | vocab_size = min(opt.src_vocab_size, opt.tgt_vocab_size) \ 24 | if (opt.src_vocab_size is not None and opt.tgt_vocab_size is not None) else None 25 | counter, word2idx, idx2word = build_vocab(train_src + train_tgt, vocab_size, 26 | opt.min_word_count, data_utils.extra_tokens) 27 | src_counter, src_word2idx, src_idx2word = (counter, word2idx, idx2word) 28 | tgt_counter, tgt_word2idx, tgt_idx2word = (counter, word2idx, idx2word) 29 | else: 30 | src_counter, src_word2idx, src_idx2word = build_vocab(train_src, opt.src_vocab_size, 31 | opt.min_word_count, data_utils.extra_tokens) 32 | tgt_counter, tgt_word2idx, tgt_idx2word = build_vocab(train_tgt, opt.tgt_vocab_size, 33 | opt.min_word_count, data_utils.extra_tokens) 34 | train_src, train_tgt = \ 35 | convert_text2idx(train_src, src_word2idx), convert_text2idx(train_tgt, tgt_word2idx) 36 | dev_src, dev_tgt = \ 37 | convert_text2idx(dev_src, src_word2idx), convert_text2idx(dev_tgt, tgt_word2idx) 38 | 39 | # Save source/target vocabulary and train/dev data 40 | torch.save( 41 | { 42 | 'src_dict' : (src_counter, src_word2idx, src_idx2word), 43 | 'tgt_dict' : (tgt_counter, tgt_word2idx, tgt_idx2word), 44 | 'src_path' : opt.train_src, 45 | 'tgt_path' : opt.train_tgt, 46 | 'lower_case': opt.lower_case 47 | } 48 | ,'{}.dict'.format(opt.save_data) 49 | ) 50 | torch.save( 51 | { 52 | 'train_src': train_src, 'train_tgt': train_tgt, 53 | 'dev_src' : dev_src, 'dev_tgt' : dev_tgt, 54 | 'src_dict' : src_word2idx, 'tgt_dict' : tgt_word2idx, 55 | } 56 | , '{}-train.t7'.format(opt.save_data) 57 | ) 58 | print('Saved the vocabulary at {}.dict'.format(opt.save_data)) 59 | print('Saved the preprocessed train/dev data at {}-train.t7'.format(opt.save_data)) 60 | 61 | 62 | if __name__ == '__main__': 63 | parser = argparse.ArgumentParser(description='Preprocessing') 64 | 65 | parser.add_argument('-train_src', required=True, type=str, help='Path to training source data') 66 | parser.add_argument('-train_tgt', required=True, type=str, help='Path to training target data') 67 | parser.add_argument('-dev_src', required=True, type=str, help='Path to development source data') 68 | parser.add_argument('-dev_tgt', required=True, type=str, help='Path to development target data') 69 | parser.add_argument('-vocab', type=str, help='Path to an existing vocabulary file') 70 | parser.add_argument('-src_vocab_size', type=int, help='Source vocabulary size') 71 | parser.add_argument('-tgt_vocab_size', type=int, help='Target vocabulary size') 72 | parser.add_argument('-min_word_count', type=int, default=1) 73 | 
parser.add_argument('-max_len', type=int, default=50, help='Maximum sequence length') 74 | parser.add_argument('-lower_case', action='store_true') 75 | parser.add_argument('-share_vocab', action='store_true') 76 | parser.add_argument('-save_data', required=True, type=str, help='Output file for the prepared data') 77 | 78 | opt = parser.parse_args() 79 | print(opt) 80 | main(opt) -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import os 3 | import sys 4 | import time 5 | import math 6 | import argparse 7 | 8 | import torch 9 | import torch.nn as nn 10 | from torch.nn.utils import clip_grad_norm 11 | import torch.optim as optim 12 | 13 | from data import data_utils 14 | from data.data_utils import load_train_data 15 | from transformer.models import Transformer 16 | from transformer.optimizer import ScheduledOptimizer 17 | 18 | use_cuda = torch.cuda.is_available() 19 | 20 | 21 | def create_model(opt): 22 | data = torch.load(opt.data_path) 23 | opt.src_vocab_size = len(data['src_dict']) 24 | opt.tgt_vocab_size = len(data['tgt_dict']) 25 | 26 | print('Creating new model parameters..') 27 | model = Transformer(opt) # Initialize a model state. 28 | model_state = {'opt': opt, 'curr_epochs': 0, 'train_steps': 0} 29 | 30 | # If opt.model_path exists, load model parameters. 31 | if os.path.exists(opt.model_path): 32 | print('Reloading model parameters..') 33 | model_state = torch.load(opt.model_path) 34 | model.load_state_dict(model_state['model_params']) 35 | 36 | if use_cuda: 37 | print('Using GPU..') 38 | model = model.cuda() 39 | 40 | return model, model_state 41 | 42 | 43 | def main(opt): 44 | print('Loading training and development data..') 45 | _, _, train_iter, dev_iter = load_train_data(opt.data_path, opt.batch_size, 46 | opt.max_src_seq_len, opt.max_tgt_seq_len, use_cuda) 47 | # Create a new model or load an existing one. 48 | model, model_state = create_model(opt) 49 | init_epoch = model_state['curr_epochs'] 50 | if init_epoch >= opt.max_epochs: 51 | print('Training is already complete.', 52 | 'current_epoch:{}, max_epoch:{}'.format(init_epoch, opt.max_epochs)) 53 | sys.exit(0) 54 | 55 | # Loss and Optimizer 56 | # size_average=False: sum the loss over non-PAD target tokens; per-word perplexity is computed later from the word count. 57 | criterion = nn.CrossEntropyLoss(size_average=False, ignore_index=data_utils.PAD) 58 | optimizer = ScheduledOptimizer(optim.Adam(model.trainable_params(), betas=(0.9, 0.98), eps=1e-9), 59 | opt.d_model, opt.n_layers, opt.n_warmup_steps) 60 | if opt.log: 61 | log_train_file = opt.log + '.train.log' 62 | log_dev_file = opt.log + '.valid.log' 63 | if not (os.path.exists(log_train_file) and os.path.exists(log_dev_file)): 64 | with open(log_train_file, 'w') as log_tf, open(log_dev_file, 'w') as log_df: 65 | log_tf.write('epoch,ppl,sents_seen\n') 66 | log_df.write('epoch,ppl,sents_seen\n') 67 | print('Training and validation log will be written to {} and {}' 68 | .format(log_train_file, log_dev_file)) 69 | 70 | for epoch in range(init_epoch + 1, opt.max_epochs + 1): 71 | # Execute training steps for 1 epoch. 72 | train_loss, train_sents = train(model, criterion, optimizer, train_iter, model_state) 73 | print('Epoch {}'.format(epoch), 'Train_ppl: {0:.2f}'.format(train_loss), 74 | 'Sents seen: {}'.format(train_sents)) 75 | 76 | # Execute a validation step. 
77 | eval_loss, eval_sents = eval(model, criterion, dev_iter) 78 | print('Epoch {}'.format(epoch), 'Eval_ppl: {0:.2f}'.format(eval_loss), 79 | 'Sents seen: {}'.format(eval_sents)) 80 | 81 | # Save the model checkpoint in every 1 epoch. 82 | model_state['curr_epochs'] += 1 83 | model_state['model_params'] = model.state_dict() 84 | torch.save(model_state, opt.model_path) 85 | print('The model checkpoint file has been saved') 86 | 87 | if opt.log and log_train_file and log_dev_file: 88 | with open(log_train_file, 'a') as log_tf, open(log_dev_file, 'a') as log_df: 89 | log_tf.write('{epoch},{ppl:0.2f},{sents}\n'.format( 90 | epoch=epoch, ppl=train_loss, sents=train_sents, )) 91 | log_df.write('{epoch},{ppl:0.2f},{sents}\n'.format( 92 | epoch=epoch, ppl=eval_loss, sents=eval_sents, )) 93 | 94 | 95 | def train(model, criterion, optimizer, train_iter, model_state): # TODO: fix opt 96 | model.train() 97 | opt = model_state['opt'] 98 | train_loss, train_loss_total = 0.0, 0.0 99 | n_words, n_words_total = 0, 0 100 | n_sents, n_sents_total = 0, 0 101 | start_time = time.time() 102 | 103 | for batch_idx, batch in enumerate(train_iter): 104 | enc_inputs, enc_inputs_len = batch.src 105 | dec_, dec_inputs_len = batch.trg 106 | dec_inputs = dec_[:, :-1] 107 | dec_targets = dec_[:, 1:] 108 | dec_inputs_len = dec_inputs_len - 1 109 | 110 | # Execute a single training step: forward 111 | optimizer.zero_grad() 112 | dec_logits, _, _, _ = model(enc_inputs, enc_inputs_len, 113 | dec_inputs, dec_inputs_len) 114 | step_loss = criterion(dec_logits, dec_targets.contiguous().view(-1)) 115 | 116 | # Execute a single training step: backward 117 | step_loss.backward() 118 | if opt.max_grad_norm: 119 | clip_grad_norm(model.trainable_params(), float(opt.max_grad_norm)) 120 | optimizer.step() 121 | optimizer.update_lr() 122 | model.proj_grad() # works only for weighted transformer 123 | 124 | train_loss_total += float(step_loss.data[0]) 125 | n_words_total += torch.sum(dec_inputs_len) 126 | n_sents_total += dec_inputs_len.size(0) # batch_size 127 | model_state['train_steps'] += 1 128 | 129 | # Display training status 130 | if model_state['train_steps'] % opt.display_freq == 0: 131 | loss_int = (train_loss_total - train_loss) 132 | n_words_int = (n_words_total - n_words) 133 | n_sents_int = (n_sents_total - n_sents) 134 | 135 | loss_per_words = loss_int / n_words_int 136 | avg_ppl = math.exp(loss_per_words) if loss_per_words < 300 else float("inf") 137 | time_elapsed = (time.time() - start_time) 138 | step_time = time_elapsed / opt.display_freq 139 | 140 | n_words_sec = n_words_int / time_elapsed 141 | n_sents_sec = n_sents_int / time_elapsed 142 | 143 | print('Epoch {0:<3}'.format(model_state['curr_epochs']), 'Step {0:<10}'.format(model_state['train_steps']), 144 | 'Perplexity {0:<10.2f}'.format(avg_ppl), 'Step-time {0:<10.2f}'.format(step_time), 145 | '{0:.2f} sents/s'.format(n_sents_sec), '{0:>10.2f} words/s'.format(n_words_sec)) 146 | train_loss, n_words, n_sents = (train_loss_total, n_words_total, n_sents_total) 147 | start_time = time.time() 148 | 149 | # return per_word_loss over 1 epoch 150 | return math.exp(train_loss_total / n_words_total), n_sents_total 151 | 152 | 153 | def eval(model, criterion, dev_iter): 154 | model.eval() 155 | eval_loss_total = 0.0 156 | n_words_total, n_sents_total = 0, 0 157 | 158 | print('Evaluation') 159 | with torch.no_grad(): 160 | for batch_idx, batch in enumerate(dev_iter): 161 | enc_inputs, enc_inputs_len = batch.src 162 | dec_, dec_inputs_len = batch.trg 163 | dec_inputs = 
dec_[:, :-1] 164 | dec_targets = dec_[:, 1:] 165 | dec_inputs_len = dec_inputs_len - 1 166 | 167 | dec_logits, *_ = model(enc_inputs, enc_inputs_len, dec_inputs, dec_inputs_len) 168 | step_loss = criterion(dec_logits, dec_targets.contiguous().view(-1)) 169 | eval_loss_total += float(step_loss.data[0]) 170 | n_words_total += torch.sum(dec_inputs_len) 171 | n_sents_total += dec_inputs_len.size(0) 172 | print(' {} samples seen'.format(n_sents_total)) 173 | 174 | # return per_word_loss 175 | return math.exp(eval_loss_total / n_words_total), n_sents_total 176 | 177 | 178 | if __name__ == '__main__': 179 | parser = argparse.ArgumentParser(description='Training Hyperparams') 180 | # data loading params 181 | parser.add_argument('-data_path', required=True, help='Path to the preprocessed data') 182 | 183 | # network params 184 | parser.add_argument('-d_model', type=int, default=512) 185 | parser.add_argument('-d_k', type=int, default=64) 186 | parser.add_argument('-d_v', type=int, default=64) 187 | parser.add_argument('-d_ff', type=int, default=2048) 188 | parser.add_argument('-n_heads', type=int, default=8) 189 | parser.add_argument('-n_layers', type=int, default=6) 190 | parser.add_argument('-dropout', type=float, default=0.1) 191 | parser.add_argument('-share_proj_weight', action='store_true') 192 | parser.add_argument('-share_embs_weight', action='store_true') 193 | parser.add_argument('-weighted_model', action='store_true') 194 | 195 | # training params 196 | parser.add_argument('-lr', type=float, default=0.0002) 197 | parser.add_argument('-max_epochs', type=int, default=10) 198 | parser.add_argument('-batch_size', type=int, default=128) 199 | parser.add_argument('-max_src_seq_len', type=int, default=50) 200 | parser.add_argument('-max_tgt_seq_len', type=int, default=50) 201 | parser.add_argument('-max_grad_norm', type=float, default=None) 202 | parser.add_argument('-n_warmup_steps', type=int, default=4000) 203 | parser.add_argument('-display_freq', type=int, default=100) 204 | parser.add_argument('-log', default=None) 205 | parser.add_argument('-model_path', type=str, required=True) 206 | 207 | opt = parser.parse_args() 208 | print(opt) 209 | main(opt) 210 | print('Terminated') 211 | -------------------------------------------------------------------------------- /transformer/__init__.py: -------------------------------------------------------------------------------- 1 | import transformer.modules 2 | import transformer.sublayers 3 | import transformer.layers 4 | import transformer.models 5 | import transformer.translator 6 | import transformer.beam 7 | 8 | __all__ = [ 9 | 'modules', 'sublayers', 'layers', 10 | 'models', 'translator', 'beam'] 11 | -------------------------------------------------------------------------------- /transformer/beam.py: -------------------------------------------------------------------------------- 1 | """ Manage beam search info structure. 2 | Heavily borrowed from OpenNMT-py. 3 | For code in OpenNMT-py, please check the following link: 4 | https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/Beam.py 5 | """ 6 | 7 | import torch 8 | import numpy as np 9 | from data import data_utils 10 | 11 | 12 | class Beam(object): 13 | ''' Store the necessary info for beam search ''' 14 | def __init__(self, size, use_cuda=False): 15 | self.size = size 16 | self.done = False 17 | 18 | self.tt = torch.cuda if use_cuda else torch 19 | 20 | # The score for each translation on the beam. 
21 | self.scores = self.tt.FloatTensor(size).zero_() 22 | self.all_scores = [] 23 | 24 | # The backpointers at each time-step. 25 | self.prev_ks = [] 26 | 27 | # The outputs at each time-step. 28 | self.next_ys = [self.tt.LongTensor(size).fill_(data_utils.PAD)] 29 | self.next_ys[0][0] = data_utils.BOS 30 | 31 | def get_current_state(self): 32 | "Get the outputs for the current timestep." 33 | return self.get_tentative_hypothesis() 34 | 35 | def get_current_origin(self): 36 | "Get the backpointers for the current timestep." 37 | return self.prev_ks[-1] 38 | 39 | def advance(self, word_lk): 40 | "Update the status and check for finished or not." 41 | num_words = word_lk.size(1) 42 | 43 | # Sum the previous scores. 44 | if len(self.prev_ks) > 0: 45 | beam_lk = word_lk + self.scores.unsqueeze(1).expand_as(word_lk) 46 | else: 47 | beam_lk = word_lk[0] 48 | 49 | flat_beam_lk = beam_lk.view(-1) 50 | 51 | best_scores, best_scores_id = flat_beam_lk.topk(self.size, 0, True, True) # 1st sort 52 | best_scores, best_scores_id = flat_beam_lk.topk(self.size, 0, True, True) # 2nd sort TODO 53 | 54 | self.all_scores.append(self.scores) 55 | self.scores = best_scores 56 | 57 | # bestScoresId is flattened beam_size * tgt_vocab_size array, so calculate 58 | # which word and beam each score came from 59 | prev_k = best_scores_id / num_words 60 | self.prev_ks.append(prev_k) 61 | self.next_ys.append(best_scores_id - prev_k * num_words) 62 | 63 | # End condition is when top-of-beam is EOS. 64 | if self.next_ys[-1][0] == data_utils.EOS: 65 | self.done = True 66 | self.all_scores.append(self.scores) 67 | 68 | return self.done 69 | 70 | def sort_scores(self): 71 | "Sort the scores." 72 | return torch.sort(self.scores, 0, True) 73 | 74 | def get_the_best_score_and_idx(self): 75 | "Get the score of the best in the beam." 76 | scores, ids = self.sort_scores() 77 | return scores[1], ids[1] 78 | 79 | def get_tentative_hypothesis(self): 80 | "Get the decoded sequence for the current timestep." 81 | 82 | if len(self.next_ys) == 1: 83 | dec_seq = self.next_ys[0].unsqueeze(1) 84 | else: 85 | _, keys = self.sort_scores() 86 | hyps = [self.get_hypothesis(k) for k in keys] 87 | hyps = [[data_utils.BOS] + h for h in hyps] 88 | dec_seq = torch.from_numpy(np.array(hyps)) 89 | 90 | return dec_seq 91 | 92 | def get_hypothesis(self, k): 93 | """ 94 | Walk back to construct the full hypothesis. 95 | Parameters. 96 | * `k` - the position in the beam to construct. 97 | Returns. 98 | 1. The hypothesis 99 | 2. The attention at each time step. 
100 | """ 101 | hyp = [] 102 | for j in range(len(self.prev_ks)-1, -1, -1): 103 | hyp.append(self.next_ys[j + 1][k]) 104 | k = self.prev_ks[j][k] 105 | 106 | return hyp[::-1] 107 | -------------------------------------------------------------------------------- /transformer/layers.py: -------------------------------------------------------------------------------- 1 | 2 | import torch.nn as nn 3 | from transformer.sublayers import MultiHeadAttention 4 | from transformer.sublayers import MultiBranchAttention 5 | from transformer.sublayers import PoswiseFeedForwardNet 6 | 7 | 8 | class EncoderLayer(nn.Module): 9 | def __init__(self, d_k, d_v, d_model, d_ff, n_heads, dropout=0.1): 10 | super(EncoderLayer, self).__init__() 11 | self.enc_self_attn = MultiHeadAttention(d_k, d_v, d_model, n_heads, dropout) 12 | self.pos_ffn = PoswiseFeedForwardNet(d_model, d_ff, dropout) 13 | 14 | def forward(self, enc_inputs, self_attn_mask): 15 | enc_outputs, attn = self.enc_self_attn(enc_inputs, enc_inputs, 16 | enc_inputs, attn_mask=self_attn_mask) 17 | enc_outputs = self.pos_ffn(enc_outputs) 18 | 19 | return enc_outputs, attn 20 | 21 | 22 | class WeightedEncoderLayer(nn.Module): 23 | def __init__(self, d_k, d_v, d_model, d_ff, n_branches, dropout=0.1): 24 | super(WeightedEncoderLayer, self).__init__() 25 | self.enc_self_attn = MultiBranchAttention(d_k, d_v, d_model, d_ff, n_branches, dropout) 26 | 27 | def forward(self, enc_inputs, self_attn_mask): 28 | return self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs, attn_mask=self_attn_mask) 29 | 30 | 31 | class DecoderLayer(nn.Module): 32 | def __init__(self, d_k, d_v, d_model, d_ff, n_heads, dropout=0.1): 33 | super(DecoderLayer, self).__init__() 34 | self.dec_self_attn = MultiHeadAttention(d_k, d_v, d_model, n_heads, dropout) 35 | self.dec_enc_attn = MultiHeadAttention(d_k, d_v, d_model, n_heads, dropout) 36 | self.pos_ffn = PoswiseFeedForwardNet(d_model, d_ff, dropout) 37 | 38 | def forward(self, dec_inputs, enc_outputs, self_attn_mask, enc_attn_mask): 39 | dec_outputs, dec_self_attn = self.dec_self_attn(dec_inputs, dec_inputs, 40 | dec_inputs, attn_mask=self_attn_mask) 41 | dec_outputs, dec_enc_attn = self.dec_enc_attn(dec_outputs, enc_outputs, 42 | enc_outputs, attn_mask=enc_attn_mask) 43 | dec_outputs = self.pos_ffn(dec_outputs) 44 | 45 | return dec_outputs, dec_self_attn, dec_enc_attn 46 | 47 | 48 | class WeightedDecoderLayer(nn.Module): 49 | def __init__(self, d_k, d_v, d_model, d_ff, n_branches, dropout=0.1): 50 | super(WeightedDecoderLayer, self).__init__() 51 | self.dec_self_attn = MultiHeadAttention(d_k, d_v, d_model, n_branches, dropout) 52 | self.dec_enc_attn = MultiBranchAttention(d_k, d_v, d_model, d_ff, n_branches, dropout) 53 | 54 | def forward(self, dec_inputs, enc_outputs, self_attn_mask, enc_attn_mask): 55 | dec_outputs, dec_self_attn = self.dec_self_attn(dec_inputs, dec_inputs, 56 | dec_inputs, attn_mask=self_attn_mask) 57 | dec_outputs, dec_enc_attn = self.dec_enc_attn(dec_outputs, enc_outputs, 58 | enc_outputs, attn_mask=enc_attn_mask) 59 | 60 | return dec_outputs, dec_self_attn, dec_enc_attn -------------------------------------------------------------------------------- /transformer/models.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import torch 3 | import torch.nn as nn 4 | import numpy as np 5 | from data import data_utils 6 | 7 | from transformer.modules import Linear 8 | from transformer.modules import PosEncoding 9 | from transformer.layers 
import EncoderLayer, DecoderLayer, \ 10 | WeightedEncoderLayer, WeightedDecoderLayer 11 | 12 | 13 | def proj_prob_simplex(inputs): 14 | # project updated weights onto a probability simplex 15 | # see https://arxiv.org/pdf/1101.6081.pdf 16 | sorted_inputs, sorted_idx = torch.sort(inputs.view(-1), descending=True) 17 | dim = len(sorted_inputs) 18 | for i in reversed(range(dim)): 19 | t = (sorted_inputs[:i+1].sum() - 1) / (i+1) 20 | if sorted_inputs[i] > t: 21 | break 22 | return torch.clamp(inputs-t, min=0.0) 23 | 24 | 25 | def get_attn_pad_mask(seq_q, seq_k): 26 | assert seq_q.dim() == 2 and seq_k.dim() == 2 27 | b_size, len_q = seq_q.size() 28 | b_size, len_k = seq_k.size() 29 | pad_attn_mask = seq_k.data.eq(data_utils.PAD).unsqueeze(1) # b_size x 1 x len_k 30 | return pad_attn_mask.expand(b_size, len_q, len_k) # b_size x len_q x len_k 31 | 32 | 33 | def get_attn_subsequent_mask(seq): 34 | assert seq.dim() == 2 35 | attn_shape = [seq.size(0), seq.size(1), seq.size(1)] 36 | subsequent_mask = np.triu(np.ones(attn_shape), k=1) 37 | subsequent_mask = torch.from_numpy(subsequent_mask).byte() 38 | if seq.is_cuda: 39 | subsequent_mask = subsequent_mask.cuda() 40 | 41 | return subsequent_mask 42 | 43 | 44 | class Encoder(nn.Module): 45 | def __init__(self, n_layers, d_k, d_v, d_model, d_ff, n_heads, 46 | max_seq_len, src_vocab_size, dropout=0.1, weighted=False): 47 | super(Encoder, self).__init__() 48 | self.d_model = d_model 49 | self.src_emb = nn.Embedding(src_vocab_size, d_model, padding_idx=data_utils.PAD,) 50 | self.pos_emb = PosEncoding(max_seq_len * 10, d_model) # TODO: *10 fix 51 | self.dropout_emb = nn.Dropout(dropout) 52 | self.layer_type = EncoderLayer if not weighted else WeightedEncoderLayer 53 | self.layers = nn.ModuleList( 54 | [self.layer_type(d_k, d_v, d_model, d_ff, n_heads, dropout) for _ in range(n_layers)]) 55 | 56 | def forward(self, enc_inputs, enc_inputs_len, return_attn=False): 57 | enc_outputs = self.src_emb(enc_inputs) 58 | enc_outputs += self.pos_emb(enc_inputs_len) # Adding positional encoding TODO: note 59 | enc_outputs = self.dropout_emb(enc_outputs) 60 | 61 | enc_self_attn_mask = get_attn_pad_mask(enc_inputs, enc_inputs) 62 | enc_self_attns = [] 63 | for layer in self.layers: 64 | enc_outputs, enc_self_attn = layer(enc_outputs, enc_self_attn_mask) 65 | if return_attn: 66 | enc_self_attns.append(enc_self_attn) 67 | 68 | return enc_outputs, enc_self_attns 69 | 70 | 71 | class Decoder(nn.Module): 72 | def __init__(self, n_layers, d_k, d_v, d_model, d_ff, n_heads, 73 | max_seq_len, tgt_vocab_size, dropout=0.1, weighted=False): 74 | super(Decoder, self).__init__() 75 | self.d_model = d_model 76 | self.tgt_emb = nn.Embedding(tgt_vocab_size, d_model, padding_idx=data_utils.PAD, ) 77 | self.pos_emb = PosEncoding(max_seq_len * 10, d_model) # TODO: *10 fix 78 | self.dropout_emb = nn.Dropout(dropout) 79 | self.layer_type = DecoderLayer if not weighted else WeightedDecoderLayer 80 | self.layers = nn.ModuleList( 81 | [self.layer_type(d_k, d_v, d_model, d_ff, n_heads, dropout) for _ in range(n_layers)]) 82 | 83 | def forward(self, dec_inputs, dec_inputs_len, enc_inputs, enc_outputs, return_attn=False): 84 | dec_outputs = self.tgt_emb(dec_inputs) 85 | dec_outputs += self.pos_emb(dec_inputs_len) # Adding positional encoding # TODO: note 86 | dec_outputs = self.dropout_emb(dec_outputs) 87 | 88 | dec_self_attn_pad_mask = get_attn_pad_mask(dec_inputs, dec_inputs) 89 | dec_self_attn_subsequent_mask = get_attn_subsequent_mask(dec_inputs) 90 | 91 | dec_self_attn_mask = 
torch.gt((dec_self_attn_pad_mask + dec_self_attn_subsequent_mask), 0) 92 | dec_enc_attn_pad_mask = get_attn_pad_mask(dec_inputs, enc_inputs) 93 | 94 | dec_self_attns, dec_enc_attns = [], [] 95 | for layer in self.layers: 96 | dec_outputs, dec_self_attn, dec_enc_attn = layer(dec_outputs, enc_outputs, 97 | self_attn_mask=dec_self_attn_mask, 98 | enc_attn_mask=dec_enc_attn_pad_mask) 99 | if return_attn: 100 | dec_self_attns.append(dec_self_attn) 101 | dec_enc_attns.append(dec_enc_attn) 102 | 103 | return dec_outputs, dec_self_attns, dec_enc_attns 104 | 105 | 106 | class Transformer(nn.Module): 107 | def __init__(self, opt): 108 | super(Transformer, self).__init__() 109 | self.encoder = Encoder(opt.n_layers, opt.d_k, opt.d_v, opt.d_model, opt.d_ff, opt.n_heads, 110 | opt.max_src_seq_len, opt.src_vocab_size, opt.dropout, opt.weighted_model) 111 | self.decoder = Decoder(opt.n_layers, opt.d_k, opt.d_v, opt.d_model, opt.d_ff, opt.n_heads, 112 | opt.max_tgt_seq_len, opt.tgt_vocab_size, opt.dropout, opt.weighted_model) 113 | self.tgt_proj = Linear(opt.d_model, opt.tgt_vocab_size, bias=False) 114 | self.weighted_model = opt.weighted_model 115 | 116 | if opt.share_proj_weight: 117 | print('Sharing target embedding and projection..') 118 | self.tgt_proj.weight = self.decoder.tgt_emb.weight 119 | 120 | if opt.share_embs_weight: 121 | print('Sharing source and target embedding..') 122 | assert opt.src_vocab_size == opt.tgt_vocab_size, \ 123 | 'To share word embeddings, the vocabulary size of src/tgt should be the same' 124 | self.encoder.src_emb.weight = self.decoder.tgt_emb.weight 125 | 126 | def trainable_params(self): 127 | # Avoid updating the position encoding 128 | params = filter(lambda p: p[1].requires_grad, self.named_parameters()) 129 | # Add a separate parameter group for the weighted_model 130 | param_groups = [] 131 | base_params = {'params': [], 'type': 'base'} 132 | weighted_params = {'params': [], 'type': 'weighted'} 133 | for name, param in params: 134 | if 'w_kp' in name or 'w_a' in name: 135 | weighted_params['params'].append(param) 136 | else: 137 | base_params['params'].append(param) 138 | param_groups.append(base_params) 139 | param_groups.append(weighted_params) 140 | 141 | return param_groups 142 | 143 | def encode(self, enc_inputs, enc_inputs_len, return_attn=False): 144 | return self.encoder(enc_inputs, enc_inputs_len, return_attn) 145 | 146 | def decode(self, dec_inputs, dec_inputs_len, enc_inputs, enc_outputs, return_attn=False): 147 | return self.decoder(dec_inputs, dec_inputs_len, enc_inputs, enc_outputs, return_attn) 148 | 149 | def forward(self, enc_inputs, enc_inputs_len, dec_inputs, dec_inputs_len, return_attn=False): 150 | enc_outputs, enc_self_attns = self.encoder(enc_inputs, enc_inputs_len, return_attn) 151 | dec_outputs, dec_self_attns, dec_enc_attns = \ 152 | self.decoder(dec_inputs, dec_inputs_len, enc_inputs, enc_outputs, return_attn) 153 | dec_logits = self.tgt_proj(dec_outputs) 154 | 155 | return dec_logits.view(-1, dec_logits.size(-1)), \ 156 | enc_self_attns, dec_self_attns, dec_enc_attns 157 | 158 | def proj_grad(self): 159 | if self.weighted_model: 160 | for name, param in self.named_parameters(): 161 | if 'w_kp' in name or 'w_a' in name: 162 | param.data = proj_prob_simplex(param.data) 163 | else: 164 | pass 165 | -------------------------------------------------------------------------------- /transformer/modules.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import torch 4 | import 
torch.nn as nn 5 | import torch.nn.init as init 6 | 7 | 8 | class Linear(nn.Module): 9 | def __init__(self, in_features, out_features, bias=True): 10 | super(Linear, self).__init__() 11 | self.linear = nn.Linear(in_features, out_features, bias=bias) 12 | init.xavier_normal_(self.linear.weight) 13 | init.zeros_(self.linear.bias) 14 | 15 | def forward(self, inputs): 16 | return self.linear(inputs) 17 | 18 | 19 | class ScaledDotProductAttention(nn.Module): 20 | def __init__(self, d_k, dropout=.1): 21 | super(ScaledDotProductAttention, self).__init__() 22 | self.scale_factor = np.sqrt(d_k) 23 | self.softmax = nn.Softmax(dim=-1) 24 | self.dropout = nn.Dropout(dropout) 25 | 26 | def forward(self, q, k, v, attn_mask=None): 27 | # q: [b_size x n_heads x len_q x d_k] 28 | # k: [b_size x n_heads x len_k x d_k] 29 | # v: [b_size x n_heads x len_v x d_v] note: (len_k == len_v) 30 | 31 | # attn: [b_size x n_heads x len_q x len_k] 32 | scores = torch.matmul(q, k.transpose(-1, -2)) / self.scale_factor 33 | if attn_mask is not None: 34 | assert attn_mask.size() == scores.size() 35 | scores.masked_fill_(attn_mask, -1e9) 36 | attn = self.dropout(self.softmax(scores)) 37 | 38 | # outputs: [b_size x n_heads x len_q x d_v] 39 | context = torch.matmul(attn, v) 40 | 41 | return context, attn 42 | 43 | 44 | class LayerNormalization(nn.Module): 45 | def __init__(self, d_hid, eps=1e-6): 46 | super(LayerNormalization, self).__init__() 47 | self.gamma = nn.Parameter(torch.ones(d_hid)) 48 | self.beta = nn.Parameter(torch.zeros(d_hid)) 49 | self.eps = eps 50 | 51 | def forward(self, z): 52 | mean = z.mean(dim=-1, keepdim=True,) 53 | std = z.std(dim=-1, keepdim=True,) 54 | ln_out = (z - mean) / (std + self.eps) 55 | ln_out = self.gamma * ln_out + self.beta 56 | 57 | return ln_out 58 | 59 | 60 | class PosEncoding(nn.Module): 61 | def __init__(self, max_seq_len, d_word_vec): 62 | super(PosEncoding, self).__init__() 63 | pos_enc = np.array( 64 | [[pos / np.power(10000, 2.0 * (j // 2) / d_word_vec) for j in range(d_word_vec)] 65 | for pos in range(max_seq_len)]) 66 | pos_enc[:, 0::2] = np.sin(pos_enc[:, 0::2]) 67 | pos_enc[:, 1::2] = np.cos(pos_enc[:, 1::2]) 68 | pad_row = np.zeros([1, d_word_vec]) 69 | pos_enc = np.concatenate([pad_row, pos_enc]).astype(np.float32) 70 | 71 | # additional single row for PAD idx 72 | self.pos_enc = nn.Embedding(max_seq_len + 1, d_word_vec) 73 | # fix positional encoding: exclude weight from grad computation 74 | self.pos_enc.weight = nn.Parameter(torch.from_numpy(pos_enc), requires_grad=False) 75 | 76 | def forward(self, input_len): 77 | max_len = torch.max(input_len) 78 | tensor = torch.cuda.LongTensor if input_len.is_cuda else torch.LongTensor 79 | input_pos = tensor([list(range(1, len+1)) + [0]*(max_len-len) for len in input_len]) 80 | 81 | return self.pos_enc(input_pos) -------------------------------------------------------------------------------- /transformer/optimizer.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class ScheduledOptimizer(object): 5 | "A simple wrapper class for learning rate scheduling" 6 | def __init__(self, optimizer, d_model, n_layers, n_warmup_steps): 7 | self.optimizer = optimizer 8 | self.d_model = d_model 9 | self.n_layers = n_layers 10 | self.n_warmup_steps = n_warmup_steps 11 | self.n_current_steps = 0 12 | 13 | def step(self): 14 | "Step by the inner optimizer" 15 | self.optimizer.step() 16 | 17 | def zero_grad(self): 18 | "Zero out the gradients by the inner optimizer" 19 | 
self.optimizer.zero_grad() 20 | 21 | def update_lr(self): 22 | "Learning rate scheduling per step" 23 | self.n_current_steps += 1 24 | new_lr = np.power(self.d_model, -0.5) * np.min([ 25 | np.power(self.n_current_steps, -0.5), 26 | np.power(self.n_warmup_steps, -1.5) * self.n_current_steps]) 27 | new_lr_weighted = np.power(self.d_model / self.n_layers, -0.5) * np.min([ 28 | np.power(self.n_current_steps, -0.5), 29 | np.power(self.n_warmup_steps / 10, -1.5) * self.n_current_steps]) 30 | 31 | for param_group in self.optimizer.param_groups: 32 | # set a separate lr for the weighted model 33 | param_group['lr'] = new_lr if param_group['type'] == 'base' else new_lr_weighted 34 | -------------------------------------------------------------------------------- /transformer/sublayers.py: -------------------------------------------------------------------------------- 1 | 2 | import torch 3 | import torch.nn as nn 4 | import torch.nn.init as init 5 | 6 | from transformer.modules import Linear 7 | from transformer.modules import ScaledDotProductAttention 8 | from transformer.modules import LayerNormalization 9 | 10 | 11 | class _MultiHeadAttention(nn.Module): 12 | def __init__(self, d_k, d_v, d_model, n_heads, dropout): 13 | super(_MultiHeadAttention, self).__init__() 14 | self.d_k = d_k 15 | self.d_v = d_v 16 | self.d_model = d_model 17 | self.n_heads = n_heads 18 | 19 | self.w_q = Linear([d_model, d_k * n_heads]) 20 | self.w_k = Linear([d_model, d_k * n_heads]) 21 | self.w_v = Linear([d_model, d_v * n_heads]) 22 | 23 | self.attention = ScaledDotProductAttention(d_k, dropout) 24 | 25 | def forward(self, q, k, v, attn_mask): 26 | # q: [b_size x len_q x d_model] 27 | # k: [b_size x len_k x d_model] 28 | # v: [b_size x len_k x d_model] 29 | b_size = q.size(0) 30 | 31 | # q_s: [b_size x n_heads x len_q x d_k] 32 | # k_s: [b_size x n_heads x len_k x d_k] 33 | # v_s: [b_size x n_heads x len_k x d_v] 34 | q_s = self.w_q(q).view(b_size, -1, self.n_heads, self.d_k).transpose(1, 2) 35 | k_s = self.w_k(k).view(b_size, -1, self.n_heads, self.d_k).transpose(1, 2) 36 | v_s = self.w_v(v).view(b_size, -1, self.n_heads, self.d_v).transpose(1, 2) 37 | 38 | if attn_mask: # attn_mask: [b_size x len_q x len_k] 39 | attn_mask = attn_mask.unsqueeze(1).repeat(1, self.n_heads, 1, 1) 40 | # context: [b_size x n_heads x len_q x d_v], attn: [b_size x n_heads x len_q x len_k] 41 | context, attn = self.attention(q_s, k_s, v_s, attn_mask=attn_mask) 42 | # context: [b_size x len_q x n_heads * d_v] 43 | context = context.transpose(1, 2).contiguous().view(b_size, -1, self.n_heads * self.d_v) 44 | 45 | # return the context and attention weights 46 | return context, attn 47 | 48 | 49 | class MultiHeadAttention(nn.Module): 50 | def __init__(self, d_k, d_v, d_model, n_heads, dropout): 51 | super(MultiHeadAttention, self).__init__() 52 | self.n_heads = n_heads 53 | self.multihead_attn = _MultiHeadAttention(d_k, d_v, d_model, n_heads, dropout) 54 | self.proj = Linear(n_heads * d_v, d_model) 55 | self.dropout = nn.Dropout(dropout) 56 | self.layer_norm = LayerNormalization(d_model) 57 | 58 | def forward(self, q, k, v, attn_mask): 59 | # q: [b_size x len_q x d_model] 60 | # k: [b_size x len_k x d_model] 61 | # v: [b_size x len_v x d_model] note (len_k == len_v) 62 | residual = q 63 | # context: a tensor of shape [b_size x len_q x n_heads * d_v] 64 | context, attn = self.multihead_attn(q, k, v, attn_mask=attn_mask) 65 | 66 | # project back to the residual size, outputs: [b_size x len_q x d_model] 67 | output = 
self.dropout(self.proj(context)) 68 | return self.layer_norm(residual + output), attn 69 | 70 | 71 | class MultiBranchAttention(nn.Module): 72 | def __init__(self, d_k, d_v, d_model, d_ff, n_branches, dropout): 73 | super(MultiBranchAttention, self).__init__() 74 | self.d_k = d_k 75 | self.d_v = d_v 76 | self.d_model = d_model 77 | self.d_ff = d_ff 78 | self.n_branches = n_branches 79 | 80 | self.multihead_attn = _MultiHeadAttention(d_k, d_v, d_model, n_branches, dropout) 81 | # additional parameters for BranchedAttention 82 | self.w_o = nn.ModuleList([Linear(d_v, d_model) for _ in range(n_branches)]) 83 | self.w_kp = torch.rand(n_branches) 84 | self.w_kp = nn.Parameter(self.w_kp/self.w_kp.sum()) 85 | self.w_a = torch.rand(n_branches) 86 | self.w_a = nn.Parameter(self.w_a/self.w_a.sum()) 87 | 88 | self.pos_ffn = nn.ModuleList([ 89 | PoswiseFeedForwardNet(d_model, d_ff//n_branches, dropout) for _ in range(n_branches)]) 90 | self.dropout = nn.Dropout(dropout) 91 | self.layer_norm = LayerNormalization(d_model) 92 | 93 | init.xavier_normal(self.w_o) 94 | 95 | def forward(self, q, k, v, attn_mask): 96 | # q: [b_size x len_q x d_model] 97 | # k: [b_size x len_k x d_model] 98 | # v: [b_size x len_v x d_model] note (len_k == len_v) 99 | residual = q 100 | 101 | # context: a tensor of shape [b_size x len_q x n_branches * d_v] 102 | context, attn = self.multih_attn(q, k, v, attn_mask=attn_mask) 103 | 104 | # context: a list of tensors of shape [b_size x len_q x d_v] len: n_branches 105 | context = context.split(self.d_v, dim=-1) 106 | 107 | # outputs: a list of tensors of shape [b_size x len_q x d_model] len: n_branches 108 | outputs = [self.w_o[i](context[i]) for i in range(self.n_branches)] 109 | outputs = [kappa * output for kappa, output in zip(self.w_kp, outputs)] 110 | outputs = [pos_ffn(output) for pos_ffn, output in zip(self.pos_ffn, outputs)] 111 | outputs = [alpha * output for alpha, output in zip(self.w_a, outputs)] 112 | 113 | # output: [b_size x len_q x d_model] 114 | output = self.dropout(torch.stack(outputs).sum(dim=0)) 115 | return self.layer_norm(residual + output), attn 116 | 117 | 118 | class PoswiseFeedForwardNet(nn.Module): 119 | def __init__(self, d_model, d_ff, dropout=0.1): 120 | super(PoswiseFeedForwardNet, self).__init__() 121 | self.relu = nn.ReLU() 122 | self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1) 123 | self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1) 124 | self.dropout = nn.Dropout(dropout) 125 | self.layer_norm = LayerNormalization(d_model) 126 | 127 | def forward(self, inputs): 128 | # inputs: [b_size x len_q x d_model] 129 | residual = inputs 130 | output = self.relu(self.conv1(inputs.transpose(1, 2))) 131 | 132 | # outputs: [b_size x len_q x d_model] 133 | output = self.conv2(output).transpose(1, 2) 134 | output = self.dropout(output) 135 | 136 | return self.layer_norm(residual + output) -------------------------------------------------------------------------------- /transformer/translator.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import print_function 3 | import torch 4 | import torch.nn as nn 5 | from torch.autograd import Variable 6 | 7 | from transformer.models import Transformer 8 | from transformer.beam import Beam 9 | 10 | 11 | class Translator(object): 12 | ''' Load with trained model and handel the beam search ''' 13 | def __init__(self, opt, use_cuda): 14 | self.opt = opt 15 | self.use_cuda = use_cuda 16 
| self.tt = torch.cuda if use_cuda else torch 17 | 18 | checkpoint = torch.load(opt.model_path) 19 | model_opt = checkpoint['opt'] 20 | 21 | self.model_opt = model_opt 22 | model = Transformer(model_opt) 23 | if use_cuda: 24 | print('Using GPU..') 25 | model = model.cuda() 26 | 27 | prob_proj = nn.LogSoftmax(dim=-1) 28 | model.load_state_dict(checkpoint['model_params']) 29 | print('Loaded pre-trained model_state..') 30 | 31 | self.model = model 32 | self.model.prob_proj = prob_proj 33 | self.model.eval() 34 | 35 | def translate_batch(self, src_batch): 36 | ''' Translation work in one batch ''' 37 | 38 | # Batch size is in different location depending on data. 39 | enc_inputs, enc_inputs_len = src_batch 40 | batch_size = enc_inputs.size(0) # enc_inputs: [batch_size x src_len] 41 | beam_size = self.opt.beam_size 42 | 43 | # Encode 44 | enc_outputs, _ = self.model.encode(enc_inputs, enc_inputs_len) 45 | 46 | # Repeat data for beam 47 | enc_inputs = enc_inputs.repeat(1, beam_size).view(batch_size * beam_size, -1) 48 | enc_outputs = enc_outputs.repeat(1, beam_size, 1).view( 49 | batch_size * beam_size, enc_outputs.size(1), enc_outputs.size(2)) 50 | 51 | # Prepare beams 52 | beams = [Beam(beam_size, self.use_cuda) for _ in range(batch_size)] 53 | beam_inst_idx_map = { 54 | beam_idx: inst_idx for inst_idx, beam_idx in enumerate(range(batch_size)) 55 | } 56 | n_remaining_sents = batch_size 57 | 58 | # Decode 59 | for i in range(self.opt.max_decode_step): 60 | len_dec_seq = i + 1 61 | # Preparing decoded data_seq 62 | # size: [batch_size x beam_size x seq_len] 63 | dec_partial_inputs = torch.stack([ 64 | b.get_current_state() for b in beams if not b.done]) 65 | # size: [batch_size * beam_size x seq_len] 66 | dec_partial_inputs = dec_partial_inputs.view(-1, len_dec_seq) 67 | # wrap into a Variable 68 | dec_partial_inputs = Variable(dec_partial_inputs, volatile=True) 69 | 70 | # Preparing decoded pos_seq 71 | # size: [1 x seq] 72 | # dec_partial_pos = torch.arange(1, len_dec_seq + 1).unsqueeze(0) # TODO: 73 | # # size: [batch_size * beam_size x seq_len] 74 | # dec_partial_pos = dec_partial_pos.repeat(n_remaining_sents * beam_size, 1) 75 | # # wrap into a Variable 76 | # dec_partial_pos = Variable(dec_partial_pos.type(torch.LongTensor), volatile=True) 77 | dec_partial_inputs_len = torch.LongTensor(n_remaining_sents,).fill_(len_dec_seq) # TODO: note 78 | dec_partial_inputs_len = dec_partial_inputs_len.repeat(beam_size) 79 | #dec_partial_inputs_len = Variable(dec_partial_inputs_len, volatile=True) 80 | 81 | if self.use_cuda: 82 | dec_partial_inputs = dec_partial_inputs.cuda() 83 | dec_partial_inputs_len = dec_partial_inputs_len.cuda() 84 | 85 | # Decoding 86 | dec_outputs, *_ = self.model.decode(dec_partial_inputs, dec_partial_inputs_len, 87 | enc_inputs, enc_outputs) # TODO: 88 | dec_outputs = dec_outputs[:,-1,:] # [batch_size * beam_size x d_model] 89 | dec_outputs = self.model.tgt_proj(dec_outputs) 90 | out = self.model.prob_proj(dec_outputs) 91 | 92 | # [batch_size x beam_size x tgt_vocab_size] 93 | word_lk = out.view(n_remaining_sents, beam_size, -1).contiguous() 94 | 95 | active_beam_idx_list = [] 96 | for beam_idx in range(batch_size): 97 | if beams[beam_idx].done: 98 | continue 99 | 100 | inst_idx = beam_inst_idx_map[beam_idx] # 해당 beam_idx 의 데이터가 실제 data 에서 몇번째 idx인지 101 | if not beams[beam_idx].advance(word_lk.data[inst_idx]): 102 | active_beam_idx_list += [beam_idx] 103 | 104 | if not active_beam_idx_list: # all instances have finished their path to 105 | break 106 | 107 | # In this section, 
the sentences that are still active are 108 | # compacted so that the decoder is not run on completed sentences 109 | active_inst_idxs = self.tt.LongTensor( 110 | [beam_inst_idx_map[k] for k in active_beam_idx_list]) # TODO: fix 111 | 112 | # update the idx mapping 113 | beam_inst_idx_map = { 114 | beam_idx: inst_idx for inst_idx, beam_idx in enumerate(active_beam_idx_list)} 115 | 116 | def update_active_seq(seq_var, active_inst_idxs): 117 | ''' Remove the encoder outputs of finished instances in one batch. ''' 118 | inst_idx_dim_size, *rest_dim_sizes = seq_var.size() 119 | inst_idx_dim_size = inst_idx_dim_size * len(active_inst_idxs) // n_remaining_sents 120 | new_size = (inst_idx_dim_size, *rest_dim_sizes) 121 | 122 | # select the active instances in batch 123 | original_seq_data = seq_var.data.view(n_remaining_sents, -1) 124 | active_seq_data = original_seq_data.index_select(0, active_inst_idxs) 125 | active_seq_data = active_seq_data.view(*new_size) 126 | 127 | return Variable(active_seq_data, volatile=True) 128 | 129 | def update_active_enc_info(enc_info_var, active_inst_idxs): 130 | ''' Remove the encoder outputs of finished instances in one batch. ''' 131 | 132 | inst_idx_dim_size, *rest_dim_sizes = enc_info_var.size() 133 | inst_idx_dim_size = inst_idx_dim_size * len(active_inst_idxs) // n_remaining_sents 134 | new_size = (inst_idx_dim_size, *rest_dim_sizes) 135 | 136 | # select the active instances in batch 137 | original_enc_info_data = enc_info_var.data.view( 138 | n_remaining_sents, -1, self.model_opt.d_model) 139 | active_enc_info_data = original_enc_info_data.index_select(0, active_inst_idxs) 140 | active_enc_info_data = active_enc_info_data.view(*new_size) 141 | 142 | return Variable(active_enc_info_data, volatile=True) 143 | 144 | enc_inputs = update_active_seq(enc_inputs, active_inst_idxs) 145 | enc_outputs = update_active_enc_info(enc_outputs, active_inst_idxs) 146 | 147 | # update the remaining size 148 | n_remaining_sents = len(active_inst_idxs) 149 | 150 | # Return useful information 151 | all_hyp, all_scores = [], [] 152 | n_best = self.opt.n_best 153 | 154 | for beam_idx in range(batch_size): 155 | scores, tail_idxs = beams[beam_idx].sort_scores() 156 | all_scores += [scores[:n_best]] 157 | 158 | hyps = [beams[beam_idx].get_hypothesis(i) for i in tail_idxs[:n_best]] 159 | all_hyp += [hyps] 160 | 161 | return all_hyp, all_scores 162 | -------------------------------------------------------------------------------- /translate.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import print_function 3 | import torch 4 | import argparse 5 | 6 | from data.data_utils import load_test_data 7 | from data.data_utils import convert_idx2text 8 | from transformer.translator import Translator 9 | 10 | use_cuda = torch.cuda.is_available() 11 | 12 | 13 | def main(opt): 14 | translator = Translator(opt, use_cuda) 15 | 16 | _, _, tgt_idx2word = torch.load(opt.vocab)['tgt_dict'] 17 | _, test_iter = load_test_data(opt.decode_input, opt.vocab, opt.batch_size, use_cuda) 18 | 19 | lines = 0 20 | print('Translated output will be written in {}'.format(opt.decode_output)) 21 | with open(opt.decode_output, 'w') as output: 22 | with torch.no_grad(): 23 | for batch in test_iter: 24 | all_hyp, all_scores = translator.translate_batch(batch.src) 25 | for idx_seqs in all_hyp: 26 | for idx_seq in idx_seqs: 27 | pred_line = convert_idx2text(idx_seq, tgt_idx2word) 28 | output.write(pred_line + '\n') 29 | lines += 
batch.src[0].size(0) 30 | print(' {} lines decoded'.format(lines)) 31 | 32 | 33 | if __name__ == '__main__': 34 | parser = argparse.ArgumentParser(description='Translation hyperparams') 35 | parser.add_argument('-model_path', required=True, type=str, help='Path to the trained model checkpoint') 36 | parser.add_argument('-vocab', required=True, type=str, help='Path to an existing vocabulary file') 37 | parser.add_argument('-decode_input', required=True, type=str, help='Path to the source file to translate') 38 | parser.add_argument('-decode_output', required=True, type=str, help='Path to write translated sequences') 39 | parser.add_argument('-batch_size', type=int, default=1, help='Batch size') 40 | parser.add_argument('-beam_size', type=int, default=5, help='Beam width') 41 | parser.add_argument('-n_best', type=int, default=1, help='Number of best decoded sentences to output') 42 | parser.add_argument('-max_decode_step', type=int, default=100, help='Maximum # of steps for decoding') 43 | 44 | opt = parser.parse_args() 45 | print(opt) 46 | main(opt) 47 | print('Terminated') --------------------------------------------------------------------------------
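The decoder in transformer/models.py builds its self-attention mask by adding the padding mask to the causal (subsequent-position) mask and thresholding the sum with torch.gt(mask_sum, 0). Below is a minimal, self-contained sketch of that combination; it assumes PAD is index 0 as in data/data_utils.py and uses torch.triu in place of the repository's numpy-based helper.

```python
import torch

PAD = 0  # padding index, matching data_utils.PAD

def get_attn_pad_mask(seq_q, seq_k):
    # True wherever the key position is a PAD token
    b_size, len_q = seq_q.size()
    len_k = seq_k.size(1)
    pad_mask = seq_k.eq(PAD).unsqueeze(1)           # [b_size x 1 x len_k]
    return pad_mask.expand(b_size, len_q, len_k)    # [b_size x len_q x len_k]

def get_attn_subsequent_mask(seq):
    # True above the diagonal: position i must not attend to positions > i
    b_size, seq_len = seq.size()
    subsequent = torch.triu(torch.ones(seq_len, seq_len, dtype=torch.bool), diagonal=1)
    return subsequent.unsqueeze(0).expand(b_size, -1, -1)

if __name__ == "__main__":
    dec_inputs = torch.tensor([[2, 5, 7, 0, 0]])    # one sentence with two PAD positions
    pad_mask = get_attn_pad_mask(dec_inputs, dec_inputs)
    sub_mask = get_attn_subsequent_mask(dec_inputs)
    # a position is masked out if it is padding OR lies in the future
    dec_self_attn_mask = torch.gt(pad_mask.int() + sub_mask.int(), 0)
    print(dec_self_attn_mask[0].int())
```

Positions that end up True are the ones ScaledDotProductAttention fills with -1e9 before the softmax.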
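transformer/optimizer.py implements the warmup-then-decay schedule from Vaswani et al.: lr = d_model^-0.5 * min(step^-0.5, step * n_warmup_steps^-1.5), plus a second parameter group for the weighted model that substitutes d_model / n_layers and n_warmup_steps / 10. The sketch below computes only the base schedule; d_model=512 and n_warmup_steps=4000 are illustrative values, not necessarily the defaults used by train.py.

```python
import numpy as np

def noam_lr(step, d_model=512, n_warmup_steps=4000):
    # lr = d_model^-0.5 * min(step^-0.5, step * n_warmup_steps^-1.5)
    return np.power(d_model, -0.5) * min(np.power(step, -0.5),
                                         step * np.power(n_warmup_steps, -1.5))

if __name__ == "__main__":
    for step in (1, 1000, 4000, 8000, 40000):
        # rises linearly during warmup, peaks at step == n_warmup_steps,
        # then decays proportionally to 1/sqrt(step)
        print(step, noam_lr(step))
```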
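For the weighted model, Transformer.proj_grad() in transformer/models.py calls proj_prob_simplex to keep the branch weights w_kp and w_a on the probability simplex after each update; the routine is the standard Euclidean projection onto the simplex (see the referenced arXiv:1101.6081). A standalone copy for a quick sanity check:

```python
import torch

def proj_prob_simplex(inputs):
    # Euclidean projection of a weight vector onto the probability simplex
    sorted_inputs, _ = torch.sort(inputs.view(-1), descending=True)
    dim = len(sorted_inputs)
    for i in reversed(range(dim)):
        t = (sorted_inputs[:i + 1].sum() - 1) / (i + 1)
        if sorted_inputs[i] > t:
            break
    return torch.clamp(inputs - t, min=0.0)

if __name__ == "__main__":
    w = torch.tensor([0.9, 0.4, -0.2])
    p = proj_prob_simplex(w)
    print(p, p.sum())   # -> [0.7500, 0.2500, 0.0000], sum 1.0
```

After projection every weight is non-negative and the weights sum to one, so each layer combines its attention branches as a convex combination.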
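PosEncoding in transformer/modules.py looks positions up in a fixed sinusoidal table whose row 0 is all zeros and is reserved for padding; forward() converts the batch's sequence lengths into those position indices. A small sketch of the index construction, assuming the lengths arrive as a 1-D integer tensor:

```python
import torch

def position_indices(input_len):
    # Positions run 1..len for real tokens; padded steps get index 0, which
    # selects the all-zero row of the positional-encoding table.
    lengths = input_len.tolist()
    max_len = max(lengths)
    return torch.tensor([list(range(1, l + 1)) + [0] * (max_len - l) for l in lengths])

if __name__ == "__main__":
    print(position_indices(torch.tensor([5, 3])))
    # tensor([[1, 2, 3, 4, 5],
    #         [1, 2, 3, 0, 0]])
```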