├── README.md ├── examples ├── semantics │ ├── chat.db │ ├── sem0.cfg │ ├── demo_sentences │ ├── chat_sentences │ ├── sem1.cfg │ ├── sem3.cfg │ ├── model1.py │ ├── model0.py │ ├── sem2.cfg │ ├── chat80.cfg │ ├── syn2sem.py │ └── chat_pnames.cfg ├── grammars │ ├── spanish_grammars │ │ ├── spanish1.regexp │ │ ├── spanish2.regexp │ │ ├── spanish3.cfg │ │ ├── spanish4.regexp │ │ ├── spanish2.pcfg │ │ ├── spanish3.regexp │ │ ├── spanish1.pcfg │ │ ├── spanish5.regexp │ │ ├── spanish2.cfg │ │ ├── spanish1.cfg │ │ ├── spanish2.fcfg │ │ └── spanish1.fcfg │ ├── sample_grammars │ │ ├── dep_test2.dep │ │ ├── toy.cfg │ │ ├── valuation1.val │ │ ├── sem0.fcfg │ │ ├── np.fcfg │ │ ├── background0.fol │ │ ├── sem1.fcfg │ │ ├── glue_train.conll │ │ ├── sql.fcfg │ │ ├── hole.fcfg │ │ ├── bindop.fcfg │ │ ├── sem2.fcfg │ │ ├── event.fcfg │ │ ├── glue.semtype │ │ ├── drt_glue.semtype │ │ ├── drt_glue_event.semtype │ │ ├── glue_event.semtype │ │ ├── chat80.fcfg │ │ ├── gluesemantics.fcfg │ │ └── chat_pnames.fcfg │ ├── basque_grammars │ │ ├── basque1.regexp │ │ ├── basque2.regexp │ │ ├── basque3.cfg │ │ ├── basque4.regexp │ │ ├── basque2.pcfg │ │ ├── basque1.cfg │ │ ├── basque3.regexp │ │ ├── basque5.regexp │ │ ├── basque2.cfg │ │ ├── basque1.pcfg │ │ ├── basque1.fcfg │ │ ├── basque2.fcfg │ │ └── basque3.fcfg │ ├── book_grammars │ │ ├── background.fol │ │ ├── sql0.fcfg │ │ ├── sql1.fcfg │ │ ├── feat1.fcfg │ │ ├── feat0.fcfg │ │ ├── simple-sem.fcfg │ │ ├── storage.fcfg │ │ ├── german.fcfg │ │ ├── drt.fcfg │ │ └── discourse.fcfg │ └── Makefile └── school │ ├── search.py │ ├── generate.py │ ├── README │ ├── parse1.py │ ├── count.py │ ├── parse2.py │ ├── parse3.py │ ├── parser.py │ ├── words.py │ └── categories.py └── LICENSE /README.md: -------------------------------------------------------------------------------- 1 | nltk_teach 2 | ========== 3 | 4 | Miscellaneous materials for teaching NLP using NLTK 5 | -------------------------------------------------------------------------------- /examples/semantics/chat.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nltk/nltk_teach/HEAD/examples/semantics/chat.db -------------------------------------------------------------------------------- /examples/grammars/spanish_grammars/spanish1.regexp: -------------------------------------------------------------------------------- 1 | 2 | NP: {*+*} # busca determinantes y adjetivos que acompañen a nombres 3 | -------------------------------------------------------------------------------- /examples/school/search.py: -------------------------------------------------------------------------------- 1 | from words import * 2 | words = read_text('corpus/telephone.txt') 3 | concordance(" um", words) 4 | 5 | 6 | 7 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/dep_test2.dep: -------------------------------------------------------------------------------- 1 | 1 John _ NNP _ _ 2 SUBJ _ _ 2 | 2 sees _ VB _ _ 0 ROOT _ _ 3 | 3 a _ DT _ _ 4 SPEC _ _ 4 | 4 dog _ NN _ _ 2 OBJ _ _ 5 | -------------------------------------------------------------------------------- /examples/grammars/basque_grammars/basque1.regexp: -------------------------------------------------------------------------------- 1 | NP: {+**} """ # adjetibo edo determinatzaileei loturiko izenak nahiz izen segidak topatzen ditu 2 | -------------------------------------------------------------------------------- 
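The .regexp files above and below are chunk grammars intended for NLTK's RegexpParser: each rule names a chunk type and gives a tag pattern, and the comments describe what the pattern is meant to capture. A minimal sketch of how such a grammar is applied follows; the tag pattern and the tagged sentence are illustrative stand-ins, not taken from the repository files:

    import nltk

    # Illustrative pattern: an optional determiner, any adjectives, then one or more nouns.
    grammar = r"NP: {<DT>?<JJ>*<NN.*>+}"
    chunker = nltk.RegexpParser(grammar)

    tagged = [("the", "DT"), ("little", "JJ"), ("dog", "NN"), ("barked", "VBD")]
    print(chunker.parse(tagged))   # (S (NP the/DT little/JJ dog/NN) barked/VBD)

The parser returns a tree whose NP subtrees are the chunks matched by the pattern; the same call works for the Spanish and Basque grammars once their patterns are supplied.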
/examples/grammars/spanish_grammars/spanish2.regexp: -------------------------------------------------------------------------------- 1 | 2 | NP: {**} # Busca det + nombre + adjetivo 3 | NP: {*+} # Busca seguidas de nombres 4 | 5 | -------------------------------------------------------------------------------- /examples/grammars/basque_grammars/basque2.regexp: -------------------------------------------------------------------------------- 1 | NP: {**} # adjetibo edo determinatzaileei loturiko izenak topatzen ditu 2 | NP: {+} # izen segidak topatzen ditu 3 | -------------------------------------------------------------------------------- /examples/grammars/spanish_grammars/spanish3.cfg: -------------------------------------------------------------------------------- 1 | SN -> N | N Adj | SN Conj SN 2 | N -> 'hombres' | 'mujeres' | 'niños' | N Conj N 3 | Adj -> 'mayores' | 'jovenes' 4 | Conj -> 'y' | 'o' | 'e' 5 | -------------------------------------------------------------------------------- /examples/grammars/basque_grammars/basque3.cfg: -------------------------------------------------------------------------------- 1 | IS -> IZE_ARR | IZE_ARR ADJ | IS LOT IS 2 | IZE_ARR -> 'gizon' | 'emakume' | 'ume' | IZE_ARR LOT IZE_ARR 3 | ADJ -> 'zaharrak' | 'gazteak' 4 | LOT -> 'eta' | 'edo' 5 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/toy.cfg: -------------------------------------------------------------------------------- 1 | S -> NP VP 2 | PP -> P NP 3 | NP -> Det N | NP PP 4 | VP -> V NP | VP PP 5 | Det -> 'a' | 'the' 6 | N -> 'dog' | 'cat' 7 | V -> 'chased' | 'sat' 8 | P -> 'on' | 'in' 9 | 10 | -------------------------------------------------------------------------------- /examples/school/generate.py: -------------------------------------------------------------------------------- 1 | from words import * 2 | 3 | telephone_words = read_words('corpus/telephone.txt') 4 | model = train(telephone_words) 5 | generate(model) 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /examples/grammars/basque_grammars/basque4.regexp: -------------------------------------------------------------------------------- 1 | IS: {(**+**)*} #noun phrase chunks 2 | AS: {(*)+} # verb phrase chunks 3 | PS: {+} # prepositional phrase chunks 4 | -------------------------------------------------------------------------------- /examples/grammars/spanish_grammars/spanish4.regexp: -------------------------------------------------------------------------------- 1 | 2 | SN: {?+*} # noun phrase chunks 3 | SV: {?} # verb phrase chunks 4 | SP: {} # prepositional phrase chunks 5 | 6 | -------------------------------------------------------------------------------- /examples/school/README: -------------------------------------------------------------------------------- 1 | The files in this directory were created for teaching computational 2 | linguistics in secondary school English classes. 
For instructions 3 | and lesson plans, please see http://nltk.org/index.php/Electronic_Grammar 4 | -------------------------------------------------------------------------------- /examples/grammars/spanish_grammars/spanish2.pcfg: -------------------------------------------------------------------------------- 1 | SN -> N [0.5]| N Adj [0.3]| SN Conj SN [0.2] 2 | N -> 'hombres' [0.1]| 'mujeres' [0.2]| 'niños' [0.3]| N Conj N [0.4] 3 | Adj -> 'mayores' [0.3]| 'jovenes' [0.7] 4 | Conj -> 'y' [0.6]| 'o' [0.3] | 'e' [0.1] 5 | -------------------------------------------------------------------------------- /examples/grammars/spanish_grammars/spanish3.regexp: -------------------------------------------------------------------------------- 1 | 2 | SN: 3 | {<.*>+} # Crea Un Chunk Con Cualquier Cosa 4 | }+{ # Considerar Como Chink Apariciones De Verbos (v.*), Preposiciones (sp.*) y Signos De Puntuación (F.*) 5 | -------------------------------------------------------------------------------- /examples/school/parse1.py: -------------------------------------------------------------------------------- 1 | from parser import * 2 | 3 | grammar = """ 4 | NP -> P | D J N 5 | D -> 'a' 6 | J -> 'red' | 'green' 7 | N -> 'chair' | 'house' 8 | """ 9 | 10 | phrase = 'a red chair' 11 | 12 | parse_draw(phrase, grammar) 13 | 14 | 15 | -------------------------------------------------------------------------------- /examples/grammars/basque_grammars/basque2.pcfg: -------------------------------------------------------------------------------- 1 | IS -> IZE_ARR [0.5] | IZE_ARR ADJ [0.3] | IS LOT IS [0.2] 2 | IZE_ARR -> 'gizon' [0.1] | 'emakume' [0.2] | 'ume' [0.3] | IZE_ARR LOT IZE_ARR [0.4] 3 | ADJ -> 'zaharrak' [0.4] | 'gazteak' [0.6] 4 | LOT -> 'eta' [0.9] | 'edo' [0.1] 5 | -------------------------------------------------------------------------------- /examples/grammars/spanish_grammars/spanish1.pcfg: -------------------------------------------------------------------------------- 1 | S -> SN SV [1.0] 2 | SV -> VTrans SN [0.4] 3 | SV -> VIntrans [0.3] 4 | SV -> VSupl SN SN [0.3] 5 | VTrans -> "bebió" [1.0] 6 | VIntrans -> "murió" [1.0] 7 | VSupl -> "regaló" [1.0] 8 | SN -> "flores" [0.6] 9 | SN -> "agua" [0.4] 10 | -------------------------------------------------------------------------------- /examples/grammars/spanish_grammars/spanish5.regexp: -------------------------------------------------------------------------------- 1 | 2 | SN: {?+*} # noun phrase chunks 3 | SV: {?+*} # verb phrase chunks 4 | SP: {} # prepositional phrase chunks 5 | S: {} # Chunk NP, VP 6 | 7 | -------------------------------------------------------------------------------- /examples/grammars/basque_grammars/basque1.cfg: -------------------------------------------------------------------------------- 1 | P -> IS AS 2 | AS -> IS ADI 3 | AS -> ADI 4 | IS -> IM erl_atz 5 | IM -> ize_arr 6 | IM -> ize_izb 7 | ADI -> adt 8 | erl_atz -> "k" | "a" 9 | ize_arr -> "ardo" | "egunkari" | "baloi" 10 | ize_izb -> "Pintxo" | "Kepa" 11 | adt -> "dakar" | "darama" 12 | -------------------------------------------------------------------------------- /examples/grammars/basque_grammars/basque3.regexp: -------------------------------------------------------------------------------- 1 | IS: 2 | {<.*>+} # Edozer Onartzen Duen Chunkerra 3 | }+{ # Chink Bezala Barneratu Aditzak (ADI.*, ADT.* eta ADL.*), Adberbioak (ADB.*), Preposizioak (POST.*), Loturak (LOT.*) Eta Puntuazio Ikurrak (PUNT.*) 4 | 
-------------------------------------------------------------------------------- /examples/grammars/basque_grammars/basque5.regexp: -------------------------------------------------------------------------------- 1 | IS: {(*****)*} #noun phrase chunks 2 | AS: {(+)+*} # verb phrase chunks 3 | PS: {+} # prepositional phrase chunks 4 | S: {} 5 | {} # Chunk NP, VP 6 | -------------------------------------------------------------------------------- /examples/school/count.py: -------------------------------------------------------------------------------- 1 | from words import * 2 | words = read_words('corpus/telephone.txt') 3 | counts = count_words(words) 4 | print_freq(counts) 5 | 6 | 7 | 8 | 9 | from words import * 10 | words = read_words('corpus/rural.txt') 11 | counts = count_pairs(words) 12 | print_freq(counts) 13 | 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /examples/grammars/spanish_grammars/spanish2.cfg: -------------------------------------------------------------------------------- 1 | S -> SN SV 2 | SP -> P SN 3 | SN -> Det N | SN SP 4 | SV -> V SN | SV SP 5 | Det -> "el" | "la" | "un" | "una" | "los" | "las" 6 | N -> "tren" | "telescopio" | "noticia" | "mesa" | "hombre" | "casa" | "amiga" 7 | V -> "vio" | "leí" | "encontró" 8 | P -> "en" | "sobre" | "con" | "de" | "a" 9 | -------------------------------------------------------------------------------- /examples/school/parse2.py: -------------------------------------------------------------------------------- 1 | from parser import * 2 | 3 | grammar = """ 4 | S -> NP VP | VP 5 | VP -> V NP | VP PP 6 | NP -> Det N | NP PP 7 | PP -> P NP 8 | NP -> 'I' 9 | Det -> 'the' | 'my' 10 | N -> 'elephant' | 'pajamas' 11 | V -> 'shot' 12 | P -> 'in' 13 | """ 14 | 15 | sent = 'I shot the elephant in my pajamas' 16 | parse_draw(sent, grammar) 17 | 18 | -------------------------------------------------------------------------------- /examples/grammars/spanish_grammars/spanish1.cfg: -------------------------------------------------------------------------------- 1 | S -> SN SV 2 | SV -> v SN 3 | SV -> v 4 | SN -> det GN 5 | GN -> nom_com 6 | GN -> nom_prop 7 | det -> "el" | "la" | "los" | "las" | "un" | "una" | "unos" | "unas" 8 | nom_com -> "vecino" | "ladrones" | "mujeres" | "bosques" | "noche" | "flauta" | "ventana" 9 | nom_prop -> "Jose" | "Lucas" | "Pedro" | "Marta" 10 | v -> "toca" | "moja" | "adoran" | "robaron" | "escondieron" | "rompió" 11 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/valuation1.val: -------------------------------------------------------------------------------- 1 | john => b1 2 | mary => g1 3 | suzie => g2 4 | fido => d1 5 | tess => d2 6 | noosa => n 7 | girl => {g1, g2} 8 | boy => {b1, b2} 9 | dog => {d1, d2} 10 | bark => {d1, d2} 11 | walk => {b1, g2, d1} 12 | chase => {(b1, g1), (b2, g1), (g1, d1), (g2, d2)} 13 | see => {(b1, g1), (b2, d2), (g1, b1),(d2, b1), (g2, n)} 14 | in => {(b1, n), (b2, n), (d2, n)} 15 | with => {(b1, g1), (g1, b1), (d1, b1), (b1, d1)} 16 | -------------------------------------------------------------------------------- /examples/semantics/sem0.cfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: sem0.cfg 2 | ## 3 | ## Minimal feature-based grammar with lambda semantics. 
4 | ## 5 | ## Author: Ewan Klein 6 | ## URL: 7 | ## For license information, see LICENSE.TXT 8 | 9 | % start S 10 | 11 | S[sem = ] -> NP[sem=?subj] VP[sem=?vp] 12 | VP[sem=?v] -> V[sem=?v] 13 | NP[sem=] -> 'John' 14 | V[sem=<\x.(walk x)>] -> 'walks' 15 | -------------------------------------------------------------------------------- /examples/grammars/spanish_grammars/spanish2.fcfg: -------------------------------------------------------------------------------- 1 | % start S 2 | # ############################ 3 | # Grammar Rules 4 | # ############################ 5 | S -> SN S/SN 6 | S/?x -> SV/?x 7 | S/?x -> V[+aux] COMP SV/?x 8 | SN/SN -> 9 | SV/?x -> V[-aux] SN/?x 10 | # ############################ 11 | # Lexical Rules 12 | # ############################ 13 | V[-aux] -> 'adoras' | 'odias' 14 | V[+aux] -> 'dices' 15 | 16 | SN -> 'quien' | 'que' 17 | COMP -> 'que' 18 | -------------------------------------------------------------------------------- /examples/semantics/demo_sentences: -------------------------------------------------------------------------------- 1 | # Natural Language Toolkit: Demo Sentences 2 | # 3 | # Author: Ewan Klein 4 | # URL: 5 | # For license information, see LICENSE.TXT 6 | ############################################ 7 | # Some example sentences for the sem2.cfg demo 8 | 9 | Fido sees a boy with Mary 10 | John sees Mary 11 | every girl chases a dog 12 | every boy chases a girl 13 | John walks with a girl in Noosa 14 | who walks 15 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/sem0.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: sem0.fcfg 2 | ## 3 | ## Minimal feature-based grammar with lambda semantics. 
4 | ## 5 | ## Author: Ewan Klein 6 | ## URL: 7 | ## For license information, see LICENSE.TXT 8 | 9 | % start S 10 | 11 | S[SEM=] -> NP[SEM=?subj] VP[SEM=?vp] 12 | VP[SEM=?v] -> V[SEM=?v] 13 | NP[SEM=] -> 'Cyril' 14 | V[SEM=<\x.bark(x)>] -> 'barks' 15 | -------------------------------------------------------------------------------- /examples/semantics/chat_sentences: -------------------------------------------------------------------------------- 1 | # Natural Language Toolkit: Demo Sentences 2 | # 3 | # Author: Ewan Klein 4 | # URL: 5 | # For license information, see LICENSE.TXT 6 | ############################################ 7 | # Some example sentences for the Chat-80 demo 8 | 9 | what is the capital of France 10 | which sea borders France 11 | what contains Berlin 12 | which Asian countries border the_Mediterranean 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/np.fcfg: -------------------------------------------------------------------------------- 1 | % start NP 2 | NP[AGR=?a] -> Det[AGR=?a] N[AGR=?a] 3 | Det[AGR=[NUM='sg', PER=3]] -> 'this' | 'that' 4 | Det[AGR=[NUM='pl', PER=3]] -> 'these' | 'those' 5 | Det[AGR=[NUM='pl', PER=1]] -> 'we' 6 | Det[AGR=[PER=2]] -> 'you' 7 | N[AGR=[NUM='sg', GND='m']] -> 'boy' 8 | N[AGR=[NUM='pl', GND='m']] -> 'boys' 9 | N[AGR=[NUM='sg', GND='f']] -> 'girl' 10 | N[AGR=[NUM='pl', GND='f']] -> 'girls' 11 | N[AGR=[NUM='sg']] -> 'student' 12 | N[AGR=[NUM='pl']] -> 'students' 13 | -------------------------------------------------------------------------------- /examples/school/parse3.py: -------------------------------------------------------------------------------- 1 | from parser import * 2 | 3 | grammar = """ 4 | S -> NP VP | VP 5 | PP -> P NP 6 | NP -> N | Det N | N N | NP PP | N VP 7 | VP -> V | V NP | VP PP | VP ADVP 8 | ADVP -> ADV NP 9 | Det -> 'a' | 'an' | 'the' 10 | N -> 'flies' | 'banana' | 'fruit' | 'arrow' | 'time' 11 | V -> 'like' | 'flies' | 'time' 12 | P -> 'on' | 'in' | 'by' 13 | ADV -> 'like' 14 | """ 15 | 16 | sent = 'time flies like an arrow' 17 | 18 | parse_draw(sent, grammar) 19 | 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /examples/school/parser.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import nltk 4 | 5 | def parse(sent, grammar): 6 | gr = nltk.parse_cfg(grammar) 7 | parser = nltk.parse.ChartParse(gr, nltk.parse.TD_STRATEGY) 8 | return parser.get_parse_list(sent.split()) 9 | 10 | def parse_draw(sent, grammar): 11 | trees = parse(sent, grammar) 12 | nltk.draw.draw_trees(*trees) 13 | 14 | def parse_print(sent, grammar): 15 | trees = parse(sent, grammar) 16 | for tree in trees: 17 | print(tree) 18 | 19 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/background0.fol: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: background0.fol 2 | ## 3 | ## Illustration of simple knowledge base for use with inference tools. 4 | ## To accompany sem4.fcfg 5 | ## 6 | ## Author: Ewan Klein 7 | ## URL: 8 | ## For license information, see LICENSE.TXT 9 | 10 | all x. (boxerdog(x) -> dog(x)) 11 | all x. (boxer(x) -> person(x)) 12 | 13 | all x. (-(dog(x) & person(x))) 14 | 15 | some x. boxer(x) 16 | some x. 
boxerdog(x) 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /examples/semantics/sem1.cfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: sem1.cfg 2 | ## 3 | ## Minimal feature-based grammar to illustrate the interpretation of 4 | ## determiner phrases. 5 | ## 6 | ## Author: Ewan Klein 7 | ## URL: 8 | ## For license information, see LICENSE.TXT 9 | 10 | % start S 11 | 12 | S[sem = ] -> NP[sem=?subj] VP[sem=?vp] 13 | VP[sem=?v] -> IV[sem=?v] 14 | NP[sem=] -> Det[sem=?det] N[sem=?n] 15 | 16 | Det[sem=<\Q P. some x. ((Q x) and (P x))>] -> 'a' 17 | N[sem=] -> 'dog' 18 | IV[sem=<\x.(bark x)>] -> 'barks' 19 | -------------------------------------------------------------------------------- /examples/grammars/basque_grammars/basque2.cfg: -------------------------------------------------------------------------------- 1 | S -> is as 2 | is -> ize adj | ior 3 | ize -> 'gaizkile' | 'epaile' | 'bizilagun' 4 | adj -> 'gaiztoek' | 'gaiztoak' | 'kanpotarrak' | 'kanpotarrek' | 'berriak' | 'berriek' 5 | ior -> 'haiek' | 'hark' 6 | as -> mendekoa as | adlg mendekoa as | adlg adlg mendekoa as | adi adl 7 | mendekoa -> adlg mendekoa | adlg adlg mendekoa | 'joatea' | 'joateko' | 'sartzera' 8 | adi -> 'esan' | 'debekatzen' | 'eraman' 9 | adl -> 'zuen' |'zioten' 10 | adlg -> 'bozgorailuarekin' | 'euskal_presoekin' | 'epaitegian' | 'mendira' | 'ejertzitoan' | 'derrigorrean' | 'lagunekin' 11 | -------------------------------------------------------------------------------- /examples/semantics/sem3.cfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: sem3.cfg 2 | ## 3 | ## First attempt at HPSG-style feature-based semantics. 4 | ## This version doesn't work properly! 5 | ## 6 | ## Author: Ewan Klein 7 | ## URL: 8 | ## For license information, see LICENSE.TXT 9 | 10 | % start S 11 | 12 | S[sem=?vp] -> NP[sem=?np] VP[subj=?np, sem=?vp] 13 | VP[sem=?v, subj=?np] -> IV[sem=?v, subj=?np] 14 | NP[sem=[index='k',name='kim']] -> 'Kim' 15 | IV[sem=[rel='bark', arg=?i], subj=[sem=[index=?i]]] -> 'barks' 16 | #IV[fsem=[rel='bark', arg=(1)[]], subj=[fsem=[index->(1)]]] -> 'barks' 17 | 18 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/sem1.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: sem1.fcfg 2 | ## 3 | ## Minimal feature-based grammar to illustrate the interpretation of 4 | ## determiner phrases. 5 | ## 6 | ## Author: Ewan Klein 7 | ## URL: 8 | ## For license information, see LICENSE.TXT 9 | 10 | % start S 11 | 12 | S[SEM = ] -> NP[SEM=?subj] VP[SEM=?vp] 13 | VP[SEM=?v] -> IV[SEM=?v] 14 | NP[SEM=] -> Det[SEM=?det] N[SEM=?n] 15 | 16 | Det[SEM=<\Q P.exists x.(Q(x) & P(x))>] -> 'a' 17 | Det[SEM=<\Q P.all x.(Q(x) -> P(x))>] -> 'every' 18 | N[SEM=<\x.dog(x)>] -> 'dog' 19 | IV[SEM=<\x.bark(x)>] -> 'barks' 20 | -------------------------------------------------------------------------------- /examples/grammars/book_grammars/background.fol: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: background1.fol 2 | ## 3 | ## Illustration of simple knowledge base for use with inference tools. 4 | ## To accompany sem4.fcfg 5 | ## 6 | ## Author: Ewan Klein 7 | ## URL: 8 | ## For license information, see LICENSE.TXT 9 | 10 | all x. 
(boxerdog(x) -> dog(x)) 11 | all x. (boxer(x) -> person(x)) 12 | 13 | all x. (-(dog(x) & person(x))) 14 | 15 | all x. (married(x) <-> exists y. marry(x,y)) 16 | all x. (bark(x) -> dog(x)) 17 | 18 | all x. all y. (marry(x,y) -> (person(x) & person(y))) 19 | 20 | (-(Vincent = Mia)) 21 | (-(Vincent = Fido)) 22 | (-(Mia = Fido)) 23 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/glue_train.conll: -------------------------------------------------------------------------------- 1 | 1 John _ NNP _ _ 2 SUBJ _ _ 2 | 2 runs _ VB _ _ 0 ROOT _ _ 3 | 4 | 1 a _ DT _ _ 2 SPEC _ _ 5 | 2 man _ NN _ _ 3 SUBJ _ _ 6 | 3 runs _ VB _ _ 0 ROOT _ _ 7 | 8 | 1 John _ NNP _ _ 2 SUBJ _ _ 9 | 2 sees _ VB _ _ 0 ROOT _ _ 10 | 3 Mary _ NNP _ _ 2 OBJ _ _ 11 | 12 | 1 every _ DT _ _ 2 SPEC _ _ 13 | 2 girl _ NN _ _ 3 SUBJ _ _ 14 | 3 chases _ VB _ _ 0 ROOT _ _ 15 | 4 an _ DT _ _ 5 SPEC _ _ 16 | 5 animal _ NN _ _ 3 OBJ _ _ 17 | 18 | 1 Bill _ NNP _ _ 2 SUBJ _ _ 19 | 2 sees _ VB _ _ 0 ROOT _ _ 20 | 3 a _ DT _ _ 4 SPEC _ _ 21 | 4 dog _ NN _ _ 2 OBJ _ _ 22 | 23 | 1 every _ DT _ _ 2 SPEC _ _ 24 | 2 girl _ NN _ _ 3 SUBJ _ _ 25 | 3 chases _ VB _ _ 0 ROOT _ _ 26 | 4 John _ NNP _ _ 3 OBJ _ _ 27 | 28 | -------------------------------------------------------------------------------- /examples/grammars/basque_grammars/basque1.pcfg: -------------------------------------------------------------------------------- 1 | as -> mendekoa as [0.15] 2 | as -> adlg mendekoa as [0.31] 3 | as -> adlg adlg mendekoa as [0.08] 4 | as -> adi adl [0.46] 5 | mendekoa -> adlg mendekoa [0.37] 6 | mendekoa -> adlg adlg mendekoa [0.09] 7 | mendekoa -> 'joatea' [0.18] 8 | mendekoa -> 'joateko' [0.27] 9 | mendekoa -> 'sartzera' [0.09] 10 | adi -> 'esan' [0.5] 11 | adi -> 'debekatzen' [0.33] 12 | adi -> 'eraman' [0.17] 13 | adl -> 'zuen' [0.17] 14 | adl -> 'zioten' [0.83] 15 | adlg -> 'bozgorailuarekin' [0.28] 16 | adlg -> 'euskal_presoekin' [0.18] 17 | adlg -> 'epaitegian' [0.09] 18 | adlg -> 'mendira' [0.18] 19 | adlg -> 'ejertzitoan' [0.09] 20 | adlg -> 'derrigorrean' [0.09] 21 | adlg -> 'lagunekin' [0.09] 22 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/sql.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: sql.fcfg 2 | ## 3 | ## Deliberately naive string-based grammar for 4 | ## deriving SQL queries from English 5 | ## 6 | ## Author: Ewan Klein 7 | ## URL: 8 | ## For license information, see LICENSE.TXT 9 | 10 | % start S 11 | 12 | S[sem=(?np + ?vp)] -> NP[sem=?np] VP[sem=?vp] 13 | 14 | VP[sem=(?v + ?pp)] -> IV[sem=?v] PP[sem=?pp] 15 | VP[sem=(?v + ?np)] -> TV[sem=?v] NP[sem=?np] 16 | 17 | NP[sem=(?det + ?n)] -> Det[sem=?det] N[sem=?n] 18 | NP[sem='Country="japan"'] -> 'Japan' 19 | NP[sem='Country="united_states"'] -> 'USA' 20 | 21 | Det[sem='SELECT'] -> 'Which' 22 | N[sem='City FROM city_table'] -> 'cities' 23 | 24 | IV[sem='WHERE'] -> 'are' 25 | PP[sem=?np] -> P[sem=?p] NP[sem=?np] 26 | P -> 'in' 27 | 28 | -------------------------------------------------------------------------------- /examples/semantics/model1.py: -------------------------------------------------------------------------------- 1 | # Natural Language Toolkit: Example Model 2 | # 3 | # Author: Ewan Klein 4 | # URL: 5 | # For license information, see LICENSE.TXT 6 | 7 | """ 8 | This is a sample model to accompany the U{chat80.cfg} grammar} and is 9 | intended to be imported as a 
module. 10 | """ 11 | 12 | from nltk.semantics import * 13 | from nltk.corpora import chat80 14 | 15 | rels = chat80.rels 16 | concept_map = chat80.process_bundle(rels) 17 | concepts = concept_map.values() 18 | val = chat80.make_valuation(concepts, read=True) 19 | 20 | #Bind C{dom} to the C{domain} property of C{val}. 21 | dom = val.domain 22 | 23 | #Initialize a model with parameters C{dom} and C{val}. 24 | m = Model(dom, val) 25 | 26 | #Initialize a variable assignment with parameter C{dom}. 27 | g = Assignment(dom) 28 | -------------------------------------------------------------------------------- /examples/grammars/Makefile: -------------------------------------------------------------------------------- 1 | # NLTK: Documentation Makefile 2 | # 3 | # Copyright (C) 2001-2014 NLTK Project 4 | # Author: Ewan Klein 5 | # URL: 6 | # For license information, see LICENSE.TXT 7 | 8 | DATADIR = ../../../nltk_data 9 | PUBLISH = $(DATADIR)/packages/grammars 10 | 11 | PACKAGE_DIRS = book_grammars sample_grammars #basque_grammars spanish_grammars 12 | PACKAGES := $(addsuffix .zip, $(PACKAGE_DIRS)) 13 | 14 | ZIP = zip 15 | 16 | define remove 17 | $(if $(wildcard $1), rm $1,) 18 | endef 19 | 20 | all: publish 21 | 22 | ci: 23 | git ci -m "updated grammar files" 24 | 25 | zip: clean $(PACKAGES) 26 | 27 | 28 | clean: 29 | $(call remove, *.zip) 30 | 31 | %.zip: % 32 | $(ZIP) -r $< $< 33 | # git add *zip 34 | 35 | publish: zip 36 | cp $(PACKAGES) $(PUBLISH) 37 | $(MAKE) -C $(DATADIR) grammars 38 | $(MAKE) -C $(DATADIR) pkg_index 39 | -------------------------------------------------------------------------------- /examples/grammars/book_grammars/sql0.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: sql.fcfg 2 | ## 3 | ## Deliberately naive string-based grammar for 4 | ## deriving SQL queries from English 5 | ## 6 | ## Author: Ewan Klein 7 | ## URL: 8 | ## For license information, see LICENSE.TXT 9 | 10 | % start S 11 | 12 | S[SEM=(?np + WHERE + ?vp)] -> NP[SEM=?np] VP[SEM=?vp] 13 | 14 | VP[SEM=(?v + ?pp)] -> IV[SEM=?v] PP[SEM=?pp] 15 | VP[SEM=(?v + ?ap)] -> IV[SEM=?v] AP[SEM=?ap] 16 | NP[SEM=(?det + ?n)] -> Det[SEM=?det] N[SEM=?n] 17 | PP[SEM=(?p + ?np)] -> P[SEM=?p] NP[SEM=?np] 18 | AP[SEM=?pp] -> A[SEM=?a] PP[SEM=?pp] 19 | 20 | NP[SEM='Country="greece"'] -> 'Greece' 21 | NP[SEM='Country="china"'] -> 'China' 22 | 23 | Det[SEM='SELECT'] -> 'Which' | 'What' 24 | 25 | N[SEM='City FROM city_table'] -> 'cities' 26 | 27 | IV[SEM=''] -> 'are' 28 | A[SEM=''] -> 'located' 29 | P[SEM=''] -> 'in' 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /examples/grammars/basque_grammars/basque1.fcfg: -------------------------------------------------------------------------------- 1 | % start AS 2 | # ############################ 3 | # Grammar Rules 4 | # ############################ 5 | # AS expansion rules 6 | AS[ergnum=?n1, absnum=?n2] -> IS[kas=erg, num=?n1] AS[ergnum=?n1, absnum=?n2] 7 | AS[ergnum=?n1, absnum=?n2] -> AS[ergnum=?n1, absnum=?n2] IS[kas=erg, num=?n1] 8 | AS[ergnum=?n1, absnum=?n2] -> IS[kas=abs, num=?n2] AS[ergnum=?n1, absnum=?n2] 9 | AS[ergnum=?n1, absnum=?n2] -> AS[ergnum=?n1, absnum=?n2] IS[kas=abs, num=?n2] 10 | IS[kas=?k, num=?n] -> ize[azp=arr] knmdek[kas=?k, num=?n] 11 | AS[ergnum=?n1, absnum=?n2] -> adt[ergnum=?n1, absnum=?n2] 12 | # ############################ 13 | # Lexicon 14 | # ############################ 15 | adt[ergnum=hu, absnum=hu] -> 'dakar' | 'darama' 16 | 
adt[ergnum=hk, absnum=hu] -> 'dakarte' | 'daramate' 17 | knmdek[kas=erg, num=hu] -> 'ak' 18 | knmdek[kas=erg, num=hk] -> 'ek' 19 | knmdek[kas=abs, num=hk] -> 'ak' 20 | knmdek[kas=abs, num=hu] -> 'a' 21 | ize[azp=arr] -> 'zakur' | 'gizon' 22 | 23 | -------------------------------------------------------------------------------- /examples/grammars/basque_grammars/basque2.fcfg: -------------------------------------------------------------------------------- 1 | % start S 2 | # ############################ 3 | # Grammar Rules 4 | # ############################ 5 | S -> IS[kas=erg] AS/IS 6 | # IS erregelak 7 | IS[kas=?k, num=?n] -> ize[azp=arr] knmdek[kas=?k, num=?n] 8 | IS[kas=?k, num=?n] -> ize[azp=ber] knmdek[kas=?k, num=?n] 9 | IS[kas=?k, num=?n]/IS -> 10 | # AS erregelak 11 | AS[ergnum=?n1, absnum=?n2]/?x -> IS[kas=abs, num=?n1]/?x AS[ergnum=?n1, absnum=?n2] 12 | AS[ergnum=?n1, absnum=?n2] -> adi adl[ergnum=?n1, absnum=?n2] 13 | # ############################ 14 | # Lexicon 15 | # ############################ 16 | knmdek[kas=erg, num=hu] -> 'ak' 17 | knmdek[kas=erg, num=hk] -> 'ek' 18 | knmdek[kas=abs, num=hk] -> 'ak' 19 | knmdek[kas=abs, num=hu] -> 'a' 20 | ize[azp=arr] -> 'bizilagun' | 'aita' | 'gizon' | 'emakume' 21 | ize[azp=ber] -> 'Kepa' | 'Ainara' 22 | adi -> 'ekarri' | 'eraman' | 'puskatu' | 'lapurtu' 23 | adl[ergnum=hu, absnum=hu] -> 'du' | 'zuen' 24 | adl[ergnum=hk, absnum=hu] -> 'dute' | 'zuten' 25 | adl[ergnum=hu, absnum=hk] -> 'ditu' | 'zituen' 26 | adl[ergnum=hk, absnum=hk] -> 'dituzte' | 'zituzten' 27 | -------------------------------------------------------------------------------- /examples/grammars/basque_grammars/basque3.fcfg: -------------------------------------------------------------------------------- 1 | % start S 2 | # ############################ 3 | # Grammar Rules 4 | # ############################ 5 | 6 | ## NORK-NOR Kasuak 7 | 8 | S -> IS[kas=erg] AS/IS 9 | # IS erregelak 10 | IS[kas=?k, num=?n] -> ize[azp=arr] knmdek[kas=?k, num=?n] 11 | IS[kas=?k, num=?n] -> ize[azp=ber] knmdek[kas=?k, num=?n] 12 | 13 | IS[kas=?k, num=?n]/IS -> 14 | 15 | # AS erregelak 16 | AS[ergnum=?n1, absnum=?n2]/?x -> IS[kas=abs, num=?n1]/?x AS[ergnum=?n1, absnum=?n2] 17 | AS[ergnum=?n1, absnum=?n2] -> adi adl[ergnum=?n1, absnum=?n2] 18 | # ############################ 19 | # Lexicon 20 | # ############################ 21 | 22 | knmdek[kas=erg, num=hu] -> 'ak' 23 | knmdek[kas=erg, num=hk] -> 'ek' 24 | 25 | knmdek[kas=abs, num=hk] -> 'ak' 26 | knmdek[kas=abs, num=hu] -> 'a' 27 | 28 | ize[azp=arr] -> 'bizilagun' | 'aita' | 'gizon' | 'emakume' 29 | ize[azp=ber] -> 'Kepa' | 'Ainara' 30 | 31 | adi -> 'ekarri' | 'eraman' | 'puskatu' | 'lapurtu' 32 | 33 | adl[ergnum=hu, absnum=hu] -> 'du' | 'zuen' 34 | adl[ergnum=hk, absnum=hu] -> 'dute' | 'zuten' 35 | adl[ergnum=hu, absnum=hk] -> 'ditu' | 'zituen' 36 | adl[ergnum=hk, absnum=hk] -> 'dituzte' | 'zituzten' 37 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/hole.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: hole.fcfg 2 | ## 3 | ## Minimal feature-based grammar with lambda semantics for use by the hole.py 4 | ## module for Hole Semantics (see Blackburn and Bos). 
5 | ## 6 | ## Author: Dan Garrette 7 | ## Robin Cooper 8 | ## URL: 9 | ## For license information, see LICENSE.TXT 10 | 11 | % start S 12 | 13 | S[SEM=] -> NP[SEM=?subj] VP[SEM=?vp] 14 | VP[SEM=?v] -> IV[SEM=?v] 15 | VP[NUM=?n,SEM=] -> TV[NUM=?n,SEM=?v] NP[SEM=?obj] 16 | NP[SEM=] -> Det[SEM=?det] N[SEM=?n] 17 | 18 | Det[SEM=<\P Q h l.exists h1 l1 l2 l3 x.(ALL(l2,x,l3) & IMP(l3,l1,h1) & LEQ(l,h1) & LEQ(l2,h) & P(x)(h)(l1) & Q(x)(h)(l) & HOLE(h) & HOLE(h1) & LABEL(l) & LABEL(l1) & LABEL(l2) & LABEL(l3))>] -> 'every' 19 | Det[SEM=<\P Q h l.exists h1 l1 l2 l3 x.(EXISTS(l2,x,l3) & AND(l3,l1,h1) & LEQ(l,h1) & LEQ(l2,h) & P(x)(h)(l1) & Q(x)(h)(l) & HOLE(h) & HOLE(h1) & LABEL(l) & LABEL(l1) & LABEL(l2) & LABEL(l3))>] -> 'a' 20 | N[SEM=<\x h l.(PRED(l,girl,x) & LEQ(l,h) & HOLE(h) & LABEL(l))>] -> 'girl' 21 | N[SEM=<\x h l.(PRED(l,dog,x) & LEQ(l,h) & HOLE(h) & LABEL(l))>] -> 'dog' 22 | IV[SEM=<\x h l.(PRED(l,bark,x) & LEQ(l,h) & HOLE(h) & LABEL(l))>] -> 'barks' 23 | TV[SEM=<\P x.P(\y h l.(PRED(l,chase,x,y) & LEQ(l,h) & HOLE(h) & LABEL(l)))>] -> 'chases' 24 | -------------------------------------------------------------------------------- /examples/grammars/book_grammars/sql1.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: sql.fcfg 2 | ## 3 | ## Deliberately naive string-based grammar for 4 | ## deriving SQL queries from English 5 | ## 6 | ## Author: Ewan Klein 7 | ## URL: 8 | ## For license information, see LICENSE.TXT 9 | 10 | % start S 11 | 12 | S[SEM=(?np + WHERE + ?vp)] -> NP[SEM=?np] VP[SEM=?vp] 13 | 14 | VP[SEM=(?v + ?pp)] -> IV[SEM=?v] PP[SEM=?pp] 15 | VP[SEM=(?v + ?ap)] -> IV[SEM=?v] AP[SEM=?ap] 16 | VP[SEM=(?v + ?np)] -> TV[SEM=?v] NP[SEM=?np] 17 | VP[SEM=(?vp1 + ?c + ?vp2)] -> VP[SEM=?vp1] Conj[SEM=?c] VP[SEM=?vp2] 18 | 19 | NP[SEM=(?det + ?n)] -> Det[SEM=?det] N[SEM=?n] 20 | NP[SEM=(?n + ?pp)] -> N[SEM=?n] PP[SEM=?pp] 21 | NP[SEM=?n] -> N[SEM=?n] | CardN[SEM=?n] 22 | 23 | ## NB Numbers in the Chat-80 database represent thousands. 24 | CardN[SEM='1000'] -> '1,000,000' 25 | 26 | PP[SEM=(?p + ?np)] -> P[SEM=?p] NP[SEM=?np] 27 | AP[SEM=?pp] -> A[SEM=?a] PP[SEM=?pp] 28 | 29 | NP[SEM='Country="greece"'] -> 'Greece' 30 | NP[SEM='Country="china"'] -> 'China' 31 | 32 | Det[SEM='SELECT'] -> 'Which' | 'What' 33 | Conj[SEM='AND'] -> 'and' 34 | 35 | N[SEM='City FROM city_table'] -> 'cities' 36 | N[SEM='Population'] -> 'populations' 37 | 38 | IV[SEM=''] -> 'are' 39 | TV[SEM=''] -> 'have' 40 | A -> 'located' 41 | P[SEM=''] -> 'in' 42 | P[SEM='>'] -> 'above' 43 | 44 | 45 | -------------------------------------------------------------------------------- /examples/semantics/model0.py: -------------------------------------------------------------------------------- 1 | # Natural Language Toolkit: Example Model 2 | # 3 | # Author: Ewan Klein 4 | # URL: 5 | # For license information, see LICENSE.TXT 6 | 7 | """ 8 | This is a sample model to accompany the U{sem2.cfg} grammar, and is 9 | intended to be imported as a module. 
10 | """ 11 | 12 | from nltk.sem import * 13 | 14 | #Initialize a valuation of non-logical constants.""" 15 | 16 | v = [ 17 | ('john', 'b1'), 18 | ('mary', 'g1'), 19 | ('suzie', 'g2'), 20 | ('fido', 'd1'), 21 | ('tess', 'd2'), 22 | ('noosa', 'n'), 23 | ('girl', set(['g1', 'g2'])), 24 | ('boy', set(['b1', 'b2'])), 25 | ('dog', set(['d1', 'd2'])), 26 | ('bark', set(['d1', 'd2'])), 27 | ('walk', set(['b1', 'g2', 'd1'])), 28 | ('chase', set([('b1', 'g1'), ('b2', 'g1'), ('g1', 'd1'), ('g2', 'd2')])), 29 | ('see', set([('b1', 'g1'), ('b2', 'd2'), ('g1', 'b1'),('d2', 'b1'), ('g2', 'n')])), 30 | ('in', set([('b1', 'n'), ('b2', 'n'), ('d2', 'n')])), 31 | ('with', set([('b1', 'g1'), ('g1', 'b1'), ('d1', 'b1'), ('b1', 'd1')])) 32 | ] 33 | 34 | #Read in the data from C{v} 35 | val = Valuation(v) 36 | 37 | #Bind C{dom} to the C{domain} property of C{val} 38 | dom = val.domain 39 | 40 | #Initialize a model with parameters C{dom} and C{val}. 41 | m = Model(dom, val) 42 | 43 | #Initialize a variable assignment with parameter C{dom} 44 | g = Assignment(dom) 45 | -------------------------------------------------------------------------------- /examples/grammars/book_grammars/feat1.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: feat1.fcfg 2 | ## 3 | ## Second example of a feature-based grammar, illustrating 4 | ## SUBCAT and slash features. Also introduces SBar and embedded 5 | ## clauses. 6 | ## Used in Feature-Based Grammars chapter. 7 | ## 8 | ## Author: Ewan Klein 9 | ## URL: 10 | ## For license information, see LICENSE.TXT 11 | 12 | % start S 13 | # ################### 14 | # Grammar Productions 15 | # ################### 16 | 17 | S[-INV] -> NP VP 18 | S[-INV]/?x -> NP VP/?x 19 | 20 | S[-INV] -> NP S/NP 21 | S[-INV] -> Adv[+NEG] S[+INV] 22 | 23 | S[+INV] -> V[+AUX] NP VP 24 | S[+INV]/?x -> V[+AUX] NP VP/?x 25 | 26 | SBar -> Comp S[-INV] 27 | SBar/?x -> Comp S[-INV]/?x 28 | 29 | VP -> V[SUBCAT=intrans, -AUX] 30 | 31 | VP -> V[SUBCAT=trans, -AUX] NP 32 | VP/?x -> V[SUBCAT=trans, -AUX] NP/?x 33 | 34 | VP -> V[SUBCAT=clause, -AUX] SBar 35 | VP/?x -> V[SUBCAT=clause, -AUX] SBar/?x 36 | 37 | VP -> V[+AUX] VP 38 | VP/?x -> V[+AUX] VP/?x 39 | 40 | # ################### 41 | # Lexical Productions 42 | # ################### 43 | V[SUBCAT=intrans, -AUX] -> 'walk' | 'sing' 44 | V[SUBCAT=trans, -AUX] -> 'see' | 'like' 45 | V[SUBCAT=clause, -AUX] -> 'say' | 'claim' 46 | V[+AUX] -> 'do' | 'can' 47 | 48 | NP[-WH] -> 'you' | 'cats' 49 | NP[+WH] -> 'who' 50 | 51 | Adv[+NEG] -> 'rarely' | 'never' 52 | 53 | NP/NP -> 54 | 55 | Comp -> 'that' 56 | -------------------------------------------------------------------------------- /examples/grammars/book_grammars/feat0.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: feat0.fcfg 2 | ## 3 | ## First example of a feature-based grammar for English, illustrating 4 | ## value-sharing of NUM and TENSE features. 5 | ## Used in Feature-Based Grammars chapter. 
6 | ## 7 | ## Author: Ewan Klein 8 | ## URL: 9 | ## For license information, see LICENSE.TXT 10 | 11 | % start S 12 | # ################### 13 | # Grammar Productions 14 | # ################### 15 | 16 | # S expansion productions 17 | S -> NP[NUM=?n] VP[NUM=?n] 18 | 19 | # NP expansion productions 20 | NP[NUM=?n] -> N[NUM=?n] 21 | NP[NUM=?n] -> PropN[NUM=?n] 22 | NP[NUM=?n] -> Det[NUM=?n] N[NUM=?n] 23 | NP[NUM=pl] -> N[NUM=pl] 24 | 25 | # VP expansion productions 26 | VP[TENSE=?t, NUM=?n] -> IV[TENSE=?t, NUM=?n] 27 | VP[TENSE=?t, NUM=?n] -> TV[TENSE=?t, NUM=?n] NP 28 | 29 | # ################### 30 | # Lexical Productions 31 | # ################### 32 | 33 | Det[NUM=sg] -> 'this' | 'every' 34 | Det[NUM=pl] -> 'these' | 'all' 35 | Det -> 'the' | 'some' | 'several' 36 | 37 | PropN[NUM=sg]-> 'Kim' | 'Jody' 38 | 39 | N[NUM=sg] -> 'dog' | 'girl' | 'car' | 'child' 40 | N[NUM=pl] -> 'dogs' | 'girls' | 'cars' | 'children' 41 | 42 | IV[TENSE=pres, NUM=sg] -> 'disappears' | 'walks' 43 | TV[TENSE=pres, NUM=sg] -> 'sees' | 'likes' 44 | 45 | IV[TENSE=pres, NUM=pl] -> 'disappear' | 'walk' 46 | TV[TENSE=pres, NUM=pl] -> 'see' | 'like' 47 | 48 | IV[TENSE=past] -> 'disappeared' | 'walked' 49 | TV[TENSE=past] -> 'saw' | 'liked' 50 | -------------------------------------------------------------------------------- /examples/grammars/spanish_grammars/spanish1.fcfg: -------------------------------------------------------------------------------- 1 | % start S 2 | # ############################ 3 | # Grammar Rules 4 | # ############################ 5 | S -> SN[num=?n,gen=?g] SV[num=?n,tiempo=?t] 6 | SN[num=?n,gen=?g,+PROP] -> NP[num=?n] 7 | SN[num=?n,gen=?g,-PROP] -> DET[num=?n,gen=?g] NC[num=?n,gen=?g] 8 | SN[num=plural,gen=?g,-PROP] -> DET[num=plural,gen=?g] NC[num=plural,gen=?g] 9 | SV[tiempo=?t,num=?n] -> VI[tiempo=?t,num=?n] 10 | SV[tiempo=?t,num=?n] -> VT[tiempo=?t,num=?n] SN[-PROP] 11 | SV[tiempo=?t,num=?n] -> VT[tiempo=?t,num=?n] PREP SN 12 | # ############################ 13 | # Lexical Rules 14 | # ############################ 15 | DET[num=singular,gen=masculino] -> 'un' | 'el' 16 | DET[num=singular,gen=femenino] -> 'una' | 'la' 17 | DET[num=plural,gen=masculino] -> 'unos' | 'los' 18 | DET[num=plural,gen=femenino] -> 'unas' | 'las' 19 | PREP -> 'a' 20 | NP[num=singular] -> 'Miguel' | 'Sara' | 'Pedro' 21 | NC[num=singular,gen=masculino] -> 'perro' | 'gato' | 'vecino' | 'profesor' 22 | NC[num=singular,gen=femenino] -> 'perra' | 'gata' | 'vecina' | 'profesora' 23 | NC[num=plural,gen=masculino] -> 'perros' | 'gatos' | 'vecinos' | 'profesores' 24 | NC[num=plural,gen=femenino] -> 'perras' | 'gatas' | 'vecinas' | 'profesoras' 25 | VI[tiempo=pasado,num=singular] -> 'desaparecio' | 'anduvo' | 'murio' 26 | VI[tiempo=presente,num=singular] -> 'desaparece' | 'anda' | 'muere' 27 | VI[tiempo=pasado,num=plural] -> 'desaparecion' | 'anduvieron' | 'murieron' 28 | VI[tiempo=presente,num=plural] -> 'desaparecen' | 'andan' | 'mueren' 29 | VT[tiempo=pasado,num=singular] -> 'vio' | 'adoró' | 'gritó' | 'odio' 30 | VT[tiempo=presente,num=singular] -> 've' | 'adora' | 'grita' | 'odia' 31 | VT[tiempo=pasado,num=plural] -> 'vieron' | 'adoraron' | 'gritaron' | 'odiaron' 32 | VT[tiempo=presente,num=plural] -> 'ven' | 'adoran' | 'gritan' | 'odian' 33 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/bindop.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: sem0.fcfg 2 | ## 3 | ## Feature-based 
grammar that divides the semantics for each element 4 | ## into two pieces: the core semantics, with path ('SEM','CORE'), and a set of 5 | ## binding operators, with path ('SEM','BO'). Each binding operator is encoded 6 | ## as a lambda-calculus expression , specifying 7 | ## that <@var> is an individual variable that should be instantiated, 8 | ## and is an expression that can bind that variable. 9 | ## 10 | ## In order for this grammar to generate the correct results, all 11 | ## variables of the form <@var> must be instantiated (i.e., replaced 12 | ## by unique new variables) whenever they are used. This can be 13 | ## accomplished by using the InstantiateVarsChart class when parsing. 14 | ## 15 | ## Author: Edward Loper , 16 | ## Ewan Klein 17 | ## URL: 18 | ## For license information, see LICENSE.TXT 19 | 20 | %start S 21 | ## Grammar summary: 22 | ## S -> NP VP 23 | ## VP -> TV NP | IV 24 | ## NP -> Det N | proper nouns... 25 | ## TV -> transitive verbs... 26 | ## IV -> intransitive verbs... 27 | ## Det -> determiners... 28 | 29 | S[SEM=[CORE=, BO={?b1+?b2}]] -> NP[SEM=[CORE=?subj, BO=?b1]] VP[SEM=[CORE=?vp, BO=?b2]] 30 | 31 | VP[SEM=[CORE=, BO={?b1+?b2}]] -> TV[SEM=[CORE=?v, BO=?b1]] NP[SEM=[CORE=?obj, BO=?b2]] 32 | 33 | VP[SEM=?s] -> IV[SEM=?s] 34 | 35 | NP[SEM=[CORE=<@x>, BO={{}+?b1+?b2}]] -> Det[SEM=[CORE=?det, BO=?b1]] N[SEM=[CORE=?n, BO=?b2]] 36 | 37 | # Lexical items: 38 | Det[SEM=[CORE=<\Q P.exists x.(Q(x) & P(x))>, BO={/}]] -> 'a' 39 | N[SEM=[CORE=, BO={/}]] -> 'dog' | 'cat' | 'mouse' 40 | IV[SEM=[CORE=<\x.bark(x)>, BO={/}]] -> 'barks' | 'eats' | 'walks' 41 | TV[SEM=[CORE=<\x y.feed(y,x)>, BO={/}]] -> 'feeds' | 'walks' 42 | NP[SEM=[CORE=<@x>, BO={}]] -> 'john' | 'alex' 43 | -------------------------------------------------------------------------------- /examples/grammars/book_grammars/simple-sem.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: sem3.fcfg 2 | ## 3 | ## Alternative simple grammar with transitive verbs and 4 | ## quantifiers for the book. 
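##
## A minimal usage sketch, assuming the copy of this grammar distributed in
## nltk_data under 'grammars/book_grammars/simple-sem.fcfg'; the SEM feature
## on the root of each parse tree holds the logical form of the sentence:
##
##     import nltk
##     parser = nltk.load_parser('grammars/book_grammars/simple-sem.fcfg')
##     for tree in parser.parse('Cyril bites an ankle'.split()):
##         print(tree.label()['SEM'])   # e.g. exists z1.(ankle(z1) & bite(cyril,z1))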
5 | ## 6 | ## Author: Ewan Klein 7 | ## URL: 8 | ## For license information, see LICENSE.TXT 9 | 10 | 11 | % start S 12 | ############################ 13 | # Grammar Rules 14 | ############################# 15 | 16 | S[SEM = ] -> NP[NUM=?n,SEM=?subj] VP[NUM=?n,SEM=?vp] 17 | 18 | NP[NUM=?n,SEM= ] -> Det[NUM=?n,SEM=?det] Nom[NUM=?n,SEM=?nom] 19 | NP[LOC=?l,NUM=?n,SEM=?np] -> PropN[LOC=?l,NUM=?n,SEM=?np] 20 | 21 | Nom[NUM=?n,SEM=?nom] -> N[NUM=?n,SEM=?nom] 22 | 23 | VP[NUM=?n,SEM=?v] -> IV[NUM=?n,SEM=?v] 24 | VP[NUM=?n,SEM=] -> TV[NUM=?n,SEM=?v] NP[SEM=?obj] 25 | VP[NUM=?n,SEM=] -> DTV[NUM=?n,SEM=?v] NP[SEM=?obj] PP[+TO,SEM=?pp] 26 | 27 | PP[+TO, SEM=?np] -> P[+TO] NP[SEM=?np] 28 | 29 | ############################# 30 | # Lexical Rules 31 | ############################# 32 | 33 | PropN[-LOC,NUM=sg,SEM=<\P.P(angus)>] -> 'Angus' 34 | PropN[-LOC,NUM=sg,SEM=<\P.P(cyril)>] -> 'Cyril' 35 | PropN[-LOC,NUM=sg,SEM=<\P.P(irene)>] -> 'Irene' 36 | 37 | Det[NUM=sg,SEM=<\P Q.all x.(P(x) -> Q(x))>] -> 'every' 38 | Det[NUM=pl,SEM=<\P Q.all x.(P(x) -> Q(x))>] -> 'all' 39 | Det[SEM=<\P Q.exists x.(P(x) & Q(x))>] -> 'some' 40 | Det[NUM=sg,SEM=<\P Q.exists x.(P(x) & Q(x))>] -> 'a' 41 | Det[NUM=sg,SEM=<\P Q.exists x.(P(x) & Q(x))>] -> 'an' 42 | 43 | N[NUM=sg,SEM=<\x.man(x)>] -> 'man' 44 | N[NUM=sg,SEM=<\x.girl(x)>] -> 'girl' 45 | N[NUM=sg,SEM=<\x.boy(x)>] -> 'boy' 46 | N[NUM=sg,SEM=<\x.bone(x)>] -> 'bone' 47 | N[NUM=sg,SEM=<\x.ankle(x)>] -> 'ankle' 48 | N[NUM=sg,SEM=<\x.dog(x)>] -> 'dog' 49 | N[NUM=pl,SEM=<\x.dog(x)>] -> 'dogs' 50 | 51 | IV[NUM=sg,SEM=<\x.bark(x)>,TNS=pres] -> 'barks' 52 | IV[NUM=pl,SEM=<\x.bark(x)>,TNS=pres] -> 'bark' 53 | IV[NUM=sg,SEM=<\x.walk(x)>,TNS=pres] -> 'walks' 54 | IV[NUM=pl,SEM=<\x.walk(x)>,TNS=pres] -> 'walk' 55 | TV[NUM=sg,SEM=<\X x.X(\y.chase(x,y))>,TNS=pres] -> 'chases' 56 | TV[NUM=pl,SEM=<\X x.X(\y.chase(x,y))>,TNS=pres] -> 'chase' 57 | TV[NUM=sg,SEM=<\X x.X(\y.see(x,y))>,TNS=pres] -> 'sees' 58 | TV[NUM=pl,SEM=<\X x.X(\y.see(x,y))>,TNS=pres] -> 'see' 59 | TV[NUM=sg,SEM=<\X x.X(\y.bite(x,y))>,TNS=pres] -> 'bites' 60 | TV[NUM=pl,SEM=<\X x.X(\y.bite(x,y))>,TNS=pres] -> 'bite' 61 | DTV[NUM=sg,SEM=<\Y X x.X(\z.Y(\y.give(x,y,z)))>,TNS=pres] -> 'gives' 62 | DTV[NUM=pl,SEM=<\Y X x.X(\z.Y(\y.give(x,y,z)))>,TNS=pres] -> 'give' 63 | 64 | P[+to] -> 'to' 65 | 66 | -------------------------------------------------------------------------------- /examples/grammars/book_grammars/storage.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: storage.fcfg 2 | ## 3 | ## Feature-based grammar that implements Cooper storage by dividing the 4 | ## semantics for each phrase into two pieces: the core semantics 5 | ## ('SEM','CORE') and a sequence of binding operators ('SEM','STORE'). 6 | ## Each binding operator is encoded as a logic term , 7 | ## where is a quantifier expression and the individual variable 8 | ## <@var> specifies the 'address' of the quantifier in the core 9 | ## semantics. and is a predicate describing that variable. 10 | 11 | ## In order for this grammar to generate the correct results, all 12 | ## variables of the form <@var> must be instantiated (i.e., replaced 13 | ## by unique new variables) whenever they are used. This can be 14 | ## accomplished by using the InstantiateVarsChart class when parsing. 
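##
## A minimal usage sketch, assuming the copy of this grammar distributed in
## nltk_data under 'grammars/book_grammars/storage.fcfg'; the
## nltk.sem.cooper_storage module wraps this style of parsing (using
## InstantiateVarsChart, as described above) and then enumerates the
## quantifier scopings held in the store:
##
##     from nltk.sem import cooper_storage as cs
##     trees = cs.parse_with_bindops('every girl chases a dog',
##                                   grammar='grammars/book_grammars/storage.fcfg')
##     semrep = trees[0].label()['SEM']
##     store = cs.CooperStore(semrep)
##     store.s_retrieve()
##     for reading in store.readings:   # one formula per quantifier scoping
##         print(reading)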
15 | ## 16 | ## Author: Edward Loper , 17 | ## Ewan Klein 18 | ## Robin Cooper 19 | ## URL: 20 | ## For license information, see LICENSE.TXT 21 | 22 | %start S 23 | 24 | S[SEM=[CORE=, STORE=(?b1+?b2)]] -> NP[SEM=[CORE=?subj, STORE=?b1]] VP[SEM=[CORE=?vp, STORE=?b2]] 25 | 26 | VP[SEM=?s] -> IV[SEM=?s] 27 | VP[SEM=[CORE=, STORE=(?b1+?b2)]] -> TV[SEM=[CORE=?v, STORE=?b1]] NP[SEM=[CORE=?obj, STORE=?b2]] 28 | VP[SEM=[CORE=, STORE=(?b1+?b2+?b3)]] -> DTV[SEM=[CORE=?v, STORE=?b1]] NP[SEM=[CORE=?obj, STORE=?b2]] PP[+TO, SEM=[CORE=?pp, STORE=?b3]] 29 | 30 | NP[SEM=[CORE=<@x>, STORE=(()+?b1+?b2)]] -> Det[SEM=[CORE=?det, STORE=?b1]] N[SEM=[CORE=?n, STORE=?b2]] 31 | 32 | PP[+TO, SEM=[CORE=?np, STORE=?b1]] -> P NP[SEM=[CORE=?np, STORE=?b1]] 33 | 34 | # Lexical items: 35 | Det[SEM=[CORE=<\Q P.exists x.(Q(x) & P(x))>, STORE=(/)]] -> 'a' 36 | Det[SEM=[CORE=<\Q P.all x.(Q(x) implies P(x))>, STORE=(/)]] -> 'every' 37 | 38 | N[SEM=[CORE=, STORE=(/)]] -> 'dog' 39 | N[SEM=[CORE=, STORE=(/)]] -> 'bone' 40 | N[SEM=[CORE=, STORE=(/)]] -> 'girl' 41 | N[SEM=[CORE=, STORE=(/)]] -> 'man' 42 | 43 | IV[SEM=[CORE=<\x.smile(x)>, STORE=(/)]] -> 'smiles' 44 | IV[SEM=[CORE=<\x.walk(x)>, STORE=(/)]] -> 'walks' 45 | 46 | TV[SEM=[CORE=<\y x.feed(x,y)>, STORE=(/)]] -> 'feeds' 47 | TV[SEM=[CORE=<\y x.chase(x,y)>, STORE=(/)]] -> 'chases' 48 | 49 | DTV[SEM=[CORE=<\z y x.give(x,y,z)>, STORE=(/)]] -> 'gives' 50 | 51 | NP[SEM=[CORE=<@x>, STORE=()]] -> 'Angus' 52 | NP[SEM=[CORE=<@x>, STORE=()]] -> 'Cyril' 53 | 54 | P[+TO] -> 'to' 55 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/sem2.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: sem2.fcfg 2 | ## 3 | ## Longer feature-based grammar with more quantifers, and illustrating 4 | ## transitive verbs and prepositional phrases (PPs). The 5 | ## interpretation of PPs is a bit weird and could do with further 6 | ## work. 
7 | ## 8 | ## Author: Ewan Klein 9 | ## URL: 10 | ## For license information, see LICENSE.TXT 11 | 12 | % start S 13 | ############################ 14 | # Grammar Rules 15 | ############################# 16 | 17 | S[SEM = ] -> NP[NUM=?n,SEM=?subj] VP[NUM=?n,SEM=?vp] 18 | 19 | NP[NUM=?n,SEM= ] -> Det[NUM=?n,SEM=?det] Nom[NUM=?n,SEM=?nom] 20 | NP[LOC=?l,NUM=?n,SEM=?np] -> PropN[LOC=?l,NUM=?n,SEM=?np] 21 | 22 | Nom[NUM=?n,SEM=?nom] -> N[NUM=?n,SEM=?nom] 23 | Nom[NUM=?n,SEM=] -> N[NUM=?n,SEM=?nom] PP[SEM=?pp] 24 | 25 | VP[NUM=?n,SEM=] -> TV[NUM=?n,SEM=?v] NP[SEM=?obj] 26 | VP[NUM=?n,SEM=?v] -> IV[NUM=?n,SEM=?v] 27 | 28 | VP[NUM=?n,SEM=] -> VP[NUM=?n,SEM=?vp] PP[SEM=?pp] 29 | 30 | PP[SEM=] -> P[LOC=?l,SEM=?p] NP[LOC=?l,SEM=?np] 31 | 32 | ############################# 33 | # Lexical Rules 34 | ############################# 35 | 36 | PropN[-LOC,NUM=sg,SEM=<\P.P(john)>] -> 'John' 37 | PropN[-LOC,NUM=sg,SEM=<\P.P(mary)>] -> 'Mary' 38 | PropN[-LOC,NUM=sg,SEM=<\P.P(suzie)>] -> 'Suzie' 39 | PropN[-LOC,NUM=sg,SEM=<\P.P(fido)>] -> 'Fido' 40 | PropN[+LOC, NUM=sg,SEM=<\P.P(noosa)>] -> 'Noosa' 41 | 42 | NP[-LOC, NUM=sg, SEM=<\P.\x.P(x)>] -> 'who' 43 | 44 | Det[NUM=sg,SEM=<\P Q.all x.(P(x) -> Q(x))>] -> 'every' 45 | Det[NUM=pl,SEM=<\P Q.all x.(P(x) -> Q(x))>] -> 'all' 46 | Det[SEM=<\P Q.exists x.(P(x) & Q(x))>] -> 'some' 47 | Det[NUM=sg,SEM=<\P Q.exists x.(P(x) & Q(x))>] -> 'a' 48 | 49 | N[NUM=sg,SEM=<\x.boy(x)>] -> 'boy' 50 | N[NUM=pl,SEM=<\x.boy(x)>] -> 'boys' 51 | N[NUM=sg,SEM=<\x.girl(x)>] -> 'girl' 52 | N[NUM=pl,SEM=<\x.girl(x)>] -> 'girls' 53 | N[NUM=sg,SEM=<\x.dog(x)>] -> 'dog' 54 | N[NUM=pl,SEM=<\x.dog(x)>] -> 'dogs' 55 | 56 | TV[NUM=sg,SEM=<\X y.X(\x.chase(y,x))>,TNS=pres] -> 'chases' 57 | TV[NUM=pl,SEM=<\X y.X(\x.chase(y,x))>,TNS=pres] -> 'chase' 58 | TV[NUM=sg,SEM=<\X y.X(\x.see(y,x))>,TNS=pres] -> 'sees' 59 | TV[NUM=pl,SEM=<\X y.X(\x.see(y,x))>,TNS=pres] -> 'see' 60 | TV[NUM=sg,SEM=<\X y.X(\x.chase(y,x))>,TNS=pres] -> 'chases' 61 | TV[NUM=pl,SEM=<\X y.X(\x.chase(y,x))>,TNS=pres] -> 'chase' 62 | IV[NUM=sg,SEM=<\x.bark(x)>,TNS=pres] -> 'barks' 63 | IV[NUM=pl,SEM=<\x.bark(x)>,TNS=pres] -> 'bark' 64 | IV[NUM=sg,SEM=<\x.walk(x)>,TNS=pres] -> 'walks' 65 | IV[NUM=pl,SEM=<\x.walk(x)>,TNS=pres] -> 'walk' 66 | 67 | P[+LOC,SEM=<\X P x.X(\y.(P(x) & in(x,y)))>] -> 'in' 68 | P[-LOC,SEM=<\X P x.X(\y.(P(x) & with(x,y)))>] -> 'with' 69 | -------------------------------------------------------------------------------- /examples/semantics/sem2.cfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: sem2.cfg 2 | ## 3 | ## Longer feature-based grammar with more quantifers, and illustrating 4 | ## transitive verbs and prepositional phrases (PPs). The 5 | ## interpretation of PPs is a bit weird and could do with further 6 | ## work. 
7 | ## 8 | ## Author: Ewan Klein 9 | ## URL: 10 | ## For license information, see LICENSE.TXT 11 | 12 | % start S 13 | ############################ 14 | # Grammar Rules 15 | ############################# 16 | 17 | S[sem = ] -> NP[num=?n,sem=?subj] VP[num=?n,sem=?vp] 18 | 19 | NP[num=?n,sem= ] -> Det[num=?n,sem=?det] Nom[num=?n,sem=?nom] 20 | NP[loc=?l,num=?n,sem=?np] -> PropN[loc=?l,num=?n,sem=?np] 21 | 22 | Nom[num=?n,sem=?nom] -> N[num=?n,sem=?nom] 23 | Nom[num=?n,sem=] -> N[num=?n,sem=?nom] PP[sem=?pp] 24 | 25 | VP[num=?n,sem=] -> TV[num=?n,sem=?v] NP[sem=?obj] 26 | VP[num=?n,sem=?v] -> IV[num=?n,sem=?v] 27 | 28 | VP[num=?n,sem=] -> VP[num=?n,sem=?vp] PP[sem=?pp] 29 | 30 | PP[sem=] -> P[loc=?l,sem=?p] NP[loc=?l,sem=?np] 31 | 32 | ############################# 33 | # Lexical Rules 34 | ############################# 35 | 36 | PropN[-loc,num=sg,sem=<\P.(P john)>] -> 'John' 37 | PropN[-loc,num=sg,sem=<\P.(P mary)>] -> 'Mary' 38 | PropN[-loc,num=sg,sem=<\P.(P suzie)>] -> 'Suzie' 39 | PropN[-loc,num=sg,sem=<\P.(P fido)>] -> 'Fido' 40 | PropN[+loc, num=sg,sem=<\P.(P noosa)>] -> 'Noosa' 41 | 42 | NP[-loc, num=sg, sem=<\P.\x.(P x)>] -> 'who' 43 | 44 | Det[num=sg,sem=<\P Q. all x. ((P x) implies (Q x))>] -> 'every' 45 | Det[num=pl,sem=<\P Q. all x. ((P x) implies (Q x))>] -> 'all' 46 | Det[sem=<\P Q. some x. ((P x) and (Q x))>] -> 'some' 47 | Det[num=sg,sem=<\P Q. some x. ((P x) and (Q x))>] -> 'a' 48 | 49 | N[num=sg,sem=] -> 'boy' 50 | N[num=pl,sem=] -> 'boys' 51 | N[num=sg,sem=] -> 'girl' 52 | N[num=pl,sem=] -> 'girls' 53 | N[num=sg,sem=] -> 'dog' 54 | N[num=pl,sem=] -> 'dogs' 55 | 56 | TV[num=sg,sem=<\X y. (X \x. (chase x y))>,tns=pres] -> 'chases' 57 | TV[num=pl,sem=<\X y. (X \x. (chase x y))>,tns=pres] -> 'chase' 58 | TV[num=sg,sem=<\X y. (X \x. (see x y))>,tns=pres] -> 'sees' 59 | TV[num=pl,sem=<\X y. (X \x. (see x y))>,tns=pres] -> 'see' 60 | TV[num=sg,sem=<\X y. (X \x. (chase x y))>,tns=pres] -> 'chases' 61 | TV[num=pl,sem=<\X y. (X \x. (chase x y))>,tns=pres] -> 'chase' 62 | IV[num=sg,sem=<\x. (bark x)>,tns=pres] -> 'barks' 63 | IV[num=pl,sem=<\x. (bark x)>,tns=pres] -> 'bark' 64 | IV[num=sg,sem=<\x. (walk x)>,tns=pres] -> 'walks' 65 | IV[num=pl,sem=<\x. (walk x)>,tns=pres] -> 'walk' 66 | 67 | P[+loc,sem=<\X P x. (X \y. ((P x) and (in y x)))>] -> 'in' 68 | P[-loc,sem=<\X P x. (X \y. 
((P x) and (with y x)))>] -> 'with' 69 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/event.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: event.fcfg 2 | ## 3 | ## Illustrating Davidson-style event semantics 4 | ## 5 | ## Author: Ewan Klein 6 | ## URL: 7 | ## For license information, see LICENSE.TXT 8 | 9 | % start S 10 | ############################ 11 | # Grammar Rules 12 | ############################# 13 | 14 | S[sem = ] -> NP[num=?n,sem=?subj] VP[num=?n,sem=?vp] 15 | 16 | NP[num=?n,sem= ] -> Det[num=?n,sem=?det] Nom[num=?n,sem=?nom] 17 | NP[loc=?l,num=?n,sem=?np] -> PropN[loc=?l,num=?n,sem=?np] 18 | 19 | Nom[num=?n,sem=?nom] -> N[num=?n,sem=?nom] 20 | Nom[num=?n,sem=] -> N[num=?n,sem=?nom] PP[sem=?pp] 21 | 22 | VP[num=?n,sem=?v] -> IV[num=?n,sem=?v] 23 | VP[num=?n,sem=] -> TV[num=?n,sem=?v] NP[sem=?obj] 24 | VP[num=?n,sem=] -> DTV[num=?n,sem=?v] NP[sem=?obj] PP[+to, sem=?pp] 25 | 26 | 27 | VP[num=?n,sem=] -> VP[num=?n,sem=?vp] PP[sem=?pp] 28 | VP[num=?n,sem=] -> VP[num=?n,sem=?vp] Adv[sem=?adv] 29 | 30 | PP[sem=] -> P[loc=?l,sem=?p] NP[loc=?l,sem=?np] 31 | 32 | ############################# 33 | # Lexical Rules 34 | ############################# 35 | 36 | PropN[-loc,num=sg,sem=<\e R.R(e,angus)>] -> 'Angus' 37 | PropN[-loc,num=sg,sem=<\e R.R(e,pat)>] -> 'Pat' 38 | PropN[-loc,num=sg,sem=<\e R.R(e,irene)>] -> 'Irene' 39 | PropN[-loc,num=sg,sem=<\e R.R(e,cyril)>] -> 'Cyril' 40 | PropN[+loc, num=sg,sem=<\e R.R(e,stockbridge)>] -> 'Stockbridge' 41 | 42 | NP[-loc, num=sg, sem=<\P.\x.P(x)>] -> 'who' 43 | 44 | Det[num=sg,sem=<\P R e.all x.(P(x) -> R(e,x))>] -> 'every' 45 | Det[num=pl,sem=<\P R e.all x.(P(x) -> R(e,x))>] -> 'all' 46 | Det[sem=<\P R e.exists x.(P(x) & R(e,x))>] -> 'some' 47 | Det[num=sg,sem=<\P R e.exists x.(P(x) & R(e,x))>] -> 'a' 48 | 49 | N[num=sg,sem=] -> 'boy' 50 | N[num=pl,sem=] -> 'boys' 51 | N[num=sg,sem=] -> 'girl' 52 | N[num=pl,sem=] -> 'girls' 53 | N[num=sg,sem=] -> 'bone' 54 | N[num=sg,sem=] -> 'dog' 55 | 56 | IV[num=sg,sem=<\e x.(bark(e) & agent(e,x))>,tns=pres] -> 'barks' 57 | IV[num=pl,sem=<\e x.(bark(e) & agent(e,x))>,tns=pres] -> 'bark' 58 | IV[num=sg,sem=<\e x.(walk(e) & agent(e,x))>,tns=pres] -> 'walks' 59 | IV[num=pl,sem=<\e x.( walk(e) & agent(e,x))>,tns=pres] -> 'walk' 60 | TV[num=sg,sem=<\X y.X(\e x.(chase(e) & agent(e,y) & patient(e,x)))>,tns=pres] -> 'chases' 61 | TV[num=pl,sem=<\X y.X(\e x.(chase(e) & agent(e,y) & patient(e,x)))>,tns=pres] -> 'chase' 62 | TV[num=sg,sem=<\X y.X(\e x.(see(e) & agent(e,y) & patient(e,x)))>,tns=pres] -> 'sees' 63 | TV[num=pl,sem=<\X y.X(\e x.(see(e) & agent(e,y) & patient(e,x)))>,tns=pres] -> 'see' 64 | DTV[num=sg,sem=<\Y X x.X(\z.Y(\e y.(give(e) & agent(e,x) & theme(e,y) & recip(e,z))))>,tns=pres] -> 'gives' 65 | DTV[num=pl,sem=<\Y X x.X(\z.Y(\e y.(give(e) & agent(e,x) & theme(e,y) & recip(e,z))))>,tns=pres] -> 'give' 66 | 67 | P[+loc,sem=<\X P e.X(\y.(P(e) & in(e,y)))>] -> 'in' 68 | P[-loc,sem=<\X P e.X(\y.(P(e) & with(e,y)))>] -> 'with' 69 | P[+to,sem=<\X.X>] -> 'to' 70 | 71 | Adv[sem=<\R e x.(slow(e) & R(e,x))>] -> 'slowly' 72 | Adv[sem=<\R e x.(thoughtful(e) & R(e,x))>] -> 'thoughtfully' 73 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/glue.semtype: -------------------------------------------------------------------------------- 1 | ######################################################################## 2 
| # Glue Semantics Formulas Using Event Representation 3 | # 4 | # Entries are made up of three parts, separated by colons (":") 5 | # 6 | # 1) The semtype name. 7 | # - May appear multiple times with different relationship sets (3) 8 | # - May "extend" other semtypes: "type(parent)" 9 | # 10 | # 2) The glue formulas. 11 | # - A comma-separated list of tuples representing glue formulas 12 | # - If the entry is an extension, then the listed formulas will be added to 13 | # the list from the super type 14 | # 15 | # 3) The relationship set (OPTIONAL) 16 | # - If not specified, then assume the entry covers ALL relationship sets 17 | # - If the entry is an extension, then the relationship set dictates which 18 | # particular entry should be extended. If no relationship set is 19 | # specified, then every entry of the parent type is extended. 20 | # 21 | ######################################################################## 22 | 23 | #Quantifiers 24 | def_art : (\P Q.exists x.(P(x) & all y.(Q(y) <-> (x = y))), ((super.v -o super.r) -o ((super.f -o super.var) -o super.var))) 25 | ex_quant : (\P Q.exists x.(P(x) & Q(x)), ((super.v -o super.r) -o ((super.f -o super.var) -o super.var))) 26 | univ_quant : (\P Q.all x.(P(x) -> Q(x)), ((super.v -o super.r) -o ((super.f -o super.var) -o super.var))) 27 | no_quant : (\P Q.-exists x.(P(x) & Q(x)), ((super.v -o super.r) -o ((super.f -o super.var) -o super.var))) 28 | 29 | #Nouns 30 | NN : (\x.(x), (v -o r)) : [spec] 31 | NN : (\P Q.exists x.(P(x) & Q(x)), ((v -o r) -o ((f -o var) -o var))), (\x.(x), (v -o r)) : [] # treat a noun missing its spec as implicitly existentially quantified 32 | NNP : (\P Q.exists x.(P(x) & Q(x)), ((v -o r) -o ((f -o var) -o var))), (\x.(x), (v -o r)) 33 | NNS(NN) 34 | PRP : (\P Q.exists x.(P(x) & Q(x)), ((v -o r) -o ((f -o var) -o var))), (\x.PRO(x), (v -o r)) 35 | 36 | #Verbs 37 | VB : (\x.(x), (subj -o f)) : [subj] #iv 38 | VB : (\x y.(x,y), (subj -o (obj -o f))) : [subj, obj] #tv 39 | VB : (\y.exists x.(x,y), (obj -o f)) : [obj] #incomplete tv 40 | VB : (\x y z.(x,y,z), (subj -o (obj -o (theme -o f)))) : [subj, obj, theme] #dtv 41 | VB : (\y z.exists x.(x,y,z), obj -o (theme -o f)) : [obj, theme] #incomplete dtv 42 | VB : (\x z.exists y.(x,y,z), subj -o (theme -o f)) : [subj, theme] #incomplete dtv 43 | VB : (\z.exists x y.(x,y,z), theme -o f) : [theme] #incomplete dtv 44 | VB : (\x y.(x,y), (subj -o (comp -o f))) : [subj, comp] #tv_comp 45 | VB : (\x P.(x,P), (subj -o ((xcomp.subj -o xcomp) -o f))) : [subj, xcomp] #equi 46 | VB : (\x y P.(x,y,P), (subj -o (obj -o ((xcomp.subj -o xcomp) -o f)))) : [subj, obj, xcomp] # object equi 47 | VB : (\P.(P), (xcomp -o f)) : [xcomp] #raising 48 | VBD(VB) : (\P.PAST(P), (f -o f)) 49 | VBZ(VB) 50 | 51 | #Modifiers 52 | nmod : (\Q P x.(P(x) & Q(x)), (f -o ((super.v -o super.r) -o (super.v -o super.r)))), (\x.(x), f) 53 | JJ(nmod) 54 | vmod : (\P.(P), (super.f -o super.f)) 55 | RB(vmod) 56 | tense : (\P.(P), (super.f -o super.f)) 57 | 58 | #Conjunctions 59 | cc_clause : (\P Q.(P & Q), (a -o (b -o f))) 60 | -------------------------------------------------------------------------------- /examples/grammars/book_grammars/german.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: german.fcfg 2 | ## 3 | ## Example of a feature-based grammar for German, illustrating 4 | ## CASE and AGR features (PER, GND, NUM) working as a bundle. 5 | ## Used in Feature-Based Grammars chapter. 
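## For example, the productions below license 'ich folge den Katzen', while case or agreement mismatches fail to unify and are rejected.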
6 | ## 7 | ## Author: Michaela Atterer 8 | ## Ewan Klein 9 | ## 10 | ## Plural transitive verbs productions by Jordan Boyd-Graber (ezubaric at users.sourceforge.net) 11 | 12 | % start S 13 | ##################### 14 | # Grammar Productions 15 | ##################### 16 | S -> NP[CASE=nom, AGR=?a] VP[AGR=?a] 17 | 18 | NP[CASE=?c, AGR=?a] -> PRO[CASE=?c, AGR=?a] 19 | NP[CASE=?c, AGR=?a] -> Det[CASE=?c, AGR=?a] N[CASE=?c, AGR=?a] 20 | 21 | VP[AGR=?a] -> IV[AGR=?a] 22 | VP[AGR=?a] -> TV[OBJCASE=?c, AGR=?a] NP[CASE=?c] 23 | 24 | ##################### 25 | # Lexical Productions 26 | ##################### 27 | # Singular determiners 28 | 29 | # masc 30 | Det[CASE=nom, AGR=[GND=masc,PER=3,NUM=sg]] -> 'der' 31 | Det[CASE=dat, AGR=[GND=masc,PER=3,NUM=sg]] -> 'dem' 32 | Det[CASE=acc, AGR=[GND=masc,PER=3,NUM=sg]] -> 'den' 33 | 34 | # fem 35 | Det[CASE=nom, AGR=[GND=fem,PER=3,NUM=sg]] -> 'die' 36 | Det[CASE=dat, AGR=[GND=fem,PER=3,NUM=sg]] -> 'der' 37 | Det[CASE=acc, AGR=[GND=fem,PER=3,NUM=sg]] -> 'die' 38 | 39 | # Plural determiners 40 | Det[CASE=nom, AGR=[PER=3,NUM=pl]] -> 'die' 41 | Det[CASE=dat, AGR=[PER=3,NUM=pl]] -> 'den' 42 | Det[CASE=acc, AGR=[PER=3,NUM=pl]] -> 'die' 43 | 44 | # Nouns 45 | N[AGR=[GND=masc,PER=3,NUM=sg]] -> 'Hund' 46 | N[CASE=nom, AGR=[GND=masc,PER=3,NUM=pl]] -> 'Hunde' 47 | N[CASE=dat, AGR=[GND=masc,PER=3,NUM=pl]] -> 'Hunden' 48 | N[CASE=acc, AGR=[GND=masc,PER=3,NUM=pl]] -> 'Hunde' 49 | 50 | N[AGR=[GND=fem,PER=3,NUM=sg]] -> 'Katze' 51 | N[AGR=[GND=fem,PER=3,NUM=pl]] -> 'Katzen' 52 | 53 | # Pronouns 54 | PRO[CASE=nom, AGR=[PER=1,NUM=sg]] -> 'ich' 55 | PRO[CASE=acc, AGR=[PER=1,NUM=sg]] -> 'mich' 56 | PRO[CASE=dat, AGR=[PER=1,NUM=sg]] -> 'mir' 57 | PRO[CASE=nom, AGR=[PER=2,NUM=sg]] -> 'du' 58 | PRO[CASE=nom, AGR=[PER=3,NUM=sg]] -> 'er' | 'sie' | 'es' 59 | PRO[CASE=nom, AGR=[PER=1,NUM=pl]] -> 'wir' 60 | PRO[CASE=acc, AGR=[PER=1,NUM=pl]] -> 'uns' 61 | PRO[CASE=dat, AGR=[PER=1,NUM=pl]] -> 'uns' 62 | PRO[CASE=nom, AGR=[PER=2,NUM=pl]] -> 'ihr' 63 | PRO[CASE=nom, AGR=[PER=3,NUM=pl]] -> 'sie' 64 | 65 | # Verbs 66 | IV[AGR=[NUM=sg,PER=1]] -> 'komme' 67 | IV[AGR=[NUM=sg,PER=2]] -> 'kommst' 68 | IV[AGR=[NUM=sg,PER=3]] -> 'kommt' 69 | IV[AGR=[NUM=pl, PER=1]] -> 'kommen' 70 | IV[AGR=[NUM=pl, PER=2]] -> 'kommt' 71 | IV[AGR=[NUM=pl, PER=3]] -> 'kommen' 72 | 73 | TV[OBJCASE=acc, AGR=[NUM=sg,PER=1]] -> 'sehe' | 'mag' 74 | TV[OBJCASE=acc, AGR=[NUM=sg,PER=2]] -> 'siehst' | 'magst' 75 | TV[OBJCASE=acc, AGR=[NUM=sg,PER=3]] -> 'sieht' | 'mag' 76 | TV[OBJCASE=dat, AGR=[NUM=sg,PER=1]] -> 'folge' | 'helfe' 77 | TV[OBJCASE=dat, AGR=[NUM=sg,PER=2]] -> 'folgst' | 'hilfst' 78 | TV[OBJCASE=dat, AGR=[NUM=sg,PER=3]] -> 'folgt' | 'hilft' 79 | TV[OBJCASE=acc, AGR=[NUM=pl,PER=1]] -> 'sehen' | 'moegen' 80 | TV[OBJCASE=acc, AGR=[NUM=pl,PER=2]] -> 'sieht' | 'moegt' 81 | TV[OBJCASE=acc, AGR=[NUM=pl,PER=3]] -> 'sehen' | 'moegen' 82 | TV[OBJCASE=dat, AGR=[NUM=pl,PER=1]] -> 'folgen' | 'helfen' 83 | TV[OBJCASE=dat, AGR=[NUM=pl,PER=2]] -> 'folgt' | 'helft' 84 | TV[OBJCASE=dat, AGR=[NUM=pl,PER=3]] -> 'folgen' | 'helfen' 85 | 86 | 87 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/drt_glue.semtype: -------------------------------------------------------------------------------- 1 | ######################################################################## 2 | # DRT-Glue Semantics Formulas Using DRT and Event Representation 3 | # 4 | # Entries are made up of three parts, separated by colons (":") 5 | # 6 | # 1) The semtype name. 
7 | # - May appear multiple times with different relationship sets (3) 8 | # - May "extend" other semtypes: "type(parent)" 9 | # 10 | # 2) The glue formulas. 11 | # - A comma-separated list of tuples representing glue formulas 12 | # - If the entry is an extension, then the listed formulas will be added to 13 | # the list from the super type 14 | # 15 | # 3) The relationship set (OPTIONAL) 16 | # - If not specified, then assume the entry covers ALL relationship sets 17 | # - If the entry is an extension, then the relationship set dictates which 18 | # particular entry should be extended. If no relationship set is 19 | # specified, then every entry of the parent type is extended. 20 | # 21 | ######################################################################## 22 | 23 | #Quantifiers 24 | def_art : (\P Q.([x],[((([y],[])+Q(y)) <-> (x = y)), P(x)]), ((v -o r) -o ((f -o Var) -o Var))) 25 | ex_quant : (\P Q.(([x],[])+P(x)+Q(x)), ((super.v -o super.r) -o ((super.f -o super.var) -o super.var))) 26 | univ_quant : (\P Q.([],[((([x],[])+P(x)) -> Q(x))]), ((super.v -o super.r) -o ((super.f -o super.var) -o super.var))) 27 | no_quant : (\P Q.(-(([x],[])+P(x)+Q(x))), ((super.v -o super.r) -o ((super.f -o super.var) -o super.var))) 28 | 29 | #Nouns 30 | NN : (\Q.(([x],[(x)])+Q(x)), ((f -o Var) -o Var)) : [] # treat a noun missing its spec as implicitly existentially quantified 31 | 32 | NN : (\x.([],[(x)]), (v -o r)) : [spec] 33 | NN : (\P Q.(([x],[]) + P(x) + Q(x)), ((v -o r) -o ((f -o var) -o var))), (\x.([],[(x)]), (v -o r)) : [] # treat a noun missing its spec as implicitly existentially quantified 34 | NNP : (\P Q.(([x],[]) + P(x) + Q(x)), ((v -o r) -o ((f -o var) -o var))), (\x.([],[(x)]), (v -o r)) 35 | NNS(NN) 36 | PRP : (\P Q.(([x],[]) + P(x) + Q(x)), ((v -o r) -o ((f -o var) -o var))), (\x.([],[PRO(x)]), (v -o r)) 37 | 38 | #Verbs 39 | VB : (\x.([],[(x)]), (subj -o f)) : [subj] #iv 40 | VB : (\x y.([],[(x,y)]), (subj -o (obj -o f))) : [subj, obj] #tv 41 | VB : (\y.exists x.([],[(x,y)]), (obj -o f)) : [obj] #incomplete tv 42 | VB : (\x y z.([],[(x,y,z)]), (subj -o (obj -o (theme -o f)))) : [subj, obj, theme] #dtv 43 | VB : (\y z.exists x.([],[(x,y,z)]), obj -o (theme -o f)) : [obj, theme] #incomplete dtv 44 | VB : (\x z.exists y.([],[(x,y,z)]), subj -o (theme -o f)) : [subj, theme] #incomplete dtv 45 | VB : (\z.exists x y.([],[(x,y,z)]), theme -o f) : [theme] #incomplete dtv 46 | VB : (\x y.([],[(x,y)]), (subj -o (comp -o f))) : [subj, comp] #tv_comp 47 | VB : (\x P.([],[(x,P)]), (subj -o ((xcomp.subj -o xcomp) -o f))) : [subj, xcomp] #equi 48 | VB : (\x y P.([],[(x,y,P)]), (subj -o (obj -o ((xcomp.subj -o xcomp) -o f)))) : [subj, obj, xcomp] # object equi 49 | VB : (\P.([],[(P)]), (xcomp -o f)) : [xcomp] #raising 50 | VBD(VB) : (\P.PAST(P), (f -o f)) 51 | VBZ(VB) 52 | 53 | #Modifiers 54 | nmod : (\x.([],[(x)]), f), (\P Q x.(P(x)+Q(x)), (f -o ((super.v -o super.r) -o (super.v -o super.r)))) 55 | JJ(nmod) 56 | vmod : (\x.([],[(x)]), f), (\P Q x.P(Q(x)), (f -o (super -o super))) 57 | RB(vmod) 58 | tense : (\P.([],[(P)]), (super.f -o super.f)) 59 | 60 | #Conjunctions 61 | cc_clause : (\P Q.(P + Q), (a -o (b -o f))) 62 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/drt_glue_event.semtype: -------------------------------------------------------------------------------- 1 | ######################################################################## 2 | # DRT-Glue Semantics Formulas Using DRT and Event Representation 3 | # 4 | # 
Entries are made up of three parts, separated by colons (":") 5 | # 6 | # 1) The semtype name. 7 | # - May appear multiple times with different relationship sets (3) 8 | # - May "extend" other semtypes: "type(parent)" 9 | # 10 | # 2) The glue formulas. 11 | # - A comma-separated list of tuples representing glue formulas 12 | # - If the entry is an extension, then the listed formulas will be added to 13 | # the list from the super type 14 | # 15 | # 3) The relationship set (OPTIONAL) 16 | # - If not specified, then assume the entry covers ALL relationship sets 17 | # - If the entry is an extension, then the relationship set dictates which 18 | # particular entry should be extended. If no relationship set is 19 | # specified, then every entry of the parent type is extended. 20 | # 21 | ######################################################################## 22 | 23 | #Quantifiers 24 | def_art : (\P Q.([x],[((([y],[])+Q(y)) <-> (x = y)), P(x)]), ((v -o r) -o ((f -o Var) -o Var))) 25 | ex_quant : (\P Q.(([x],[])+P(x)+Q(x)), ((super.v -o super.r) -o ((super.f -o super.var) -o super.var))) 26 | univ_quant : (\P Q.([],[((([x],[])+P(x)) -> Q(x))]), ((super.v -o super.r) -o ((super.f -o super.var) -o super.var))) 27 | no_quant : (\P Q.(-(([x],[])+P(x)+Q(x))), ((super.v -o super.r) -o ((super.f -o super.var) -o super.var))) 28 | 29 | #Nouns 30 | NN : (\x.([],[(x)]), (v -o r)) : [spec] 31 | NN : (\P Q e.(([x],[]) + P(x) + Q(x,e)), ((v -o r) -o ((f -o var) -o var))), (\x.([],[(x)]), (v -o r)) : [] # treat a noun missing its spec as implicitly existentially quantified 32 | NNP : (\P Q e.(([x],[]) + P(x) + Q(x,e)), ((v -o r) -o ((f -o var) -o var))), (\x.([],[(x)]), (v -o r)) 33 | NNS(NN) 34 | PRP : (\P Q e.(([x],[]) + P(x) + Q(x,e)), ((v -o r) -o ((f -o var) -o var))), (\x.([],[PRO(x)]), (v -o r)) 35 | 36 | #Verbs 37 | VB : (\x e.([],[(e),subj(e,x)]), (subj -o f)) [subj] #iv 38 | VB : (\x y e.([],[(e), subj(e,x), obj(e,y)]), (subj -o (obj -o f))) : [subj, obj] #tv 39 | VB : (\x y z e.([],[(e), subj(e,x), obj(e,y), theme(e,z)]), (subj -o (obj -o (theme -o f)))) : [subj, obj, theme] #dtv 40 | VB : (\y z e.([x],[(e), subj(e,x), obj(e,y), theme(e,z)]), obj -o (theme -o f)) : [obj, theme] #incomplete dtv 41 | VB : (\x z e.([y],[(e), subj(e,x), obj(e,y), theme(e,z)]), subj -o (theme -o f)) : [subj, theme] #incomplete dtv 42 | VB : (\z e.([x,y],[(e), subj(e,x), obj(e,y), theme(e,z)]), theme -o f) : [theme] #incomplete dtv 43 | VB : (\x y e.(([],[(e), subj(e,x), comp(e,y)])+P(e)), (subj -o (comp -o f))) : [subj, comp] #tv_comp 44 | VB : (\x P e.([],[(e), subj(e,x), xcomp(e,P)]), (subj -o ((xcomp.subj -o xcomp) -o f))) : [subj, xcomp] #equi 45 | VB : (\x y P e.([],[(e), subj(e,x), obj(e,y), (xcomp e P)]), (subj -o (obj -o ((xcomp.subj -o xcomp) -o f)))) : [subj, obj, xcomp] # object equi 46 | VB : (\P e.([],[(e), xcomp(e,P)]), (xcomp -o f)) : [xcomp] #raising 47 | VBD(VB) : (\P.PAST(P), (f -o f)) 48 | VBZ(VB) 49 | 50 | #Modifiers 51 | nmod : (\x.([],[(x)]), f), (\P Q x.(P(x)+Q(x)), (f -o ((super.v -o super.r) -o (super.v -o super.r)))) : [] 52 | JJ(nmod) : [] 53 | vmod : (\x.([],[(x)]), f), (\P Q x.P(Q(x)), (f -o (super -o super))) : [] 54 | RB(vmod) : [] 55 | tense(vmod) : [] 56 | 57 | #Prepositions 58 | IN : (\P Q e1.P(\x e2.(([],[(e2,x)]) + Q(e2)),e1), ((subj -o subj.var) -o subj.var) -o (super -o super)) : [subj] 59 | IN(vmod) : [] 60 | 61 | #Conjunctions 62 | cc_clause : (\P Q.(P + Q), (a -o (b -o f))) 63 | -------------------------------------------------------------------------------- 
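The *.semtype files above all share the entry format documented in their headers: a semtype name, a comma-separated list of glue-formula tuples, and an optional relationship set, with the parts separated by colons. As a minimal illustration only (this is not the NLTK glue-semantics loader, and split_semtype_entry is a name made up for this sketch), one entry line can be pulled apart like this:

# Minimal sketch: split one semtype entry into its three colon-separated parts.
def split_semtype_entry(line):
    "Return (name, formulas, relationship set or None) for one entry line."
    line = line.split('#', 1)[0].strip()          # drop any trailing comment
    parts = [p.strip() for p in line.split(':')]
    name = parts[0]                               # e.g. 'NN', or an extension like 'VBD(VB)'
    relset = None
    if parts[-1].startswith('['):                 # the optional third part
        relset = parts[-1]
        parts = parts[:-1]
    formulas = ':'.join(parts[1:])                # the glue formula tuples
    return name, formulas, relset

print(split_semtype_entry(r"NN : (\x.(x), (v -o r)) : [spec]"))
# -> ('NN', '(\\x.(x), (v -o r))', '[spec]')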
/examples/school/words.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import re, random 4 | 5 | from collections import defaultdict 6 | 7 | ############################################################################### 8 | ### FILE ACCESS 9 | ############################################################################### 10 | 11 | def read_words(filename): 12 | "Get the words out of the file, ignoring case and punctuation." 13 | text = open(filename).read().lower() 14 | return re.split(r'\W+', text) 15 | 16 | def read_text(filename): 17 | "Load the file into a text string, normalising whitespace." 18 | text = open(filename).read() 19 | return re.sub(r'\s+', ' ', text) 20 | 21 | ############################################################################### 22 | ### SEARCHING 23 | ############################################################################### 24 | 25 | def print_conc(pattern, text, num=25): 26 | "Print segments of the file that match the pattern." 27 | for i in range(num): 28 | m = re.search(pattern, text) 29 | if not m: 30 | break 31 | print(text[max(m.start()-30, 0):m.start()+40]) 32 | text = text[m.start()+1:] 33 | 34 | ############################################################################### 35 | ### COUNTING 36 | ############################################################################### 37 | 38 | def count_words(words): 39 | "Count the number of times each word has appeared." 40 | wordcounts = {} 41 | for word in words: 42 | if word not in wordcounts: 43 | wordcounts[word] = 0 44 | wordcounts[word] += 1 45 | return wordcounts 46 | 47 | def print_freq(counts, num=25): 48 | "Print the words and their counts, in order of decreasing frequency."
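# Sort the (word, count) pairs by decreasing count, then print each word's rank, the cumulative percentage of all tokens covered so far, and the word itself.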
49 | from operator import itemgetter 50 | total = sum(counts.values()) 51 | cumulative = 0.0 52 | sorted_word_counts = sorted(counts.items(), key=itemgetter(1), reverse=True) 53 | for i in range(num): 54 | word, count = sorted_word_counts[i] 55 | cumulative += count * 100.0 / total 56 | print("%3d %5.2f%% %s" % (i, cumulative, word)) 57 | 58 | ############################################################################### 59 | ### COLLOCATIONS 60 | ############################################################################### 61 | 62 | def count_pairs(words, num=50): 63 | "Count the frequent bigrams, omitting short words." 64 | paircounts = {} 65 | for i in range(len(words)-1): 66 | if len(words[i]) > 4 and len(words[i+1]) > 4: 67 | pair = words[i] + ' ' + words[i+1] 68 | if pair not in paircounts: 69 | paircounts[pair] = 0 70 | paircounts[pair] += 1 71 | return paircounts 72 | 73 | ############################################################################### 74 | ### RANDOM TEXT GENERATION 75 | ############################################################################### 76 | 77 | def train(words): 78 | prev1 = '' 79 | prev2 = '' 80 | model = defaultdict(list) 81 | for word in words: 82 | key = (prev1, prev2) 83 | if word not in model[key]: 84 | model[key].append(word) 85 | model[prev2].append(word) 86 | prev2 = prev1 87 | prev1 = word 88 | return model 89 | 90 | def generate(model, num=100): 91 | prev2 = '' 92 | prev1 = '' 93 | for i in range(num): 94 | next = model[(prev1,prev2)] 95 | if next: 96 | word = random.choice(next) 97 | else: 98 | word = random.choice(model[prev2]) 99 | print(word, end=' ') 100 | prev2 = prev1 101 | prev1 = word 102 | print() 103 | 104 | 105 | 106 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/glue_event.semtype: -------------------------------------------------------------------------------- 1 | ######################################################################## 2 | # Glue Semantics Formulas Using Event Representation 3 | # 4 | # Entries are made up of three parts, separated by colons (":") 5 | # 6 | # 1) The semtype name. 7 | # - May appear multiple times with different relationship sets (3) 8 | # - May "extend" other semtypes: "type(parent)" 9 | # 10 | # 2) The glue formulas. 11 | # - A comma-separated list of tuples representing glue formulas 12 | # - If the entry is an extension, then the listed formulas will be added to 13 | # the list from the super type 14 | # 15 | # 3) The relationship set (OPTIONAL) 16 | # - If not specified, then assume the entry covers ALL relationship sets 17 | # - If the entry is an extension, then the relationship set dictates which 18 | # particular entry should be extended. If no relationship set is 19 | # specified, then every entry of the parent type is extended.
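# For example, the entry "NN : (\x.(x), (v -o r)) : [spec]" below has the name NN, a single glue formula pairing the meaning \x.(x) with the linear-logic type (v -o r), and the relationship set [spec].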
20 | # 21 | ######################################################################## 22 | 23 | #Quantifiers 24 | def_art : (\P Q.exists x.(P(x) & all y.(Q(y) <-> (x = y))), ((super.v -o super.r) -o ((super.f -o super.var) -o super.var))) 25 | ex_quant : (\P Q.exists x.(P(x) & Q(x)), ((super.v -o super.r) -o ((super.f -o super.var) -o super.var))) 26 | univ_quant : (\P Q.all x.(P(x) -> Q(x)), ((super.v -o super.r) -o ((super.f -o super.var) -o super.var))) 27 | no_quant : (\P Q.-exists x.(P(x) & Q(x)), ((super.v -o super.r) -o ((super.f -o super.var) -o super.var))) 28 | 29 | #Nouns 30 | NN : (\x.(x), (v -o r)) : [spec] 31 | NN : (\P Q e.exists x.(P(x) & Q(x,e)), ((v -o r) -o ((f -o var) -o var))), (\x.(x), (v -o r)) : [] # treat a noun missing its spec as implicitly existentially quantified 32 | NNP : (\P Q e.exists x.(P(x) & Q(x,e)), ((v -o r) -o ((f -o var) -o var))), (\x.(x), (v -o r)) 33 | NNS(NN) 34 | PRP : (\P Q e.exists x.(P(x) & Q(x,e)), ((v -o r) -o ((f -o var) -o var))), (\x.PRO(x), (v -o r)) 35 | 36 | #Verbs 37 | VB : (\x e.((e) & subj(e,x)), (subj -o f)) : [subj] #iv 38 | VB : (\x y e.((e) & subj(e,x) & obj(e,y)), (subj -o (obj -o f))) : [subj, obj] #tv 39 | VB : (\y e.exists x.((e) & subj(e,x) & obj(e,y)), (obj -o f)) : [obj] #incomplete tv 40 | VB : (\x y z e.((e) & subj(e,x) & obj(e,y) & theme(e,z)), (subj -o (obj -o (theme -o f)))) : [subj, obj, theme] #dtv 41 | VB : (\y z e.exists x.((e) & subj(e,x) & obj(e,y) & theme(e,z)), obj -o (theme -o f)) : [obj, theme] #incomplete dtv 42 | VB : (\x z e.exists y.((e) & subj(e,x) & obj(e,y) & theme(e,z)), subj -o (theme -o f)) : [subj, theme] #incomplete dtv 43 | VB : (\z e.exists x y.((e) & subj(e,x) & obj(e,y) & theme(e,z)), theme -o f) : [theme] #incomplete dtv 44 | VB : (\x y e.((e) & subj(e,x) & comp(e,y) & P(e)), (subj -o (comp -o f))) : [subj, comp] #tv_comp 45 | VB : (\x P e.((e) & subj(e,x) & xcomp(e,P)), (subj -o ((xcomp.subj -o xcomp) -o f))) : [subj, xcomp] #equi 46 | VB : (\x y P e.((e) & subj(e,x) & obj(e,y) & (xcomp e P)), (subj -o (obj -o ((xcomp.subj -o xcomp) -o f)))) : [subj, obj, xcomp] # object equi 47 | VB : (\P e.((e) & xcomp(e,P)), (xcomp -o f)) : [xcomp] #raising 48 | VBD(VB) : (\P.PAST(P), (f -o f)) 49 | VBZ(VB) 50 | 51 | #Auxillary Verbs 52 | MD : (\P Q e1.P(\x e2.((e2,x) & Q(e2)),e1), ((subj -o subj.var) -o subj.var) -o (main -o main)) : [subj] 53 | 54 | #Modifiers 55 | nmod : (\Q P x.(P(x) & Q(x)), (f -o ((super.v -o super.r) -o (super.v -o super.r)))), (\x.(x), f) : [] 56 | JJ(nmod) : [] 57 | vmod : (\P.(P), (super.f -o super.f)) : [] 58 | RB(vmod) : [] 59 | tense : (\P.(P), (super.f -o super.f)) : [] 60 | 61 | #Prepositions 62 | IN : (\P Q e1.P(\x e2.((e2,x) & Q(e2)),e1), ((subj -o subj.var) -o subj.var) -o (super -o super)) : [subj] 63 | IN(vmod) : [] 64 | 65 | #Conjunctions 66 | cc_clause : (\P Q.(P & Q), (a -o (b -o f))) 67 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/chat80.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: chat80.fcfg 2 | ## 3 | ## 4 | ## Grammar used to illustrate querying the Chat-80 database. 
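## For example, the rules and lexicon below analyse queries such as 'which countries border a sea'.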
5 | ## 6 | ## Author: Ewan Klein 7 | ## URL: 8 | ## For license information, see LICENSE.TXT 9 | 10 | % start S 11 | # ########################### 12 | # Grammar Rules 13 | # ############################ 14 | 15 | S[SEM=] -> NP[-PRED,NUM=?n,SEM=?subj] VP[NUM=?n,SEM=?vp] 16 | 17 | Rel[NUM=?n,SEM=] -> Comp[SEM=?comp] VP[NUM=?n,SEM=?vp] 18 | 19 | NP[-PRED, NUM=pl,SEM=<(\P Q. exists x. (Q(x) and P(x)) ?nom)>] -> Nom[NUM=pl,SEM=?nom] 20 | NP[WH=?wh,-PRED,NUM=?n,SEM=] -> Det[WH=?wh, NUM=?n,SEM=?det] Nom[NUM=?n,SEM=?nom] 21 | 22 | 23 | NP[+PRED,NUM=sg,SEM=?nom] -> Det[NUM=sg,SEM=?det] Nom[NUM=sg,SEM=?nom] 24 | NP[+PRED,NUM=pl,SEM=?nom] -> Nom[NUM=pl,SEM=?nom] 25 | 26 | NP[LOC=?l,NUM=?n,SEM=?np] -> PropN[LOC=?l,NUM=?n,SEM=?np] 27 | 28 | Nom[NUM=?n,SEM=?nom] -> N[NUM=?n,SEM=?nom] 29 | Nom[NUM=sg,SEM=] -> N[subcat=11,NUM=sg,SEM=?nom] PP[pform=of,SEM=?pp] 30 | Nom[NUM=?n,SEM=] -> Nom[NUM=?n,SEM=?nom] Rel[NUM=?n,SEM=?mod] 31 | Nom[NUM=?n,SEM=] -> A[SEM=?adj] Nom[NUM=?n,SEM=?nom] 32 | 33 | ##VP[NUM=?n,SEM=?v] -> V[SUBCAT=1,NUM=?n,SEM=?v] 34 | VP[NUM=?n,SEM=] -> V[SUBCAT=2, NUM=?n,SEM=?v] NP[-PRED,SEM=?obj] 35 | VP[NUM=?n,SEM=] -> V[SUBCAT=3, NUM=?n,SEM=?v] NP[+PRED,SEM=?PRED] 36 | 37 | PP[PFORM=?pf,SEM=] -> P[PFORM=?pf, LOC=?l,SEM=?p] NP[LOC=?l,SEM=?np] 38 | 39 | 40 | # ############################ 41 | # Lexical Rules 42 | # ############################ 43 | 44 | % include chat_pnames.cfg 45 | 46 | Comp[SEM=<\P Q x.(P(x) and Q(x))>] -> 'that' 47 | 48 | NP[+WH, NUM=sg, SEM=<\P.\x.P(x)>] -> 'what' 49 | 50 | Det[-WH,NUM=sg,SEM=<\P Q. all x. (P(x) -> Q(x))>] -> 'every' 51 | Det[-WH,NUM=pl,SEM=<\P Q. all x. (P(x) -> Q(x))>] -> 'all' 52 | Det[-WH,SEM=<\P Q. exists x. (P(x) & Q(x))>] -> 'some' 53 | Det[-WH,NUM=sg,SEM=<\P Q. exists x. (P(x) & Q(x))>] -> 'a' 54 | Det[-WH,NUM=sg,SEM=<\P Q. exists x. (P(x) & Q(x))>] -> 'the' 55 | Det[+WH,SEM=<\P Q x. (Q(x) & P(x))>] -> 'which' 56 | 57 | N[SUBCAT=10,NUM=sg,SEM=<\x.city(x)>] -> 'city' 58 | N[SUBCAT=10,NUM=pl,SEM=<\x.city(x)>] -> 'cities' 59 | N[SUBCAT=10,NUM=sg,SEM=<\x.continent(x)>] -> 'continent' 60 | N[SUBCAT=10,NUM=pl,SEM=<\x.continent(x)>] -> 'continents' 61 | N[SUBCAT=10,NUM=sg,SEM=<\x.country(x)>] -> 'country' 62 | N[SUBCAT=10,NUM=pl,SEM=<\x.country(x)>] -> 'countries' 63 | N[SUBCAT=10,NUM=sg,SEM=<\x.sea(x)>] -> 'sea' 64 | N[SUBCAT=10,NUM=pl,SEM=<\x.sea(x)>] -> 'seas' 65 | N[SUBCAT=10,NUM=sg,SEM=<\x.ocean(x)>] -> 'ocean' 66 | N[SUBCAT=10,NUM=pl,SEM=<\x.ocean(x)>] -> 'oceans' 67 | 68 | PL[SEM=<\P Q. exists x. 
(P(x) & Q(x))>] -> ' ' 69 | 70 | N[SUBCAT=11,NUM=sg,SEM=<\x y.area_of(x,y))>] -> 'area' 71 | N[SUBCAT=11,NUM=sg,SEM=<\x y.capital_of(x,y))>] -> 'capital' 72 | N[SUBCAT=11,NUM=sg,SEM=<\x y.currency_of(x,y))>] -> 'currency' 73 | N[SUBCAT=11,NUM=sg,SEM=<\x y.region_of(x,y))>] -> 'region' 74 | N[SUBCAT=11,NUM=sg,SEM=<\x y.longitude_of(x,y))>] -> 'longitude' 75 | N[SUBCAT=11,NUM=sg,SEM=<\x y.latitude_of(x,y))>] -> 'latitude' 76 | N[SUBCAT=11,NUM=sg,SEM=<\x y.population_of(x,y))>] -> 'population' 77 | 78 | 79 | ## V[SUBCAT=3,NUM=sg,SEM=<\X y.(X \x.(x = y))>,tns=pres] -> 'is' 80 | ## V[SUBCAT=3,NUM=pl,SEM=<\P.P))>,tns=pres] -> 'are' 81 | V[SUBCAT=3,NUM=sg,SEM=<\P.P>,tns=pres] -> 'is' 82 | V[SUBCAT=3,NUM=pl,SEM=<\P.P>,tns=pres] -> 'are' 83 | V[SUBCAT=2,NUM=sg,SEM=<\X y.(X \x.border(y,x))>,tns=pres] -> 'borders' 84 | V[SUBCAT=2,NUM=pl,SEM=<\X y.(X \x.border(y,x))>,tns=pres] -> 'border' 85 | V[SUBCAT=2,NUM=sg,SEM=<\X y.(X \x.contain(y,x))>,tns=pres] -> 'contains' 86 | V[SUBCAT=2,NUM=pl,SEM=<\X y.(X \x.contain(y,x))>,tns=pres] -> 'contain' 87 | 88 | A[SEM=<\P x.(contain(asia,x) & P(x))>] -> 'Asian' 89 | 90 | P[PFORM=of,SEM=<\X.X>] -> 'of' 91 | P[+LOC,SEM=<\X P x.(X \y.(P(x) & in(x,y)))>] -> 'in' 92 | P[-LOC,SEM=<\X P x.(X \y.(P(x) & with(x,y)))>] -> 'with' 93 | 94 | 95 | 96 | -------------------------------------------------------------------------------- /examples/semantics/chat80.cfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: chat80.cfg 2 | ## 3 | ## 4 | ## Grammar used to illustrate querying the Chat-80 database. 5 | ## 6 | ## Author: Ewan Klein 7 | ## URL: 8 | ## For license information, see LICENSE.TXT 9 | 10 | % start S 11 | # ########################### 12 | # Grammar Rules 13 | # ############################ 14 | 15 | S[sem=] -> NP[-pred,num=?n,sem=?subj] VP[num=?n,sem=?vp] 16 | 17 | Rel[num=?n,sem=] -> Comp[sem=?comp] VP[num=?n,sem=?vp] 18 | 19 | NP[-pred, num=pl,sem=<(\P Q. some x. ((Q x) and (P x)) ?nom)>] -> Nom[num=pl,sem=?nom] 20 | NP[wh=?wh,-pred,num=?n,sem=] -> Det[wh=?wh, num=?n,sem=?det] Nom[num=?n,sem=?nom] 21 | 22 | 23 | NP[+pred,num=sg,sem=?nom] -> Det[num=sg,sem=?det] Nom[num=sg,sem=?nom] 24 | NP[+pred,num=pl,sem=?nom] -> Nom[num=pl,sem=?nom] 25 | 26 | NP[loc=?l,num=?n,sem=?np] -> PropN[loc=?l,num=?n,sem=?np] 27 | 28 | Nom[num=?n,sem=?nom] -> N[num=?n,sem=?nom] 29 | Nom[num=sg,sem=] -> N[subcat=11,num=sg,sem=?nom] PP[pform=of,sem=?pp] 30 | Nom[num=?n,sem=] -> Nom[num=?n,sem=?nom] Rel[num=?n,sem=?mod] 31 | Nom[num=?n,sem=] -> A[sem=?adj] Nom[num=?n,sem=?nom] 32 | 33 | ##VP[num=?n,sem=?v] -> V[subcat=1,num=?n,sem=?v] 34 | VP[num=?n,sem=] -> V[subcat=2, num=?n,sem=?v] NP[-pred,sem=?obj] 35 | VP[num=?n,sem=] -> V[subcat=3, num=?n,sem=?v] NP[+pred,sem=?pred] 36 | 37 | PP[pform=?pf,sem=] -> P[pform=?pf, loc=?l,sem=?p] NP[loc=?l,sem=?np] 38 | 39 | 40 | # ############################ 41 | # Lexical Rules 42 | # ############################ 43 | 44 | % include chat_pnames.cfg 45 | 46 | Comp[sem=<\P Q x. ((P x) and (Q x))>] -> 'that' 47 | 48 | NP[+wh, num=sg, sem=<\P.\x.(P x)>] -> 'what' 49 | 50 | Det[-wh,num=sg,sem=<\P Q. all x. ((P x) implies (Q x))>] -> 'every' 51 | Det[-wh,num=pl,sem=<\P Q. all x. ((P x) implies (Q x))>] -> 'all' 52 | Det[-wh,sem=<\P Q. some x. ((P x) and (Q x))>] -> 'some' 53 | Det[-wh,num=sg,sem=<\P Q. some x. ((P x) and (Q x))>] -> 'a' 54 | Det[-wh,num=sg,sem=<\P Q. some x. ((P x) and (Q x))>] -> 'the' 55 | Det[+wh,sem=<\P Q x. 
((Q x) and (P x))>] -> 'which' 56 | 57 | N[subcat=10,num=sg,sem=] -> 'city' 58 | N[subcat=10,num=pl,sem=] -> 'cities' 59 | N[subcat=10,num=sg,sem=] -> 'continent' 60 | N[subcat=10,num=pl,sem=] -> 'continents' 61 | N[subcat=10,num=sg,sem=] -> 'country' 62 | N[subcat=10,num=pl,sem=] -> 'countries' 63 | N[subcat=10,num=sg,sem=] -> 'sea' 64 | N[subcat=10,num=pl,sem=] -> 'seas' 65 | N[subcat=10,num=sg,sem=] -> 'ocean' 66 | N[subcat=10,num=pl,sem=] -> 'oceans' 67 | 68 | PL[sem=<\P Q. some x. ((P x) and (Q x))>] -> ' ' 69 | 70 | N[subcat=11,num=sg,sem=<\x y. (area_of y x))>] -> 'area' 71 | N[subcat=11,num=sg,sem=<\x y. (capital_of y x))>] -> 'capital' 72 | N[subcat=11,num=sg,sem=<\x y. (currency_of y x))>] -> 'currency' 73 | N[subcat=11,num=sg,sem=<\x y. (region_of y x))>] -> 'region' 74 | N[subcat=11,num=sg,sem=<\x y. (longitude_of y x))>] -> 'longitude' 75 | N[subcat=11,num=sg,sem=<\x y. (latitude_of y x))>] -> 'latitude' 76 | N[subcat=11,num=sg,sem=<\x y. (population_of y x))>] -> 'population' 77 | 78 | 79 | 80 | ## V[subcat=3,num=sg,sem=<\X y. (X \x. (x = y))>,tns=pres] -> 'is' 81 | ## V[subcat=3,num=pl,sem=<\P. P))>,tns=pres] -> 'are' 82 | V[subcat=3,num=sg,sem=<\P. P>,tns=pres] -> 'is' 83 | V[subcat=3,num=pl,sem=<\P. P>,tns=pres] -> 'are' 84 | V[subcat=2,num=sg,sem=<\X y. (X \x. (border x y))>,tns=pres] -> 'borders' 85 | V[subcat=2,num=pl,sem=<\X y. (X \x. (border x y))>,tns=pres] -> 'border' 86 | V[subcat=2,num=sg,sem=<\X y. (X \x. (contain x y))>,tns=pres] -> 'contains' 87 | V[subcat=2,num=pl,sem=<\X y. (X \x. (contain x y))>,tns=pres] -> 'contain' 88 | 89 | A[sem=<\P x. ((contain x asia) and (P x))>] -> 'Asian' 90 | 91 | P[pform=of,sem=<\X.X>] -> 'of' 92 | P[+loc,sem=<\X P x. (X \y. ((P x) and (in y x)))>] -> 'in' 93 | P[-loc,sem=<\X P x. (X \y. ((P x) and (with y x)))>] -> 'with' 94 | 95 | 96 | 97 | -------------------------------------------------------------------------------- /examples/grammars/book_grammars/drt.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: drt.fcfg 2 | ## 3 | ## Author: Dan Garrette 4 | ## URL: 5 | ## For license information, see LICENSE.TXT 6 | 7 | % start S 8 | ############################ 9 | # Grammar Rules 10 | ############################# 11 | 12 | S[SEM = ] -> NP[NUM=?n,SEM=?subj] VP[NUM=?n,SEM=?vp] 13 | 14 | NP[NUM=?n,SEM= ] -> Det[NUM=?n,SEM=?det] Nom[NUM=?n,SEM=?nom] 15 | NP[LOC=?l,NUM=?n,SEM=?np] -> PropN[LOC=?l,NUM=?n,SEM=?np] 16 | 17 | Nom[NUM=?n,SEM=?nom] -> N[NUM=?n,SEM=?nom] 18 | Nom[NUM=?n,SEM=] -> N[NUM=?n,SEM=?nom] PP[SEM=?pp] 19 | 20 | VP[NUM=?n,SEM=?v] -> IV[NUM=?n,SEM=?v] 21 | VP[NUM=?n,SEM=] -> TV[NUM=?n,SEM=?v] NP[SEM=?obj] 22 | 23 | ############################# 24 | # Lexical Rules 25 | ############################# 26 | 27 | PropN[-LOC,NUM=sg,SEM=<\P.(DRS([x],[Angus(x)])+P(x))>] -> 'Angus' 28 | PropN[-LOC,NUM=sg,SEM=<\P.(DRS([x],[Irene(x)])+P(x))>] -> 'Irene' 29 | PropN[-LOC,NUM=sg,SEM=<\P.(DRS([x],[John(x)])+P(x))>] -> 'John' 30 | PropN[-LOC,NUM=sg,SEM=<\P.(DRS([x],[Mary(x)])+P(x))>] -> 'Mary' 31 | PropN[-LOC,NUM=sg,SEM=<\P.(DRS([x],[Suzie(x)])+P(x))>] -> 'Suzie' 32 | PropN[-LOC,NUM=sg,SEM=<\P.(DRS([x],[Vincent(x)])+P(x))>] -> 'Vincent' 33 | PropN[-LOC,NUM=sg,SEM=<\P.(DRS([x],[Mia(x)])+P(x))>] -> 'Mia' 34 | PropN[-LOC,NUM=sg,SEM=<\P.(DRS([x],[Marsellus(x)])+P(x))>] -> 'Marsellus' 35 | PropN[-LOC,NUM=sg,SEM=<\P.(DRS([x],[Fido(x)])+P(x))>] -> 'Fido' 36 | PropN[+LOC,NUM=sg,SEM=<\P.(DRS([x],[Noosa(x)])+P(x))>] -> 'Noosa' 37 | 
PropN[-LOC,NUM=sg,SEM=<\P.(DRS([x],[PRO(x)])+P(x))>] -> 'he' 38 | PropN[-LOC,NUM=sg,SEM=<\P.(DRS([x],[PRO(x)])+P(x))>] -> 'she' 39 | PropN[-LOC,NUM=sg,SEM=<\P.(DRS([x],[PRO(x)])+P(x))>] -> 'it' 40 | 41 | Det[NUM=sg,SEM=<\P Q.DRS([],[((DRS([x],[])+P(x)) implies Q(x))])>] -> 'every' | 'Every' 42 | Det[NUM=pl,SEM=<\P Q.DRS([],[((DRS([x],[])+P(x)) implies Q(x))])>] -> 'all' | 'All' 43 | Det[SEM=<\P Q.((DRS([x],[])+P(x))+Q(x))>] -> 'some' | 'Some' 44 | Det[NUM=sg,SEM=<\P Q.((DRS([x],[])+P(x))+Q(x))>] -> 'a' | 'A' 45 | Det[NUM=sg,SEM=<\P Q.(not ((DRS([x],[])+P(x))+Q(x)))>] -> 'no' | 'No' 46 | 47 | N[NUM=sg,SEM=<\x.DRS([],[boy(x)])>] -> 'boy' 48 | N[NUM=pl,SEM=<\x.DRS([],[boy(x)])>] -> 'boys' 49 | N[NUM=sg,SEM=<\x.DRS([],[girl(x)])>] -> 'girl' 50 | N[NUM=pl,SEM=<\x.DRS([],[girl(x)])>] -> 'girls' 51 | N[NUM=sg,SEM=<\x.DRS([],[dog(x)])>] -> 'dog' 52 | N[NUM=pl,SEM=<\x.DRS([],[dog(x)])>] -> 'dogs' 53 | N[NUM=sg,SEM=<\x.DRS([],[student(x)])>] -> 'student' 54 | N[NUM=pl,SEM=<\x.DRS([],[student(x)])>] -> 'students' 55 | N[NUM=sg,SEM=<\x.DRS([],[person(x)])>] -> 'person' 56 | N[NUM=pl,SEM=<\x.DRS([],[person(x)])>] -> 'persons' 57 | N[NUM=sg,SEM=<\x.DRS([],[boxerdog(x)])>] -> 'boxer' 58 | N[NUM=pl,SEM=<\x.DRS([],[boxerdog(x)])>] -> 'boxers' 59 | N[NUM=sg,SEM=<\x.DRS([],[boxer(x)])>] -> 'boxer' 60 | N[NUM=pl,SEM=<\x.DRS([],[boxer(x)])>] -> 'boxers' 61 | N[NUM=sg,SEM=<\x.DRS([],[garden(x)])>] -> 'garden' 62 | N[NUM=sg,SEM=<\x.DRS([],[kitchen(x)])>] -> 'kitchen' 63 | 64 | IV[NUM=sg,SEM=<\x.DRS([],[bark(x)])>,tns=pres] -> 'barks' 65 | IV[NUM=pl,SEM=<\x.DRS([],[bark(x)])>,tns=pres] -> 'bark' 66 | IV[NUM=sg,SEM=<\x.DRS([],[walk(x)])>,tns=pres] -> 'walks' 67 | IV[NUM=pl,SEM=<\x.DRS([],[walk(x)])>,tns=pres] -> 'walk' 68 | IV[NUM=pl,SEM=<\x.DRS([],[dance(x)])>,tns=pres] -> 'dance' 69 | IV[NUM=sg,SEM=<\x.DRS([],[dance(x)])>,tns=pres] -> 'dances' 70 | 71 | TV[NUM=sg,SEM=<\X x.X(\y.DRS([],[own(x,y)]))>,tns=pres] -> 'owns' 72 | TV[NUM=pl,SEM=<\X x.X(\y.DRS([],[own(x,y)]))>,tns=pres] -> 'own' 73 | TV[NUM=sg,SEM=<\X x.X(\y.DRS([],[bite(x,y)]))>,tns=pres] -> 'bites' 74 | TV[NUM=pl,SEM=<\X x.X(\y.DRS([],[bite(x,y)]))>,tns=pres] -> 'bite' 75 | TV[NUM=sg,SEM=<\X x.X(\y.DRS([],[chase(x,y)]))>,tns=pres] -> 'chases' 76 | TV[NUM=pl,SEM=<\X x.X(\y.DRS([],[chase(x,y)]))>,tns=pres] -> 'chase' 77 | TV[NUM=sg,SEM=<\X x.X(\y.DRS([],[marry(x,y)]))>,tns=pres] -> 'marries' 78 | TV[NUM=pl,SEM=<\X x.X(\y.DRS([],[marry(x,y)]))>,tns=pres] -> 'marry' 79 | TV[NUM=sg,SEM=<\X x.X(\y.DRS([],[know(x,y)]))>,tns=pres] -> 'knows' 80 | TV[NUM=pl,SEM=<\X x.X(\y.DRS([],[know(x,y)]))>,tns=pres] -> 'know' 81 | TV[NUM=sg,SEM=<\X x.X(\y.DRS([],[see(x,y)]))>,tns=pres] -> 'sees' 82 | TV[NUM=pl,SEM=<\X x.X(\y.DRS([],[see(x,y)]))>,tns=pres] -> 'see' 83 | -------------------------------------------------------------------------------- /examples/semantics/syn2sem.py: -------------------------------------------------------------------------------- 1 | # Natural Language Toolkit: Parsers 2 | # 3 | # Author: Ewan Klein 4 | # URL: 5 | # For license information, see LICENSE.TXT 6 | 7 | from __future__ import print_function 8 | 9 | """ 10 | Demo of how to combine the output of parsing with evaluation in a model. 11 | Use 'python syn2sem.py -h' to find out the various options. 12 | 13 | Note that this demo currently processes the whole input file 14 | before delivering any results, consequently there may be a significant initial delay. 
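For example (based on the options defined below), 'python syn2sem.py -d chat80' parses the sentences in 'chat_sentences' with the chat80.cfg grammar and evaluates them against the model in model1.py; the '-e' option switches evaluation off.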
15 | """ 16 | 17 | from nltk.semantics import * 18 | 19 | 20 | def read_sents(file): 21 | sents = [l.rstrip() for l in open(file)] 22 | # get rid of blank lines 23 | sents = [l for l in sents if len(l) > 0] 24 | sents = [l for l in sents if not l[0] == '#'] 25 | return sents 26 | 27 | def demo(): 28 | import sys 29 | from optparse import OptionParser 30 | description = \ 31 | """ 32 | Parse and evaluate some sentences. 33 | """ 34 | 35 | opts = OptionParser(description=description) 36 | 37 | opts.set_defaults(evaluate=True, beta=True, syntrace=0, 38 | semtrace=0, demo='default', grammar='', sentences='') 39 | 40 | opts.add_option("-d", "--demo", dest="demo", 41 | help="choose demo D; omit this for the default demo, or specify 'chat80'", metavar="D") 42 | opts.add_option("-g", "--gram", dest="grammar", 43 | help="read in grammar G", metavar="G") 44 | opts.add_option("-m", "--model", dest="model", 45 | help="import model M (omit '.py' suffix)", metavar="M") 46 | opts.add_option("-s", "--sentences", dest="sentences", 47 | help="read in a file of test sentences S", metavar="S") 48 | opts.add_option("-e", "--no-eval", action="store_false", dest="evaluate", 49 | help="just do a syntactic analysis") 50 | opts.add_option("-b", "--no-beta-reduction", action="store_false", 51 | dest="beta", help="don't carry out beta-reduction") 52 | opts.add_option("-t", "--syntrace", action="count", dest="syntrace", 53 | help="set syntactic tracing on; requires '-e' option") 54 | opts.add_option("-T", "--semtrace", action="count", dest="semtrace", 55 | help="set semantic tracing on") 56 | 57 | 58 | 59 | (options, args) = opts.parse_args() 60 | 61 | SPACER = '-' * 30 62 | 63 | 64 | 65 | if options.demo == 'chat80': 66 | import model1 as model 67 | sentsfile = 'chat_sentences' 68 | gramfile = 'chat80.cfg' 69 | else: 70 | import model0 as model 71 | sentsfile = 'demo_sentences' 72 | gramfile = 'sem2.cfg' 73 | 74 | if options.sentences: 75 | sentsfile = options.sentences 76 | if options.grammar: 77 | gramfile = options.grammar 78 | if options.model: 79 | exec "import %s as model" % options.model 80 | 81 | sents = read_sents(sentsfile) 82 | 83 | # NB. GrammarFile is imported indirectly via nltk.semantics 84 | gram = GrammarFile.read_file(gramfile) 85 | 86 | m = model.m 87 | g = model.g 88 | 89 | if options.evaluate: 90 | evaluations = \ 91 | text_evaluate(sents, gram, m, g, semtrace=options.semtrace) 92 | else: 93 | semreps = \ 94 | text_interpret(sents, gram, beta_reduce=options.beta, syntrace=options.syntrace) 95 | 96 | for sent in sents: 97 | n = 1 98 | print('\nSentence: %s' % sent) 99 | print(SPACER) 100 | if options.evaluate: 101 | 102 | for (syntree, semrep, value) in evaluations[sent]: 103 | if isinstance(value, dict): 104 | value = set(value.keys()) 105 | print('%d: %s' % (n, semrep.infixify())) 106 | print(value) 107 | n += 1 108 | else: 109 | 110 | for (syntree, semrep) in semreps[sent]: 111 | print('%d: %s' % (n, semrep.infixify())) 112 | n += 1 113 | 114 | if __name__ == "__main__": 115 | demo() 116 | 117 | 118 | 119 | -------------------------------------------------------------------------------- /examples/grammars/book_grammars/discourse.fcfg: -------------------------------------------------------------------------------- 1 | ## Natural Language Toolkit: discourse.fcfg 2 | ## 3 | ## Grammar to illustrate simple 2-3 sentence discourse processing. 
4 | ## 5 | ## Developed as an extension of sem3.fcfg 6 | ## Main additions: 7 | ## - a few more lexical entries (including 'no' and 'the') 8 | ## - 'is', 'does' and auxiliary negation 9 | ## - Predicate categories, including predicate nominals and adjectives 10 | ## 11 | ## Author: Ewan Klein 12 | ## URL: 13 | ## For license information, see LICENSE.TXT 14 | 15 | % start S 16 | ############################ 17 | # Grammar Rules 18 | ############################# 19 | 20 | S[SEM = ] -> NP[NUM=?n,SEM=?subj] VP[NUM=?n,SEM=?vp] 21 | 22 | NP[NUM=?n,SEM= ] -> Det[NUM=?n,SEM=?det] Nom[NUM=?n,SEM=?nom] 23 | NP[LOC=?l,NUM=?n,SEM=?np] -> PropN[LOC=?l,NUM=?n,SEM=?np] 24 | 25 | NP[-LOC,NUM=sg,SEM=<\Q. (- exists x. (person(x) & Q(x)))>] -> 'nobody' | 'Nobody' 26 | NP[-LOC,NUM=sg,SEM=<\Q. exists x. (person(x) & Q(x))>] -> 'somebody' | 'Somebody' 27 | 28 | ## Copular predicates 29 | Pred[SEM=?prd] -> PredN[SEM=?prd] | PP[+LOC,+PRED,SEM=?prd] | Adj[SEM=?prd] 30 | 31 | ## Predicative NPs 32 | ## Doesn't bLOCk 'is every dog', but determiner SEMantics is ignored 33 | PredN[NUM=?n, SEM=?nom] -> Det[NUM=?n] Nom[NUM=?n, SEM=?nom] 34 | 35 | Nom[NUM=?n,SEM=?nom] -> N[NUM=?n,SEM=?nom] 36 | Nom[NUM=?n,SEM=] -> N[NUM=?n,SEM=?nom] PP[SEM=?pp] 37 | 38 | ## Transitive verbs 39 | VP[NUM=?n,SEM=] -> TV[NUM=?n,SEM=?v] NP[SEM=?obj] 40 | 41 | ## Copular VPs 42 | VP[NUM=?n,SEM=] -> AuxP[+COP,NUM=?n,SEM=?v] Pred[SEM=?prd] 43 | 44 | ## Do auxiliaries 45 | VP[+neg,NUM=?n,SEM=] -> AuxP[-COP,NUM=?n,SEM=?v] VP[NUM=pl,SEM=?vp] 46 | 47 | AuxP[COP=?c,NUM=?n,SEM=] -> Aux[COP=?c,NUM=?n,SEM=?aux] Neg[SEM=?neg] 48 | AuxP[COP=?c,NUM=?n,SEM=?aux] -> Aux[COP=?c,NUM=?n,SEM=?aux] 49 | 50 | ## Intransitive verbs 51 | VP[NUM=?n,SEM=?v] -> IV[NUM=?n,SEM=?v] 52 | 53 | ## VP-level PPs 54 | VP[NUM=?n,SEM=] -> VP[NUM=?n,SEM=?vp] PP[-PRED,SEM=?pp] 55 | 56 | PP[LOC=?l,PRED=?prd,SEM=] -> P[LOC=?l,PRED=?prd,SEM=?p] NP[LOC=?l,SEM=?np] 57 | 58 | ############################# 59 | # Lexical Rules 60 | ############################# 61 | 62 | PropN[-LOC,NUM=sg,SEM=<\P.P(John)>] -> 'John' 63 | PropN[-LOC,NUM=sg,SEM=<\P.P(Mary)>] -> 'Mary' 64 | PropN[-LOC,NUM=sg,SEM=<\P.P(Suzie)>] -> 'Suzie' 65 | PropN[-LOC,NUM=sg,SEM=<\P.P(Vincent)>] -> 'Vincent' 66 | PropN[-LOC,NUM=sg,SEM=<\P.P(Mia)>] -> 'Mia' 67 | PropN[-LOC,NUM=sg,SEM=<\P.P(Marsellus)>] -> 'Marsellus' 68 | PropN[-LOC,NUM=sg,SEM=<\P.P(Fido)>] -> 'Fido' 69 | PropN[+LOC, NUM=sg,SEM=<\P.P(Noosa)>] -> 'Noosa' 70 | 71 | NP[-LOC, NUM=sg, SEM=<\P.\x.P(x)>] -> 'who' | 'Who' 72 | 73 | Det[NUM=sg,SEM=<\P Q.all x.(P(x) -> Q(x))>] -> 'every' | 'Every' 74 | Det[NUM=pl,SEM=<\P Q.all x.(P(x) -> Q(x))>] -> 'all' | 'All' 75 | Det[SEM=<\P Q.exists x.(P(x) & Q(x))>] -> 'some' | 'Some' 76 | Det[NUM=sg,SEM=<\P Q.exists x.(P(x) & Q(x))>] -> 'a' | 'A' 77 | Det[NUM=sg,SEM=<\P Q.(- exists x.(P(x) & Q(x)))>] -> 'no' | 'No' 78 | Det[NUM=sg,SEM=<\P Q.exists x.((P(x) & Q(x)) & all y.(P(y) -> (x = y)))>] -> 'the' | 'The' 79 | 80 | N[NUM=sg,SEM=<\x.boy(x)>] -> 'boy' 81 | N[NUM=pl,SEM=<\x.boy(x)>] -> 'boys' 82 | N[NUM=sg,SEM=<\x.girl(x)>] -> 'girl' 83 | N[NUM=pl,SEM=<\x.girl(x)>] -> 'girls' 84 | N[NUM=sg,SEM=<\x.dog(x)>] -> 'dog' 85 | N[NUM=pl,SEM=<\x.dog(x)>] -> 'dogs' 86 | N[NUM=sg,SEM=<\x.student(x)>] -> 'student' 87 | N[NUM=pl,SEM=<\x.student(x)>] -> 'students' 88 | N[NUM=sg,SEM=<\x.person(x)>] -> 'person' 89 | N[NUM=pl,SEM=<\x.person(x)>] -> 'persons' 90 | N[NUM=sg,SEM=<\x.boxerdog(x)>] -> 'boxer' 91 | N[NUM=pl,SEM=<\x.boxerdog(x)>] -> 'boxers' 92 | N[NUM=sg,SEM=<\x.boxer(x)>] -> 'boxer' 93 | N[NUM=pl,SEM=<\x.boxer(x)>] -> 'boxers' 94 
| N[NUM=sg,SEM=<\x.garden(x)>] -> 'garden' 95 | N[NUM=sg,SEM=<\x.kitchen(x)>] -> 'kitchen' 96 | 97 | Adj[SEM=<\x.happy(x)>] -> 'happy' 98 | Adj[SEM=<\x.drunk(x)>] -> 'drunk' 99 | Adj[SEM=<\x.married(x)>] -> 'married' 100 | 101 | TV[NUM=sg,SEM=<\X y.X(\x.chase(y,x))>,tns=pres] -> 'chases' 102 | TV[NUM=pl,SEM=<\X y.X(\x.chase(y,x))>,tns=pres] -> 'chase' 103 | TV[NUM=sg,SEM=<\X y.X(\x.marry(y,x))>,tns=pres] -> 'marries' 104 | TV[NUM=pl,SEM=<\X y.X(\x.marry(y,x))>,tns=pres] -> 'marry' 105 | TV[NUM=sg,SEM=<\X y.X(\x.know(y,x))>,tns=pres] -> 'knows' 106 | TV[NUM=pl,SEM=<\X y.X(\x.know(y,x))>,tns=pres] -> 'know' 107 | TV[NUM=sg,SEM=<\X y.X(\x.see(y,x))>,tns=pres] -> 'sees' 108 | TV[NUM=pl,SEM=<\X y.X(\x.see(y,x))>,tns=pres] -> 'see' 109 | IV[NUM=sg,SEM=<\x.bark(x)>,tns=pres] -> 'barks' 110 | IV[NUM=pl,SEM=<\x.bark(x)>,tns=pres] -> 'bark' 111 | IV[NUM=sg,SEM=<\x.walk(x)>,tns=pres] -> 'walks' 112 | IV[NUM=pl,SEM=<\x.walk(x)>,tns=pres] -> 'walk' 113 | IV[NUM=pl,SEM=<\x.dance(x)>,tns=pres] -> 'dance' 114 | IV[NUM=sg,SEM=<\x.dance(x)>,tns=pres] -> 'dances' 115 | 116 | Aux[+COP,NUM=sg,SEM=<\P x.P(x)>,tns=pres] -> 'is' 117 | Aux[+COP,NUM=pl,SEM=<\P x.P(x)>,tns=pres] -> 'are' 118 | Aux[-COP,NUM=sg,SEM=<\P x.P(x)>,tns=pres] -> 'does' 119 | Aux[-COP,NUM=pl,SEM=<\P x.P(x)>,tns=pres] -> 'do' 120 | 121 | P[+LOC,-PRED,SEM=<\X P x.X(\y.(P(x) & in(x,y)))>] -> 'in' 122 | P[+LOC,+PRED,SEM=<\X x.X(\y.in(x,y))>] -> 'in' 123 | P[-LOC,SEM=<\X P x.X(\y.(P(x) & with(x,y)))>] -> 'with' 124 | 125 | Neg[SEM=<\T P.T(\x.(- P(x)))>] -> 'not' 126 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/gluesemantics.fcfg: -------------------------------------------------------------------------------- 1 | % start S 2 | 3 | ############################# 4 | # Grammar Rules 5 | ############################# 6 | 7 | # S expansion rules 8 | S -> NP[num=?n, case=nom] VP[num=?n] 9 | S -> S CC[sem=cc_clause] S 10 | 11 | # NP expansion rules 12 | NP[num=?n, gender=?g] -> Det[num=?n] N[num=?n, gender=?g] 13 | NP[num=?n, gender=?g] -> PropN[num=?n, gender=?g] 14 | NP[num=?n, case=?c, gender=?g] -> Pro[num=?n, case=?c, gender=?g] 15 | NP[num=pl, gender=?g] -> N[num=pl] 16 | NP[num=?n, gender=?g] -> NP[num=?n, gender=?g] PP 17 | NP[num=pl] -> NP CC[sem=cc_np] NP 18 | 19 | # N's can have Adjectives in front 20 | N[num=?n] -> JJ[type=attributive] N[num=?n] 21 | 22 | # JJs can have ADVs in front 23 | JJ -> ADV JJ 24 | 25 | # VP expansion rules 26 | VP[tense=?t, num=?n] -> IV[tense=?t, num=?n] 27 | VP[tense=?t, num=?n] -> TV[tense=?t, num=?n] NP[case=acc] 28 | VP[tense=?t, num=?n] -> TVComp[tense=?t, num=?n] S 29 | VP[tense=?t, num=?n] -> DTV[tense=?t, num=?n] NP[case=acc] NP[case=acc] 30 | VP[tense=?t, num=?n] -> EquiV[tense=?t, num=?n] TO VP[tense=inf] 31 | VP[tense=?t, num=?n] -> ObjEquiV[tense=?t, num=?n] NP[case=acc] TO VP[tense=inf] 32 | VP[tense=?t, num=?n] -> RaisingV[tense=?t, num=?n] TO VP[tense=inf] 33 | VP[tense=?t, num=?n] -> ADV VP[tense=?t, num=?n] 34 | VP[tense=?t, num=?n] -> VP[tense=?t, num=?n] PP 35 | VP[tense=?t, num=?n] -> VP[tense=?t, num=?n] CC[sem=cc_vp] VP[tense=?t, num=?n] 36 | 37 | # PP expansion 38 | PP -> IN NP 39 | 40 | # Det types 41 | Det[num=sg] -> DT 42 | Det[num=pl] -> DTS 43 | Det -> AT 44 | Det[num=?n] -> DTI[num=?n] 45 | Det[num=?n] -> ABN[num=?n] 46 | 47 | 48 | ############################# 49 | # Lexical Rules 50 | ############################# 51 | 52 | DT -> 'this' | 'each' 53 | DTS -> 'these' 54 | AT[num=sg, sem=ex_quant] -> 'a' | 'an' 55 | 
AT[sem=art_def] -> 'the' 56 | DTI[num=sg, sem=univ_quant] -> 'every' 57 | DTI[sem=ex_quant] -> 'some' 58 | ABN[num=sg] -> 'half' 59 | ABN[num=pl, sem=univ_quant] -> 'all' 60 | 61 | PropN[num=sg, gender=m, sem=pn] -> 'Kim' | 'Jody' | 'Mary' | 'Sue' 62 | PropN[num=sg, gender=m, sem=pn] -> 'David' | 'John' | 'Tom' 63 | PropN[num=pl, sem=pn] -> 'JM' 64 | 65 | N[num=sg, sem=n] -> 'boy' | 'car' | 'cat' | 'child' | 'criminal' | 'dog' | 'gift' | 'girl' | 'man' | 'mouse' | 'person' | 'pizza' | 'racketeer' | 'sandwich' | 'senator' | 'student' | 'telescope' | 'thing' | 'unicorn' | 'woman' 66 | N[num=pl, sem=n] -> 'boys' | 'cars' | 'cats' | 'children' | 'criminals' | 'dogs' | 'gifts' | 'girls' | 'men' | 'mice' | 'people' | 'pizzas' | 'racketeers' | 'sandwiches' | 'senators' | 'students' | 'telescopes' | 'things' | 'unicorns' | 'women' 67 | 68 | IV[tense=pres, num=sg, sem=iv] -> 'approaches' | 'comes' | 'disappears' | 'goes' | 'leaves' | 'vanishes' | 'walks' | 'yawns' 69 | IV[tense=pres, num=pl, sem=iv] -> 'approach' | 'come' | 'disappear' | 'go' | 'leave' | 'vanish' | 'walk' | 'yawn' 70 | IV[tense=past, num=?n, sem=iv] -> 'approached' | 'came' | 'disappeared' | 'went' | 'left' | 'vanished' | 'walked' | 'yawned' 71 | IV[tense=inf, num=na, sem=iv] -> 'approach' | 'come' | 'disappear' | 'go' | 'leave' | 'vanish' | 'walk' | 'yawn' 72 | 73 | TV[tense=pres, num=sg, sem=tv] -> 'chases' | 'eats' | 'finds' | 'likes' | 'sees' | 'orders' 74 | TV[tense=pres, num=pl, sem=tv] -> 'chase' | 'eat' | 'find' | 'like' | 'see' | 'order' 75 | TV[tense=past, num=?n, sem=tv] -> 'chased' | 'ate' | 'found' | 'liked' | 'saw' | 'ordered' 76 | TV[tense=inf, num=na, sem=tv] -> 'chase' | 'eat' | 'find' | 'like' | 'see' | 'order' 77 | 78 | DTV[tense=pres, num=sg, sem=dtv] -> 'gives' 79 | DTV[tense=pres, num=pl, sem=dtv] -> 'give' 80 | DTV[tense=past, num=?n, sem=dtv] -> 'gave' 81 | DTV[tense=inf, num=na, sem=dtv] -> 'give' 82 | 83 | TVComp[tense=pres, num=sg, sem=tv_comp] -> 'believes' 84 | TVComp[tense=pres, num=pl, sem=tv_comp] -> 'believe' 85 | TVComp[tense=past, num=?n, sem=tv_comp] -> 'believed' 86 | TVComp[tense=inf, num=na, sem=tv_comp] -> 'believe' 87 | 88 | EquiV[tense=pres, num=sg, sem=equi] -> 'tries' 89 | EquiV[tense=pres, num=pl, sem=equi] -> 'try' 90 | EquiV[tense=past, num=?n, sem=equi] -> 'tried' 91 | EquiV[tense=inf, num=na, sem=equi] -> 'try' 92 | 93 | ObjEquiV[tense=pres, num=sg, sem=obj_equi] -> 'persuades' 94 | ObjEquiV[tense=pres, num=pl, sem=obj_equi] -> 'persuade' 95 | ObjEquiV[tense=past, num=?n, sem=obj_equi] -> 'persuaded' 96 | ObjEquiV[tense=inf, num=na, sem=obj_equi] -> 'persuade' 97 | 98 | RaisingV[tense=pres, num=sg, sem=raising] -> 'seems' 99 | RaisingV[tense=pres, num=pl, sem=raising] -> 'seem' 100 | RaisingV[tense=past, num=?n, sem=raising] -> 'seemed' 101 | RaisingV[tense=inf, num=na, sem=raising] -> 'seem' 102 | 103 | #infinitive marker 104 | TO -> 'to' 105 | 106 | JJ[type=attributive, sem=adj_attributive_intersective] -> 'gray' | 'swedish' 107 | JJ[type=attributive, sem=adj_attributive_nonintersective] -> 'alleged' 108 | JJ[type=attributive, sem=adj_attributive_relative_intersective] -> 'big' | 'fat' 109 | JJ[type=attributive, sem=adj_attributive_relative_nonintersective] -> 'confessed' | 'former' 110 | JJ[type=predicative, sem=adj_predicative] -> 'gray' | 'swedish' 111 | 112 | ADV[sem=adv] -> 'apparently' | 'possibly' | 'very' 113 | ADV[sem=adv_ModifyingRelativeAdj] -> 'very' 114 | 115 | CC[sem=cc_clause] -> 'and' 116 | CC[sem=cc_np] -> 'and' 117 | CC[sem=cc_vp] -> 'and' 118 | 119 | IN -> 'at' | 
'by' | 'from' | 'on' | 'with' 120 | 121 | Pro[num=sg, gender=m, -reflex, case=nom, sem=pro] -> 'he' 122 | Pro[num=sg, gender=m, -reflex, case=acc, sem=pro] -> 'him' 123 | Pro[num=sg, gender=m, +reflex, case=acc, sem=pro] -> 'himself' 124 | Pro[num=sg, gender=f, -reflex, sem=pro] -> 'her' 125 | Pro[num=sg, gender=f, +reflex, case=acc, sem=pro] -> 'herself' 126 | Pro[num=sg, gender=n, -reflex, sem=pro] -> 'it' 127 | Pro[num=sg, gender=n, +reflex, case=acc, sem=pro] -> 'itself' 128 | Pro[num=pl, -reflex, case=nom, sem=pro] -> 'they' 129 | Pro[num=pl, -reflex, case=acc, sem=pro] -> 'them' 130 | Pro[num=pl, +reflex, case=acc, sem=pro] -> 'themselves' 131 | Pro[num=pl, +reflex, case=acc, sem=recip] -> 'eachother' 132 | -------------------------------------------------------------------------------- /examples/school/categories.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | from words import * 4 | from nltk.wordnet import * 5 | from operator import itemgetter 6 | import nltk 7 | import re 8 | from string import join 9 | 10 | def build_word_associations(): 11 | cfd = nltk.ConditionalFreqDist() 12 | 13 | # get a list of all English stop words 14 | stopwords_list = nltk.corpus.stopwords.words('english') 15 | 16 | # count words that occur within a window of size 5 ahead of other words 17 | for sentence in nltk.corpus.brown.tagged_sents(): 18 | sentence = [(token.lower(), tag) for (token, tag) in sentence if token.lower() not in stopwords_list] 19 | for (index, (token, tag)) in enumerate(sentence): 20 | if token not in stopwords_list: 21 | window = sentence[index+1:index+5] 22 | for (window_token, window_tag) in window: 23 | if window_token not in stopwords_list and window_tag[0] is tag[0]: 24 | cfd[token].inc(window_token) 25 | return cfd 26 | 27 | def associate(): 28 | while True: 29 | word = raw_input("Enter a word: ") 30 | for i in range(100): 31 | next = cfd[word].max() 32 | if next: 33 | print("->", next,) 34 | word = next 35 | else: 36 | break 37 | print() 38 | 39 | def build_word_contexts(words): 40 | contexts_to_words = {} 41 | words = [w.lower() for w in words] 42 | for i in range(1,len(words)-1): 43 | context = words[i-1]+"_"+words[i+1] 44 | if context not in contexts_to_words: 45 | contexts_to_words[context] = [] 46 | contexts_to_words[context].append(words[i]) 47 | # inverted structure, tracking frequency 48 | words_to_contexts = {} 49 | for context in contexts_to_words: 50 | for word in contexts_to_words[context]: 51 | if word not in words_to_contexts: 52 | words_to_contexts[word] = [] 53 | words_to_contexts[word].append(context) 54 | return words_to_contexts, contexts_to_words 55 | 56 | def search_contexts(words): 57 | words_to_contexts, contexts_to_words = build_word_contexts(words) 58 | while True: 59 | hits = [] 60 | word = raw_input("word> ") 61 | if word not in words_to_contexts: 62 | print("Word not found") 63 | continue 64 | contexts = words_to_contexts[word] 65 | for w in words_to_contexts: # all words 66 | for context in words_to_contexts[w]: 67 | if context in contexts: 68 | hits.append(w) 69 | hit_freqs = count_words(hits).items() 70 | sorted_hits = sorted(hit_freqs, key=itemgetter(1), reverse=True) 71 | words = [word for (word, count) in sorted_hits[1:] if count > 1] 72 | print(join(words)) 73 | 74 | def lookup(word): 75 | for category in [N, V, ADJ, ADV]: 76 | if word in category: 77 | for synset in category[word]: 78 | print(category[word], ":", synset.gloss) 79 | 80 | 
############################################ 81 | # Simple Tagger 82 | ############################################ 83 | 84 | # map brown pos tags 85 | # http://khnt.hit.uib.no/icame/manuals/brown/INDEX.HTM 86 | 87 | def map1(tag): 88 | tag = re.sub(r'fw-', '', tag) # foreign words 89 | tag = re.sub(r'-[th]l', '', tag) # headlines, titles 90 | tag = re.sub(r'-nc', '', tag) # cited 91 | tag = re.sub(r'ber?', 'vb', tag) # verb "to be" 92 | tag = re.sub(r'hv', 'vb', tag) # verb "to have" 93 | tag = re.sub(r'do', 'vb', tag) # verb "to do" 94 | tag = re.sub(r'nc', 'nn', tag) # cited word 95 | tag = re.sub(r'z', '', tag) # third-person singular 96 | return tag 97 | 98 | def map2(tag): 99 | tag = re.sub(r'\bj[^-+]*', 'J', tag) # adjectives 100 | tag = re.sub(r'\bp[^-+]*', 'P', tag) # pronouns 101 | tag = re.sub(r'\bm[^-+]*', 'M', tag) # modals 102 | tag = re.sub(r'\bq[^-+]*', 'Q', tag) # qualifiers 103 | tag = re.sub(r'\babl', 'Q', tag) # qualifiers 104 | tag = re.sub(r'\bab[nx]', 'D', tag) # determiners 105 | tag = re.sub(r'\bap', 'D', tag) # determiners 106 | tag = re.sub(r'\bd[^-+]*', 'D', tag) # determiners 107 | tag = re.sub(r'\bat', 'D', tag) # determiners 108 | tag = re.sub(r'\bw[^-+]*', 'W', tag) # wh words 109 | tag = re.sub(r'\br[^-+]*', 'R', tag) # adverbs 110 | tag = re.sub(r'\bto', 'T', tag) # "to" 111 | tag = re.sub(r'\bc[cs]', 'C', tag) # conjunctions 112 | tag = re.sub(r's', '', tag) # plurals 113 | tag = re.sub(r'\bin', 'I', tag) # prepositions 114 | tag = re.sub(r'\buh', 'U', tag) # interjections (uh) 115 | tag = re.sub(r'\bex', 'E', tag) # existential "there" 116 | tag = re.sub(r'\bvbn', 'VN', tag) # past participle 117 | tag = re.sub(r'\bvbd', 'VD', tag) # past tense 118 | tag = re.sub(r'\bvbg', 'VG', tag) # gerund 119 | tag = re.sub(r'\bvb', 'V', tag) # verb 120 | tag = re.sub(r'\bnn', 'N', tag) # noun 121 | tag = re.sub(r'\bnp', 'NP', tag) # proper noun 122 | tag = re.sub(r'\bnr', 'NR', tag) # adverbial noun 123 | tag = re.sub(r'\bex', 'E', tag) # existential "there" 124 | tag = re.sub(r'\bod', 'OD', tag) # ordinal 125 | tag = re.sub(r'\bcd', 'CD', tag) # cardinal 126 | tag = re.sub(r'-t', '', tag) # misc 127 | tag = re.sub(r'[a-z\*]', '', tag) # misc 128 | return tag 129 | 130 | def map(tag): 131 | return map2(map1(tag.lower())) 132 | 133 | # print(sorted(set(map2(map1(tag)) for s in brown.tagged() for w,tag in s))) 134 | 135 | def load_brown_corpus(sections): 136 | global map 137 | corpus = nltk.corpus.brown.tagged_sents(tuple(sections)) 138 | return [[(w.lower(), map(t)) for w, t in sent] for sent in corpus] 139 | 140 | def train_tagger(corpus): 141 | t0 = nltk.tag.Default('N') 142 | t1 = nltk.tag.Unigram(cutoff=0, backoff=t0) 143 | t2 = nltk.tag.Bigram(cutoff=0, backoff=t1) 144 | t3 = nltk.tag.Trigram(cutoff=1, backoff=t2) 145 | 146 | t1.train(corpus, verbose=True) 147 | t2.train(corpus, verbose=True) 148 | t3.train(corpus, verbose=True) 149 | return t3 150 | 151 | def tag(corpus): 152 | print("Training tagger...") 153 | tagger = train_tagger(corpus) 154 | while True: 155 | text = raw_input("sentence> ") 156 | words = text.split() 157 | print(join(word+"/"+tag for word, tag in tagger.tag(words))) 158 | 159 | WORD_OR_TAG = '[^/ ]+' 160 | BOUNDARY = r'\b' 161 | 162 | def process(pattern): 163 | new = [] 164 | for term in pattern.split(): 165 | if re.match('[A-Z]+$', term): 166 | new.append(BOUNDARY + WORD_OR_TAG + '/' + term + BOUNDARY) 167 | elif '/' in term: 168 | new.append(BOUNDARY + term + BOUNDARY) 169 | else: 170 | new.append(BOUNDARY + term + '/' + WORD_OR_TAG + 
BOUNDARY) 171 | return join(new) 172 | 173 | def search(corpus, num=25): 174 | print("Loading corpus...") 175 | strings = [join(w+'/'+t for (w,t) in sent) for sent in corpus] 176 | while True: 177 | pattern = "" 178 | while not pattern: 179 | pattern = raw_input("search> ") 180 | pattern = process(pattern) 181 | i = 0 182 | for sent in strings: 183 | m = re.search(pattern, sent) 184 | if m: 185 | sent = ' '*35 + sent + ' '*45 186 | print(sent[m.start():m.start()+80]) 187 | i += 1 188 | if i > num: 189 | break 190 | 191 | ############################################ 192 | # Wordnet Browser 193 | # now incorporated into NLTK as wordnet.browse 194 | ############################################ 195 | 196 | ############################################ 197 | # Mad Libs 198 | ############################################ 199 | 200 | madlib = """Britney Spears will meet up with her %(NP)s label for 201 | crisis talks about the future of her %(N)s this week reports Digital Spy. 202 | %(NP)s Records plan to tell Spears to stop %(VG)s and take more 203 | care of her %(J)s image if she wants to avoid being %(VD)s by the noun. 204 | The news %(V)s shortly after Britney posted a message on her 205 | website promising a new album and tour. The last couple of years 206 | have been quite a ride for me, the media has criticized %(P)s every 207 | noun %(C)s printed a skewed perception of who I really am as a human 208 | being, she wrote in a letter posted %(NR)s.""" 209 | 210 | # mapping = {} 211 | # mapping['NP'] = 212 | # mapping['N'] = 213 | # mapping['VG'] = 214 | # mapping['J'] = 215 | # mapping['VD'] = 216 | # mapping['V'] = 217 | # mapping['P'] = 218 | # mapping['C'] = 219 | # mapping['NR'] = 220 | 221 | # print(madlib % mapping) 222 | 223 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | -------------------------------------------------------------------------------- /examples/grammars/sample_grammars/chat_pnames.fcfg: -------------------------------------------------------------------------------- 1 | 2 | ################################################################## 3 | # Lexical rules automatically generated by running 'chat80.py -x'. 
4 | ################################################################## 5 | 6 | PropN[num=sg, sem=<\P.P(abidjan)>] -> 'Abidjan' 7 | PropN[num=sg, sem=<\P.P(abu_dhabi)>] -> 'Abu_Dhabi' 8 | PropN[num=sg, sem=<\P.P(accra)>] -> 'Accra' 9 | PropN[num=sg, sem=<\P.P(addis_ababa)>] -> 'Addis_Ababa' 10 | PropN[num=sg, sem=<\P.P(aden)>] -> 'Aden' 11 | PropN[num=sg, sem=<\P.P(afghani)>] -> 'Afghani' 12 | PropN[num=sg, sem=<\P.P(afghanistan)>] -> 'Afghanistan' 13 | PropN[num=sg, sem=<\P.P(africa)>] -> 'Africa' 14 | PropN[num=sg, sem=<\P.P(albania)>] -> 'Albania' 15 | PropN[num=sg, sem=<\P.P(algeria)>] -> 'Algeria' 16 | PropN[num=sg, sem=<\P.P(algiers)>] -> 'Algiers' 17 | PropN[num=sg, sem=<\P.P(amazon)>] -> 'Amazon' 18 | PropN[num=sg, sem=<\P.P(america)>] -> 'America' 19 | PropN[num=sg, sem=<\P.P(amman)>] -> 'Amman' 20 | PropN[num=sg, sem=<\P.P(amsterdam)>] -> 'Amsterdam' 21 | PropN[num=sg, sem=<\P.P(amu_darya)>] -> 'Amu_Darya' 22 | PropN[num=sg, sem=<\P.P(amur)>] -> 'Amur' 23 | PropN[num=sg, sem=<\P.P(andorra)>] -> 'Andorra' 24 | PropN[num=sg, sem=<\P.P(andorra_la_villa)>] -> 'Andorra_La_Villa' 25 | PropN[num=sg, sem=<\P.P(angola)>] -> 'Angola' 26 | PropN[num=sg, sem=<\P.P(ankara)>] -> 'Ankara' 27 | PropN[num=sg, sem=<\P.P(antarctic_circle)>] -> 'Antarctic_Circle' 28 | PropN[num=sg, sem=<\P.P(antarctica)>] -> 'Antarctica' 29 | PropN[num=sg, sem=<\P.P(apia)>] -> 'Apia' 30 | PropN[num=sg, sem=<\P.P(arctic_circle)>] -> 'Arctic_Circle' 31 | PropN[num=sg, sem=<\P.P(arctic_ocean)>] -> 'Arctic_Ocean' 32 | PropN[num=sg, sem=<\P.P(argentina)>] -> 'Argentina' 33 | PropN[num=sg, sem=<\P.P(ariary)>] -> 'Ariary' 34 | PropN[num=sg, sem=<\P.P(asia)>] -> 'Asia' 35 | PropN[num=sg, sem=<\P.P(asuncion)>] -> 'Asuncion' 36 | PropN[num=sg, sem=<\P.P(athens)>] -> 'Athens' 37 | PropN[num=sg, sem=<\P.P(atlantic)>] -> 'Atlantic' 38 | PropN[num=sg, sem=<\P.P(australasia)>] -> 'Australasia' 39 | PropN[num=sg, sem=<\P.P(australia)>] -> 'Australia' 40 | PropN[num=sg, sem=<\P.P(australian_dollar)>] -> 'Australian_Dollar' 41 | PropN[num=sg, sem=<\P.P(austria)>] -> 'Austria' 42 | PropN[num=sg, sem=<\P.P(baghdad)>] -> 'Baghdad' 43 | PropN[num=sg, sem=<\P.P(bahamas)>] -> 'Bahamas' 44 | PropN[num=sg, sem=<\P.P(bahamian_dollar)>] -> 'Bahamian_Dollar' 45 | PropN[num=sg, sem=<\P.P(bahrain)>] -> 'Bahrain' 46 | PropN[num=sg, sem=<\P.P(baht)>] -> 'Baht' 47 | PropN[num=sg, sem=<\P.P(balboa)>] -> 'Balboa' 48 | PropN[num=sg, sem=<\P.P(baltic)>] -> 'Baltic' 49 | PropN[num=sg, sem=<\P.P(bamako)>] -> 'Bamako' 50 | PropN[num=sg, sem=<\P.P(bangkok)>] -> 'Bangkok' 51 | PropN[num=sg, sem=<\P.P(bangladesh)>] -> 'Bangladesh' 52 | PropN[num=sg, sem=<\P.P(bangui)>] -> 'Bangui' 53 | PropN[num=sg, sem=<\P.P(banjul)>] -> 'Banjul' 54 | PropN[num=sg, sem=<\P.P(barbados)>] -> 'Barbados' 55 | PropN[num=sg, sem=<\P.P(barcelona)>] -> 'Barcelona' 56 | PropN[num=sg, sem=<\P.P(beirut)>] -> 'Beirut' 57 | PropN[num=sg, sem=<\P.P(belgium)>] -> 'Belgium' 58 | PropN[num=sg, sem=<\P.P(belgrade)>] -> 'Belgrade' 59 | PropN[num=sg, sem=<\P.P(belize)>] -> 'Belize' 60 | PropN[num=sg, sem=<\P.P(belize_town)>] -> 'Belize_Town' 61 | PropN[num=sg, sem=<\P.P(berlin)>] -> 'Berlin' 62 | PropN[num=sg, sem=<\P.P(bern)>] -> 'Bern' 63 | PropN[num=sg, sem=<\P.P(bhutan)>] -> 'Bhutan' 64 | PropN[num=sg, sem=<\P.P(birmingham)>] -> 'Birmingham' 65 | PropN[num=sg, sem=<\P.P(bissau)>] -> 'Bissau' 66 | PropN[num=sg, sem=<\P.P(black_sea)>] -> 'Black_Sea' 67 | PropN[num=sg, sem=<\P.P(bogota)>] -> 'Bogota' 68 | PropN[num=sg, sem=<\P.P(bolivar)>] -> 'Bolivar' 69 | PropN[num=sg, sem=<\P.P(bolivia)>] -> 
'Bolivia' 70 | PropN[num=sg, sem=<\P.P(bombay)>] -> 'Bombay' 71 | PropN[num=sg, sem=<\P.P(bonn)>] -> 'Bonn' 72 | PropN[num=sg, sem=<\P.P(botswana)>] -> 'Botswana' 73 | PropN[num=sg, sem=<\P.P(brahmaputra)>] -> 'Brahmaputra' 74 | PropN[num=sg, sem=<\P.P(brasilia)>] -> 'Brasilia' 75 | PropN[num=sg, sem=<\P.P(brazil)>] -> 'Brazil' 76 | PropN[num=sg, sem=<\P.P(brazzaville)>] -> 'Brazzaville' 77 | PropN[num=sg, sem=<\P.P(bridgetown)>] -> 'Bridgetown' 78 | PropN[num=sg, sem=<\P.P(brussels)>] -> 'Brussels' 79 | PropN[num=sg, sem=<\P.P(bucharest)>] -> 'Bucharest' 80 | PropN[num=sg, sem=<\P.P(budapest)>] -> 'Budapest' 81 | PropN[num=sg, sem=<\P.P(buenos_aires)>] -> 'Buenos_Aires' 82 | PropN[num=sg, sem=<\P.P(bujumbura)>] -> 'Bujumbura' 83 | PropN[num=sg, sem=<\P.P(bulgaria)>] -> 'Bulgaria' 84 | PropN[num=sg, sem=<\P.P(burma)>] -> 'Burma' 85 | PropN[num=sg, sem=<\P.P(burundi)>] -> 'Burundi' 86 | PropN[num=sg, sem=<\P.P(cairo)>] -> 'Cairo' 87 | PropN[num=sg, sem=<\P.P(calcutta)>] -> 'Calcutta' 88 | PropN[num=sg, sem=<\P.P(cambodia)>] -> 'Cambodia' 89 | PropN[num=sg, sem=<\P.P(cameroon)>] -> 'Cameroon' 90 | PropN[num=sg, sem=<\P.P(canada)>] -> 'Canada' 91 | PropN[num=sg, sem=<\P.P(canadian_dollar)>] -> 'Canadian_Dollar' 92 | PropN[num=sg, sem=<\P.P(canberra)>] -> 'Canberra' 93 | PropN[num=sg, sem=<\P.P(canton)>] -> 'Canton' 94 | PropN[num=sg, sem=<\P.P(caracas)>] -> 'Caracas' 95 | PropN[num=sg, sem=<\P.P(caribbean)>] -> 'Caribbean' 96 | PropN[num=sg, sem=<\P.P(caspian)>] -> 'Caspian' 97 | PropN[num=sg, sem=<\P.P(cayenne)>] -> 'Cayenne' 98 | PropN[num=sg, sem=<\P.P(cedi)>] -> 'Cedi' 99 | PropN[num=sg, sem=<\P.P(central_africa)>] -> 'Central_Africa' 100 | PropN[num=sg, sem=<\P.P(central_african_republic)>] -> 'Central_African_Republic' 101 | PropN[num=sg, sem=<\P.P(central_america)>] -> 'Central_America' 102 | PropN[num=sg, sem=<\P.P(cfa_franc)>] -> 'Cfa_Franc' 103 | PropN[num=sg, sem=<\P.P(chad)>] -> 'Chad' 104 | PropN[num=sg, sem=<\P.P(chicago)>] -> 'Chicago' 105 | PropN[num=sg, sem=<\P.P(chile)>] -> 'Chile' 106 | PropN[num=sg, sem=<\P.P(china)>] -> 'China' 107 | PropN[num=sg, sem=<\P.P(chungking)>] -> 'Chungking' 108 | PropN[num=sg, sem=<\P.P(colombia)>] -> 'Colombia' 109 | PropN[num=sg, sem=<\P.P(colombo)>] -> 'Colombo' 110 | PropN[num=sg, sem=<\P.P(colon)>] -> 'Colon' 111 | PropN[num=sg, sem=<\P.P(colorado)>] -> 'Colorado' 112 | PropN[num=sg, sem=<\P.P(conakry)>] -> 'Conakry' 113 | PropN[num=sg, sem=<\P.P(congo)>] -> 'Congo' 114 | PropN[num=sg, sem=<\P.P(congo_river)>] -> 'Congo_River' 115 | PropN[num=sg, sem=<\P.P(copenhagen)>] -> 'Copenhagen' 116 | PropN[num=sg, sem=<\P.P(cordoba)>] -> 'Cordoba' 117 | PropN[num=sg, sem=<\P.P(costa_rica)>] -> 'Costa_Rica' 118 | PropN[num=sg, sem=<\P.P(cruzeiro)>] -> 'Cruzeiro' 119 | PropN[num=sg, sem=<\P.P(cuba)>] -> 'Cuba' 120 | PropN[num=sg, sem=<\P.P(cubango)>] -> 'Cubango' 121 | PropN[num=sg, sem=<\P.P(cyprus)>] -> 'Cyprus' 122 | PropN[num=sg, sem=<\P.P(czechoslovakia)>] -> 'Czechoslovakia' 123 | PropN[num=sg, sem=<\P.P(dacca)>] -> 'Dacca' 124 | PropN[num=sg, sem=<\P.P(dahomey)>] -> 'Dahomey' 125 | PropN[num=sg, sem=<\P.P(dairen)>] -> 'Dairen' 126 | PropN[num=sg, sem=<\P.P(dakar)>] -> 'Dakar' 127 | PropN[num=sg, sem=<\P.P(dalasi)>] -> 'Dalasi' 128 | PropN[num=sg, sem=<\P.P(damascus)>] -> 'Damascus' 129 | PropN[num=sg, sem=<\P.P(danube)>] -> 'Danube' 130 | PropN[num=sg, sem=<\P.P(dar_es_salaam)>] -> 'Dar_Es_Salaam' 131 | PropN[num=sg, sem=<\P.P(ddr_mark)>] -> 'Ddr_Mark' 132 | PropN[num=sg, sem=<\P.P(delhi)>] -> 'Delhi' 133 | PropN[num=sg, sem=<\P.P(denmark)>] 
-> 'Denmark' 134 | PropN[num=sg, sem=<\P.P(detroit)>] -> 'Detroit' 135 | PropN[num=sg, sem=<\P.P(deutsche_mark)>] -> 'Deutsche_Mark' 136 | PropN[num=sg, sem=<\P.P(dinar)>] -> 'Dinar' 137 | PropN[num=sg, sem=<\P.P(dirham)>] -> 'Dirham' 138 | PropN[num=sg, sem=<\P.P(djibouti)>] -> 'Djibouti' 139 | PropN[num=sg, sem=<\P.P(doha)>] -> 'Doha' 140 | PropN[num=sg, sem=<\P.P(dollar)>] -> 'Dollar' 141 | PropN[num=sg, sem=<\P.P(dominican_republic)>] -> 'Dominican_Republic' 142 | PropN[num=sg, sem=<\P.P(don)>] -> 'Don' 143 | PropN[num=sg, sem=<\P.P(dong)>] -> 'Dong' 144 | PropN[num=sg, sem=<\P.P(drachma)>] -> 'Drachma' 145 | PropN[num=sg, sem=<\P.P(dublin)>] -> 'Dublin' 146 | PropN[num=sg, sem=<\P.P(east_africa)>] -> 'East_Africa' 147 | PropN[num=sg, sem=<\P.P(east_berlin)>] -> 'East_Berlin' 148 | PropN[num=sg, sem=<\P.P(east_caribbean_dollar)>] -> 'East_Caribbean_Dollar' 149 | PropN[num=sg, sem=<\P.P(east_carribean_dollar)>] -> 'East_Carribean_Dollar' 150 | PropN[num=sg, sem=<\P.P(east_germany)>] -> 'East_Germany' 151 | PropN[num=sg, sem=<\P.P(eastern_europe)>] -> 'Eastern_Europe' 152 | PropN[num=sg, sem=<\P.P(ecuador)>] -> 'Ecuador' 153 | PropN[num=sg, sem=<\P.P(egypt)>] -> 'Egypt' 154 | PropN[num=sg, sem=<\P.P(egyptian_pound)>] -> 'Egyptian_Pound' 155 | PropN[num=sg, sem=<\P.P(eire)>] -> 'Eire' 156 | PropN[num=sg, sem=<\P.P(el_salvador)>] -> 'El_Salvador' 157 | PropN[num=sg, sem=<\P.P(elbe)>] -> 'Elbe' 158 | PropN[num=sg, sem=<\P.P(equator)>] -> 'Equator' 159 | PropN[num=sg, sem=<\P.P(equatorial_guinea)>] -> 'Equatorial_Guinea' 160 | PropN[num=sg, sem=<\P.P(escudo)>] -> 'Escudo' 161 | PropN[num=sg, sem=<\P.P(ethiopean_dollar)>] -> 'Ethiopean_Dollar' 162 | PropN[num=sg, sem=<\P.P(ethiopia)>] -> 'Ethiopia' 163 | PropN[num=sg, sem=<\P.P(euphrates)>] -> 'Euphrates' 164 | PropN[num=sg, sem=<\P.P(europe)>] -> 'Europe' 165 | PropN[num=sg, sem=<\P.P(far_east)>] -> 'Far_East' 166 | PropN[num=sg, sem=<\P.P(fiji)>] -> 'Fiji' 167 | PropN[num=sg, sem=<\P.P(fiji_dollar)>] -> 'Fiji_Dollar' 168 | PropN[num=sg, sem=<\P.P(finland)>] -> 'Finland' 169 | PropN[num=sg, sem=<\P.P(forint)>] -> 'Forint' 170 | PropN[num=sg, sem=<\P.P(franc)>] -> 'Franc' 171 | PropN[num=sg, sem=<\P.P(franc_peseta)>] -> 'Franc_Peseta' 172 | PropN[num=sg, sem=<\P.P(france)>] -> 'France' 173 | PropN[num=sg, sem=<\P.P(freetown)>] -> 'Freetown' 174 | PropN[num=sg, sem=<\P.P(french_franc)>] -> 'French_Franc' 175 | PropN[num=sg, sem=<\P.P(french_guiana)>] -> 'French_Guiana' 176 | PropN[num=sg, sem=<\P.P(gabon)>] -> 'Gabon' 177 | PropN[num=sg, sem=<\P.P(gaborone)>] -> 'Gaborone' 178 | PropN[num=sg, sem=<\P.P(gambia)>] -> 'Gambia' 179 | PropN[num=sg, sem=<\P.P(ganges)>] -> 'Ganges' 180 | PropN[num=sg, sem=<\P.P(georgetown)>] -> 'Georgetown' 181 | PropN[num=sg, sem=<\P.P(ghana)>] -> 'Ghana' 182 | PropN[num=sg, sem=<\P.P(glasgow)>] -> 'Glasgow' 183 | PropN[num=sg, sem=<\P.P(gourde)>] -> 'Gourde' 184 | PropN[num=sg, sem=<\P.P(greece)>] -> 'Greece' 185 | PropN[num=sg, sem=<\P.P(greenland)>] -> 'Greenland' 186 | PropN[num=sg, sem=<\P.P(grenada)>] -> 'Grenada' 187 | PropN[num=sg, sem=<\P.P(guarani)>] -> 'Guarani' 188 | PropN[num=sg, sem=<\P.P(guatamala_city)>] -> 'Guatamala_City' 189 | PropN[num=sg, sem=<\P.P(guatemala)>] -> 'Guatemala' 190 | PropN[num=sg, sem=<\P.P(guilder)>] -> 'Guilder' 191 | PropN[num=sg, sem=<\P.P(guinea)>] -> 'Guinea' 192 | PropN[num=sg, sem=<\P.P(guinea_bissau)>] -> 'Guinea_Bissau' 193 | PropN[num=sg, sem=<\P.P(guyana)>] -> 'Guyana' 194 | PropN[num=sg, sem=<\P.P(guyana_dollar)>] -> 'Guyana_Dollar' 195 | PropN[num=sg, 
sem=<\P.P(haiti)>] -> 'Haiti' 196 | PropN[num=sg, sem=<\P.P(hamburg)>] -> 'Hamburg' 197 | PropN[num=sg, sem=<\P.P(hanoi)>] -> 'Hanoi' 198 | PropN[num=sg, sem=<\P.P(harbin)>] -> 'Harbin' 199 | PropN[num=sg, sem=<\P.P(havana)>] -> 'Havana' 200 | PropN[num=sg, sem=<\P.P(helsinki)>] -> 'Helsinki' 201 | PropN[num=sg, sem=<\P.P(honduras)>] -> 'Honduras' 202 | PropN[num=sg, sem=<\P.P(hongkong)>] -> 'Hongkong' 203 | PropN[num=sg, sem=<\P.P(hongkong_city)>] -> 'Hongkong_City' 204 | PropN[num=sg, sem=<\P.P(hungary)>] -> 'Hungary' 205 | PropN[num=sg, sem=<\P.P(hwang_ho)>] -> 'Hwang_Ho' 206 | PropN[num=sg, sem=<\P.P(hyderabad)>] -> 'Hyderabad' 207 | PropN[num=sg, sem=<\P.P(iceland)>] -> 'Iceland' 208 | PropN[num=sg, sem=<\P.P(india)>] -> 'India' 209 | PropN[num=sg, sem=<\P.P(indian_ocean)>] -> 'Indian_Ocean' 210 | PropN[num=sg, sem=<\P.P(indian_rupee)>] -> 'Indian_Rupee' 211 | PropN[num=sg, sem=<\P.P(indian_subcontinent)>] -> 'Indian_Subcontinent' 212 | PropN[num=sg, sem=<\P.P(indonesia)>] -> 'Indonesia' 213 | PropN[num=sg, sem=<\P.P(indus)>] -> 'Indus' 214 | PropN[num=sg, sem=<\P.P(iran)>] -> 'Iran' 215 | PropN[num=sg, sem=<\P.P(iraq)>] -> 'Iraq' 216 | PropN[num=sg, sem=<\P.P(irish_pound)>] -> 'Irish_Pound' 217 | PropN[num=sg, sem=<\P.P(irrawaddy)>] -> 'Irrawaddy' 218 | PropN[num=sg, sem=<\P.P(islamad)>] -> 'Islamad' 219 | PropN[num=sg, sem=<\P.P(israel)>] -> 'Israel' 220 | PropN[num=sg, sem=<\P.P(israeli_pound)>] -> 'Israeli_Pound' 221 | PropN[num=sg, sem=<\P.P(istanbul)>] -> 'Istanbul' 222 | PropN[num=sg, sem=<\P.P(italian_lira)>] -> 'Italian_Lira' 223 | PropN[num=sg, sem=<\P.P(italy)>] -> 'Italy' 224 | PropN[num=sg, sem=<\P.P(ivory_coast)>] -> 'Ivory_Coast' 225 | PropN[num=sg, sem=<\P.P(jakarta)>] -> 'Jakarta' 226 | PropN[num=sg, sem=<\P.P(jamaica)>] -> 'Jamaica' 227 | PropN[num=sg, sem=<\P.P(jamaican_dollar)>] -> 'Jamaican_Dollar' 228 | PropN[num=sg, sem=<\P.P(japan)>] -> 'Japan' 229 | PropN[num=sg, sem=<\P.P(jerusalem)>] -> 'Jerusalem' 230 | PropN[num=sg, sem=<\P.P(johannesburg)>] -> 'Johannesburg' 231 | PropN[num=sg, sem=<\P.P(jordan)>] -> 'Jordan' 232 | PropN[num=sg, sem=<\P.P(kabul)>] -> 'Kabul' 233 | PropN[num=sg, sem=<\P.P(kampala)>] -> 'Kampala' 234 | PropN[num=sg, sem=<\P.P(karachi)>] -> 'Karachi' 235 | PropN[num=sg, sem=<\P.P(katmandu)>] -> 'Katmandu' 236 | PropN[num=sg, sem=<\P.P(kenya)>] -> 'Kenya' 237 | PropN[num=sg, sem=<\P.P(kenya_shilling)>] -> 'Kenya_Shilling' 238 | PropN[num=sg, sem=<\P.P(khartoum)>] -> 'Khartoum' 239 | PropN[num=sg, sem=<\P.P(kiev)>] -> 'Kiev' 240 | PropN[num=sg, sem=<\P.P(kigali)>] -> 'Kigali' 241 | PropN[num=sg, sem=<\P.P(kingston)>] -> 'Kingston' 242 | PropN[num=sg, sem=<\P.P(kinshasa)>] -> 'Kinshasa' 243 | PropN[num=sg, sem=<\P.P(kip)>] -> 'Kip' 244 | PropN[num=sg, sem=<\P.P(kobe)>] -> 'Kobe' 245 | PropN[num=sg, sem=<\P.P(koruna)>] -> 'Koruna' 246 | PropN[num=sg, sem=<\P.P(kowloon)>] -> 'Kowloon' 247 | PropN[num=sg, sem=<\P.P(krona)>] -> 'Krona' 248 | PropN[num=sg, sem=<\P.P(krone)>] -> 'Krone' 249 | PropN[num=sg, sem=<\P.P(kuala_lumpa)>] -> 'Kuala_Lumpa' 250 | PropN[num=sg, sem=<\P.P(kuwait)>] -> 'Kuwait' 251 | PropN[num=sg, sem=<\P.P(kuwait_city)>] -> 'Kuwait_City' 252 | PropN[num=sg, sem=<\P.P(kuwaiti_dinar)>] -> 'Kuwaiti_Dinar' 253 | PropN[num=sg, sem=<\P.P(kwacha)>] -> 'Kwacha' 254 | PropN[num=sg, sem=<\P.P(kyat)>] -> 'Kyat' 255 | PropN[num=sg, sem=<\P.P(kyoto)>] -> 'Kyoto' 256 | PropN[num=sg, sem=<\P.P(lagos)>] -> 'Lagos' 257 | PropN[num=sg, sem=<\P.P(laos)>] -> 'Laos' 258 | PropN[num=sg, sem=<\P.P(lebanese_pound)>] -> 'Lebanese_Pound' 259 | 
PropN[num=sg, sem=<\P.P(lebanon)>] -> 'Lebanon' 260 | PropN[num=sg, sem=<\P.P(lek)>] -> 'Lek' 261 | PropN[num=sg, sem=<\P.P(lempira)>] -> 'Lempira' 262 | PropN[num=sg, sem=<\P.P(lena)>] -> 'Lena' 263 | PropN[num=sg, sem=<\P.P(leningrad)>] -> 'Leningrad' 264 | PropN[num=sg, sem=<\P.P(leone)>] -> 'Leone' 265 | PropN[num=sg, sem=<\P.P(lesotho)>] -> 'Lesotho' 266 | PropN[num=sg, sem=<\P.P(leu)>] -> 'Leu' 267 | PropN[num=sg, sem=<\P.P(lev)>] -> 'Lev' 268 | PropN[num=sg, sem=<\P.P(liberia)>] -> 'Liberia' 269 | PropN[num=sg, sem=<\P.P(libreville)>] -> 'Libreville' 270 | PropN[num=sg, sem=<\P.P(libya)>] -> 'Libya' 271 | PropN[num=sg, sem=<\P.P(libyan_dinar)>] -> 'Libyan_Dinar' 272 | PropN[num=sg, sem=<\P.P(liechtenstein)>] -> 'Liechtenstein' 273 | PropN[num=sg, sem=<\P.P(lilageru)>] -> 'Lilageru' 274 | PropN[num=sg, sem=<\P.P(lima)>] -> 'Lima' 275 | PropN[num=sg, sem=<\P.P(limpopo)>] -> 'Limpopo' 276 | PropN[num=sg, sem=<\P.P(lira)>] -> 'Lira' 277 | PropN[num=sg, sem=<\P.P(lisbon)>] -> 'Lisbon' 278 | PropN[num=sg, sem=<\P.P(lome)>] -> 'Lome' 279 | PropN[num=sg, sem=<\P.P(london)>] -> 'London' 280 | PropN[num=sg, sem=<\P.P(los_angeles)>] -> 'Los_Angeles' 281 | PropN[num=sg, sem=<\P.P(luanda)>] -> 'Luanda' 282 | PropN[num=sg, sem=<\P.P(lusaka)>] -> 'Lusaka' 283 | PropN[num=sg, sem=<\P.P(luxembourg)>] -> 'Luxembourg' 284 | PropN[num=sg, sem=<\P.P(luxembourg_franc)>] -> 'Luxembourg_Franc' 285 | PropN[num=sg, sem=<\P.P(mackenzie)>] -> 'Mackenzie' 286 | PropN[num=sg, sem=<\P.P(madras)>] -> 'Madras' 287 | PropN[num=sg, sem=<\P.P(madrid)>] -> 'Madrid' 288 | PropN[num=sg, sem=<\P.P(malagasy)>] -> 'Malagasy' 289 | PropN[num=sg, sem=<\P.P(malawi)>] -> 'Malawi' 290 | PropN[num=sg, sem=<\P.P(malaysia)>] -> 'Malaysia' 291 | PropN[num=sg, sem=<\P.P(malaysian_dollar)>] -> 'Malaysian_Dollar' 292 | PropN[num=sg, sem=<\P.P(maldives)>] -> 'Maldives' 293 | PropN[num=sg, sem=<\P.P(male)>] -> 'Male' 294 | PropN[num=sg, sem=<\P.P(mali)>] -> 'Mali' 295 | PropN[num=sg, sem=<\P.P(mali_franc)>] -> 'Mali_Franc' 296 | PropN[num=sg, sem=<\P.P(malta)>] -> 'Malta' 297 | PropN[num=sg, sem=<\P.P(managua)>] -> 'Managua' 298 | PropN[num=sg, sem=<\P.P(manama)>] -> 'Manama' 299 | PropN[num=sg, sem=<\P.P(manila)>] -> 'Manila' 300 | PropN[num=sg, sem=<\P.P(maputo)>] -> 'Maputo' 301 | PropN[num=sg, sem=<\P.P(markka)>] -> 'Markka' 302 | PropN[num=sg, sem=<\P.P(masero)>] -> 'Masero' 303 | PropN[num=sg, sem=<\P.P(mauritania)>] -> 'Mauritania' 304 | PropN[num=sg, sem=<\P.P(mauritius)>] -> 'Mauritius' 305 | PropN[num=sg, sem=<\P.P(mbabane)>] -> 'Mbabane' 306 | PropN[num=sg, sem=<\P.P(mediterranean)>] -> 'the_Mediterranean' 307 | PropN[num=sg, sem=<\P.P(mekong)>] -> 'Mekong' 308 | PropN[num=sg, sem=<\P.P(melbourne)>] -> 'Melbourne' 309 | PropN[num=sg, sem=<\P.P(mexico)>] -> 'Mexico' 310 | PropN[num=sg, sem=<\P.P(mexico_city)>] -> 'Mexico_City' 311 | PropN[num=sg, sem=<\P.P(middle_east)>] -> 'Middle_East' 312 | PropN[num=sg, sem=<\P.P(milan)>] -> 'Milan' 313 | PropN[num=sg, sem=<\P.P(mississippi)>] -> 'Mississippi' 314 | PropN[num=sg, sem=<\P.P(mogadishu)>] -> 'Mogadishu' 315 | PropN[num=sg, sem=<\P.P(monaco)>] -> 'Monaco' 316 | PropN[num=sg, sem=<\P.P(mongolia)>] -> 'Mongolia' 317 | PropN[num=sg, sem=<\P.P(monrovia)>] -> 'Monrovia' 318 | PropN[num=sg, sem=<\P.P(montevideo)>] -> 'Montevideo' 319 | PropN[num=sg, sem=<\P.P(montreal)>] -> 'Montreal' 320 | PropN[num=sg, sem=<\P.P(morocco)>] -> 'Morocco' 321 | PropN[num=sg, sem=<\P.P(moscow)>] -> 'Moscow' 322 | PropN[num=sg, sem=<\P.P(mozambique)>] -> 'Mozambique' 323 | PropN[num=sg, 
sem=<\P.P(mukden)>] -> 'Mukden' 324 | PropN[num=sg, sem=<\P.P(murray)>] -> 'Murray' 325 | PropN[num=sg, sem=<\P.P(muscat)>] -> 'Muscat' 326 | PropN[num=sg, sem=<\P.P(n_djamena)>] -> 'N_Djamena' 327 | PropN[num=sg, sem=<\P.P(nagoya)>] -> 'Nagoya' 328 | PropN[num=sg, sem=<\P.P(naira)>] -> 'Naira' 329 | PropN[num=sg, sem=<\P.P(nairobi)>] -> 'Nairobi' 330 | PropN[num=sg, sem=<\P.P(nanking)>] -> 'Nanking' 331 | PropN[num=sg, sem=<\P.P(naples)>] -> 'Naples' 332 | PropN[num=sg, sem=<\P.P(nassau)>] -> 'Nassau' 333 | PropN[num=sg, sem=<\P.P(nepal)>] -> 'Nepal' 334 | PropN[num=sg, sem=<\P.P(nepalese_rupee)>] -> 'Nepalese_Rupee' 335 | PropN[num=sg, sem=<\P.P(netherlands)>] -> 'Netherlands' 336 | PropN[num=sg, sem=<\P.P(new_delhi)>] -> 'New_Delhi' 337 | PropN[num=sg, sem=<\P.P(new_york)>] -> 'New_York' 338 | PropN[num=sg, sem=<\P.P(new_zealand)>] -> 'New_Zealand' 339 | PropN[num=sg, sem=<\P.P(new_zealand_dollar)>] -> 'New_Zealand_Dollar' 340 | PropN[num=sg, sem=<\P.P(niamey)>] -> 'Niamey' 341 | PropN[num=sg, sem=<\P.P(nicaragua)>] -> 'Nicaragua' 342 | PropN[num=sg, sem=<\P.P(nicosia)>] -> 'Nicosia' 343 | PropN[num=sg, sem=<\P.P(niger)>] -> 'Niger' 344 | PropN[num=sg, sem=<\P.P(niger_river)>] -> 'Niger_River' 345 | PropN[num=sg, sem=<\P.P(nigeria)>] -> 'Nigeria' 346 | PropN[num=sg, sem=<\P.P(nile)>] -> 'Nile' 347 | PropN[num=sg, sem=<\P.P(north_africa)>] -> 'North_Africa' 348 | PropN[num=sg, sem=<\P.P(north_america)>] -> 'North_America' 349 | PropN[num=sg, sem=<\P.P(north_korea)>] -> 'North_Korea' 350 | PropN[num=sg, sem=<\P.P(northern_asia)>] -> 'Northern_Asia' 351 | PropN[num=sg, sem=<\P.P(norway)>] -> 'Norway' 352 | PropN[num=sg, sem=<\P.P(nouakchott)>] -> 'Nouakchott' 353 | PropN[num=sg, sem=<\P.P(nukualofa)>] -> 'Nukualofa' 354 | PropN[num=sg, sem=<\P.P(ob)>] -> 'Ob' 355 | PropN[num=sg, sem=<\P.P(oder)>] -> 'Oder' 356 | PropN[num=sg, sem=<\P.P(oman)>] -> 'Oman' 357 | PropN[num=sg, sem=<\P.P(orange)>] -> 'Orange' 358 | PropN[num=sg, sem=<\P.P(orinoco)>] -> 'Orinoco' 359 | PropN[num=sg, sem=<\P.P(osaka)>] -> 'Osaka' 360 | PropN[num=sg, sem=<\P.P(oslo)>] -> 'Oslo' 361 | PropN[num=sg, sem=<\P.P(ottawa)>] -> 'Ottawa' 362 | PropN[num=sg, sem=<\P.P(ouagadougou)>] -> 'Ouagadougou' 363 | PropN[num=sg, sem=<\P.P(ouguiya)>] -> 'Ouguiya' 364 | PropN[num=sg, sem=<\P.P(pa_anga)>] -> 'Pa_Anga' 365 | PropN[num=sg, sem=<\P.P(pacific)>] -> 'Pacific' 366 | PropN[num=sg, sem=<\P.P(pakistan)>] -> 'Pakistan' 367 | PropN[num=sg, sem=<\P.P(panama)>] -> 'Panama' 368 | PropN[num=sg, sem=<\P.P(papua_new_guinea)>] -> 'Papua_New_Guinea' 369 | PropN[num=sg, sem=<\P.P(paraguay)>] -> 'Paraguay' 370 | PropN[num=sg, sem=<\P.P(paramaribo)>] -> 'Paramaribo' 371 | PropN[num=sg, sem=<\P.P(parana)>] -> 'Parana' 372 | PropN[num=sg, sem=<\P.P(paris)>] -> 'Paris' 373 | PropN[num=sg, sem=<\P.P(pataca)>] -> 'Pataca' 374 | PropN[num=sg, sem=<\P.P(peking)>] -> 'Peking' 375 | PropN[num=sg, sem=<\P.P(persian_gulf)>] -> 'Persian_Gulf' 376 | PropN[num=sg, sem=<\P.P(peru)>] -> 'Peru' 377 | PropN[num=sg, sem=<\P.P(peseta)>] -> 'Peseta' 378 | PropN[num=sg, sem=<\P.P(peso)>] -> 'Peso' 379 | PropN[num=sg, sem=<\P.P(peveta)>] -> 'Peveta' 380 | PropN[num=sg, sem=<\P.P(philadelphia)>] -> 'Philadelphia' 381 | PropN[num=sg, sem=<\P.P(philippines)>] -> 'Philippines' 382 | PropN[num=sg, sem=<\P.P(phnom_penh)>] -> 'Phnom_Penh' 383 | PropN[num=sg, sem=<\P.P(piso)>] -> 'Piso' 384 | PropN[num=sg, sem=<\P.P(poland)>] -> 'Poland' 385 | PropN[num=sg, sem=<\P.P(port_au_prince)>] -> 'Port_Au_Prince' 386 | PropN[num=sg, sem=<\P.P(port_harcourt)>] -> 'Port_Harcourt' 
387 | PropN[num=sg, sem=<\P.P(port_louis)>] -> 'Port_Louis' 388 | PropN[num=sg, sem=<\P.P(port_of_spain)>] -> 'Port_Of_Spain' 389 | PropN[num=sg, sem=<\P.P(porto_novo)>] -> 'Porto_Novo' 390 | PropN[num=sg, sem=<\P.P(portugal)>] -> 'Portugal' 391 | PropN[num=sg, sem=<\P.P(pound)>] -> 'Pound' 392 | PropN[num=sg, sem=<\P.P(prague)>] -> 'Prague' 393 | PropN[num=sg, sem=<\P.P(pretoria)>] -> 'Pretoria' 394 | PropN[num=sg, sem=<\P.P(pusan)>] -> 'Pusan' 395 | PropN[num=sg, sem=<\P.P(pvongvang)>] -> 'Pvongvang' 396 | PropN[num=sg, sem=<\P.P(qatar)>] -> 'Qatar' 397 | PropN[num=sg, sem=<\P.P(quetzal)>] -> 'Quetzal' 398 | PropN[num=sg, sem=<\P.P(quezon_city)>] -> 'Quezon_City' 399 | PropN[num=sg, sem=<\P.P(quito)>] -> 'Quito' 400 | PropN[num=sg, sem=<\P.P(rabat)>] -> 'Rabat' 401 | PropN[num=sg, sem=<\P.P(rand)>] -> 'Rand' 402 | PropN[num=sg, sem=<\P.P(rangoon)>] -> 'Rangoon' 403 | PropN[num=sg, sem=<\P.P(red_sea)>] -> 'Red_Sea' 404 | PropN[num=sg, sem=<\P.P(reykjavik)>] -> 'Reykjavik' 405 | PropN[num=sg, sem=<\P.P(rhine)>] -> 'Rhine' 406 | PropN[num=sg, sem=<\P.P(rhodesian_dollar)>] -> 'Rhodesian_Dollar' 407 | PropN[num=sg, sem=<\P.P(rhone)>] -> 'Rhone' 408 | PropN[num=sg, sem=<\P.P(rial)>] -> 'Rial' 409 | PropN[num=sg, sem=<\P.P(riel)>] -> 'Riel' 410 | PropN[num=sg, sem=<\P.P(rio_de_janeiro)>] -> 'Rio_De_Janeiro' 411 | PropN[num=sg, sem=<\P.P(rio_grande)>] -> 'Rio_Grande' 412 | PropN[num=sg, sem=<\P.P(riyadh)>] -> 'Riyadh' 413 | PropN[num=sg, sem=<\P.P(riyal)>] -> 'Riyal' 414 | PropN[num=sg, sem=<\P.P(riyal_omani)>] -> 'Riyal_Omani' 415 | PropN[num=sg, sem=<\P.P(romania)>] -> 'Romania' 416 | PropN[num=sg, sem=<\P.P(rome)>] -> 'Rome' 417 | PropN[num=sg, sem=<\P.P(ruble)>] -> 'Ruble' 418 | PropN[num=sg, sem=<\P.P(rupee)>] -> 'Rupee' 419 | PropN[num=sg, sem=<\P.P(rupiah)>] -> 'Rupiah' 420 | PropN[num=sg, sem=<\P.P(rwanda)>] -> 'Rwanda' 421 | PropN[num=sg, sem=<\P.P(rwanda_franc)>] -> 'Rwanda_Franc' 422 | PropN[num=sg, sem=<\P.P(saigon)>] -> 'Saigon' 423 | PropN[num=sg, sem=<\P.P(salisbury)>] -> 'Salisbury' 424 | PropN[num=sg, sem=<\P.P(salween)>] -> 'Salween' 425 | PropN[num=sg, sem=<\P.P(san_jose)>] -> 'San_Jose' 426 | PropN[num=sg, sem=<\P.P(san_marino)>] -> 'San_Marino' 427 | PropN[num=sg, sem=<\P.P(san_salvador)>] -> 'San_Salvador' 428 | PropN[num=sg, sem=<\P.P(sana)>] -> 'Sana' 429 | PropN[num=sg, sem=<\P.P(santa_domingo)>] -> 'Santa_Domingo' 430 | PropN[num=sg, sem=<\P.P(santa_isabel)>] -> 'Santa_Isabel' 431 | PropN[num=sg, sem=<\P.P(santiago)>] -> 'Santiago' 432 | PropN[num=sg, sem=<\P.P(sao_paulo)>] -> 'Sao_Paulo' 433 | PropN[num=sg, sem=<\P.P(saudi_arabia)>] -> 'Saudi_Arabia' 434 | PropN[num=sg, sem=<\P.P(scandinavia)>] -> 'Scandinavia' 435 | PropN[num=sg, sem=<\P.P(schilling)>] -> 'Schilling' 436 | PropN[num=sg, sem=<\P.P(senegal)>] -> 'Senegal' 437 | PropN[num=sg, sem=<\P.P(senegal_river)>] -> 'Senegal_River' 438 | PropN[num=sg, sem=<\P.P(seoul)>] -> 'Seoul' 439 | PropN[num=sg, sem=<\P.P(seychelles)>] -> 'Seychelles' 440 | PropN[num=sg, sem=<\P.P(shanghai)>] -> 'Shanghai' 441 | PropN[num=sg, sem=<\P.P(sian)>] -> 'Sian' 442 | PropN[num=sg, sem=<\P.P(sierra_leone)>] -> 'Sierra_Leone' 443 | PropN[num=sg, sem=<\P.P(singapore)>] -> 'Singapore' 444 | PropN[num=sg, sem=<\P.P(singapore_city)>] -> 'Singapore_City' 445 | PropN[num=sg, sem=<\P.P(singapore_dollar)>] -> 'Singapore_Dollar' 446 | PropN[num=sg, sem=<\P.P(sofia)>] -> 'Sofia' 447 | PropN[num=sg, sem=<\P.P(sol)>] -> 'Sol' 448 | PropN[num=sg, sem=<\P.P(somali_shilling)>] -> 'Somali_Shilling' 449 | PropN[num=sg, sem=<\P.P(somalia)>] -> 'Somalia' 
450 | PropN[num=sg, sem=<\P.P(south_africa)>] -> 'South_Africa' 451 | PropN[num=sg, sem=<\P.P(south_african_rand)>] -> 'South_African_Rand' 452 | PropN[num=sg, sem=<\P.P(south_america)>] -> 'South_America' 453 | PropN[num=sg, sem=<\P.P(south_korea)>] -> 'South_Korea' 454 | PropN[num=sg, sem=<\P.P(south_yemen)>] -> 'South_Yemen' 455 | PropN[num=sg, sem=<\P.P(southeast_east)>] -> 'Southeast_East' 456 | PropN[num=sg, sem=<\P.P(southern_africa)>] -> 'Southern_Africa' 457 | PropN[num=sg, sem=<\P.P(southern_europe)>] -> 'Southern_Europe' 458 | PropN[num=sg, sem=<\P.P(southern_ocean)>] -> 'Southern_Ocean' 459 | PropN[num=sg, sem=<\P.P(soviet_union)>] -> 'Soviet_Union' 460 | PropN[num=sg, sem=<\P.P(spain)>] -> 'Spain' 461 | PropN[num=sg, sem=<\P.P(sri_lanka)>] -> 'Sri_Lanka' 462 | PropN[num=sg, sem=<\P.P(st_georges)>] -> 'St_Georges' 463 | PropN[num=sg, sem=<\P.P(stockholm)>] -> 'Stockholm' 464 | PropN[num=sg, sem=<\P.P(sucre)>] -> 'Sucre' 465 | PropN[num=sg, sem=<\P.P(sudan)>] -> 'Sudan' 466 | PropN[num=sg, sem=<\P.P(surinam)>] -> 'Surinam' 467 | PropN[num=sg, sem=<\P.P(suva)>] -> 'Suva' 468 | PropN[num=sg, sem=<\P.P(swaziland)>] -> 'Swaziland' 469 | PropN[num=sg, sem=<\P.P(sweden)>] -> 'Sweden' 470 | PropN[num=sg, sem=<\P.P(swiss_franc)>] -> 'Swiss_Franc' 471 | PropN[num=sg, sem=<\P.P(switzerland)>] -> 'Switzerland' 472 | PropN[num=sg, sem=<\P.P(sydney)>] -> 'Sydney' 473 | PropN[num=sg, sem=<\P.P(syli)>] -> 'Syli' 474 | PropN[num=sg, sem=<\P.P(syria)>] -> 'Syria' 475 | PropN[num=sg, sem=<\P.P(syrian_pound)>] -> 'Syrian_Pound' 476 | PropN[num=sg, sem=<\P.P(tagus)>] -> 'Tagus' 477 | PropN[num=sg, sem=<\P.P(taipei)>] -> 'Taipei' 478 | PropN[num=sg, sem=<\P.P(taiwan)>] -> 'Taiwan' 479 | PropN[num=sg, sem=<\P.P(taiwan_dollar)>] -> 'Taiwan_Dollar' 480 | PropN[num=sg, sem=<\P.P(taka)>] -> 'Taka' 481 | PropN[num=sg, sem=<\P.P(tala)>] -> 'Tala' 482 | PropN[num=sg, sem=<\P.P(tananarive)>] -> 'Tananarive' 483 | PropN[num=sg, sem=<\P.P(tanzania)>] -> 'Tanzania' 484 | PropN[num=sg, sem=<\P.P(tanzanian_shilling)>] -> 'Tanzanian_Shilling' 485 | PropN[num=sg, sem=<\P.P(tegucigalpa)>] -> 'Tegucigalpa' 486 | PropN[num=sg, sem=<\P.P(tehran)>] -> 'Tehran' 487 | PropN[num=sg, sem=<\P.P(thailand)>] -> 'Thailand' 488 | PropN[num=sg, sem=<\P.P(thimphu)>] -> 'Thimphu' 489 | PropN[num=sg, sem=<\P.P(tientsin)>] -> 'Tientsin' 490 | PropN[num=sg, sem=<\P.P(tighrik)>] -> 'Tighrik' 491 | PropN[num=sg, sem=<\P.P(tirana)>] -> 'Tirana' 492 | PropN[num=sg, sem=<\P.P(togo)>] -> 'Togo' 493 | PropN[num=sg, sem=<\P.P(tokyo)>] -> 'Tokyo' 494 | PropN[num=sg, sem=<\P.P(tonga)>] -> 'Tonga' 495 | PropN[num=sg, sem=<\P.P(toronto)>] -> 'Toronto' 496 | PropN[num=sg, sem=<\P.P(trinidad_and_tobago)>] -> 'Trinidad_And_Tobago' 497 | PropN[num=sg, sem=<\P.P(trinidad_and_tobago_dollar)>] -> 'Trinidad_And_Tobago_Dollar' 498 | PropN[num=sg, sem=<\P.P(tripoli)>] -> 'Tripoli' 499 | PropN[num=sg, sem=<\P.P(tropic_of_cancer)>] -> 'Tropic_Of_Cancer' 500 | PropN[num=sg, sem=<\P.P(tropic_of_capricorn)>] -> 'Tropic_Of_Capricorn' 501 | PropN[num=sg, sem=<\P.P(tunis)>] -> 'Tunis' 502 | PropN[num=sg, sem=<\P.P(tunisia)>] -> 'Tunisia' 503 | PropN[num=sg, sem=<\P.P(turkey)>] -> 'Turkey' 504 | PropN[num=sg, sem=<\P.P(uganda)>] -> 'Uganda' 505 | PropN[num=sg, sem=<\P.P(uganda_shilling)>] -> 'Uganda_Shilling' 506 | PropN[num=sg, sem=<\P.P(ulan_bator)>] -> 'Ulan_Bator' 507 | PropN[num=sg, sem=<\P.P(united_arab_emirates)>] -> 'United_Arab_Emirates' 508 | PropN[num=sg, sem=<\P.P(united_kingdom)>] -> 'United_Kingdom' 509 | PropN[num=sg, sem=<\P.P(united_states)>] -> 
'United_States' 510 | PropN[num=sg, sem=<\P.P(upper_volta)>] -> 'Upper_Volta' 511 | PropN[num=sg, sem=<\P.P(uruguay)>] -> 'Uruguay' 512 | PropN[num=sg, sem=<\P.P(us_dollar)>] -> 'Us_Dollar' 513 | PropN[num=sg, sem=<\P.P(vaduz)>] -> 'Vaduz' 514 | PropN[num=sg, sem=<\P.P(valetta)>] -> 'Valetta' 515 | PropN[num=sg, sem=<\P.P(venezuela)>] -> 'Venezuela' 516 | PropN[num=sg, sem=<\P.P(victoria)>] -> 'Victoria' 517 | PropN[num=sg, sem=<\P.P(vienna)>] -> 'Vienna' 518 | PropN[num=sg, sem=<\P.P(vientiane)>] -> 'Vientiane' 519 | PropN[num=sg, sem=<\P.P(vietnam)>] -> 'Vietnam' 520 | PropN[num=sg, sem=<\P.P(vistula)>] -> 'Vistula' 521 | PropN[num=sg, sem=<\P.P(volga)>] -> 'Volga' 522 | PropN[num=sg, sem=<\P.P(volta)>] -> 'Volta' 523 | PropN[num=sg, sem=<\P.P(warsaw)>] -> 'Warsaw' 524 | PropN[num=sg, sem=<\P.P(washington)>] -> 'Washington' 525 | PropN[num=sg, sem=<\P.P(wellington)>] -> 'Wellington' 526 | PropN[num=sg, sem=<\P.P(west_africa)>] -> 'West_Africa' 527 | PropN[num=sg, sem=<\P.P(west_germany)>] -> 'West_Germany' 528 | PropN[num=sg, sem=<\P.P(western_europe)>] -> 'Western_Europe' 529 | PropN[num=sg, sem=<\P.P(western_samoa)>] -> 'Western_Samoa' 530 | PropN[num=sg, sem=<\P.P(won)>] -> 'Won' 531 | PropN[num=sg, sem=<\P.P(yangtze)>] -> 'Yangtze' 532 | PropN[num=sg, sem=<\P.P(yaounde)>] -> 'Yaounde' 533 | PropN[num=sg, sem=<\P.P(yemen)>] -> 'Yemen' 534 | PropN[num=sg, sem=<\P.P(yen)>] -> 'Yen' 535 | PropN[num=sg, sem=<\P.P(yenisei)>] -> 'Yenisei' 536 | PropN[num=sg, sem=<\P.P(yokohama)>] -> 'Yokohama' 537 | PropN[num=sg, sem=<\P.P(yuan)>] -> 'Yuan' 538 | PropN[num=sg, sem=<\P.P(yugoslavia)>] -> 'Yugoslavia' 539 | PropN[num=sg, sem=<\P.P(yukon)>] -> 'Yukon' 540 | PropN[num=sg, sem=<\P.P(zaire)>] -> 'Zaire' 541 | PropN[num=sg, sem=<\P.P(zambesi)>] -> 'Zambesi' 542 | PropN[num=sg, sem=<\P.P(zambia)>] -> 'Zambia' 543 | PropN[num=sg, sem=<\P.P(zimbabwe)>] -> 'Zimbabwe' 544 | PropN[num=sg, sem=<\P.P(zloty)>] -> 'Zloty' 545 | PropN[num=sg, sem=<\P.P(zomba)>] -> 'Zomba' 546 | -------------------------------------------------------------------------------- /examples/semantics/chat_pnames.cfg: -------------------------------------------------------------------------------- 1 | 2 | ################################################################## 3 | # Lexical rules automatically generated by running 'chat80.py -x'. 
4 | ################################################################## 5 | 6 | PropN[num=sg, sem=<\P.(P abidjan)>] -> 'Abidjan' 7 | PropN[num=sg, sem=<\P.(P abu_dhabi)>] -> 'Abu_Dhabi' 8 | PropN[num=sg, sem=<\P.(P accra)>] -> 'Accra' 9 | PropN[num=sg, sem=<\P.(P addis_ababa)>] -> 'Addis_Ababa' 10 | PropN[num=sg, sem=<\P.(P aden)>] -> 'Aden' 11 | PropN[num=sg, sem=<\P.(P afghani)>] -> 'Afghani' 12 | PropN[num=sg, sem=<\P.(P afghanistan)>] -> 'Afghanistan' 13 | PropN[num=sg, sem=<\P.(P africa)>] -> 'Africa' 14 | PropN[num=sg, sem=<\P.(P albania)>] -> 'Albania' 15 | PropN[num=sg, sem=<\P.(P algeria)>] -> 'Algeria' 16 | PropN[num=sg, sem=<\P.(P algiers)>] -> 'Algiers' 17 | PropN[num=sg, sem=<\P.(P amazon)>] -> 'Amazon' 18 | PropN[num=sg, sem=<\P.(P america)>] -> 'America' 19 | PropN[num=sg, sem=<\P.(P amman)>] -> 'Amman' 20 | PropN[num=sg, sem=<\P.(P amsterdam)>] -> 'Amsterdam' 21 | PropN[num=sg, sem=<\P.(P amu_darya)>] -> 'Amu_Darya' 22 | PropN[num=sg, sem=<\P.(P amur)>] -> 'Amur' 23 | PropN[num=sg, sem=<\P.(P andorra)>] -> 'Andorra' 24 | PropN[num=sg, sem=<\P.(P andorra_la_villa)>] -> 'Andorra_La_Villa' 25 | PropN[num=sg, sem=<\P.(P angola)>] -> 'Angola' 26 | PropN[num=sg, sem=<\P.(P ankara)>] -> 'Ankara' 27 | PropN[num=sg, sem=<\P.(P antarctic_circle)>] -> 'Antarctic_Circle' 28 | PropN[num=sg, sem=<\P.(P antarctica)>] -> 'Antarctica' 29 | PropN[num=sg, sem=<\P.(P apia)>] -> 'Apia' 30 | PropN[num=sg, sem=<\P.(P arctic_circle)>] -> 'Arctic_Circle' 31 | PropN[num=sg, sem=<\P.(P arctic_ocean)>] -> 'Arctic_Ocean' 32 | PropN[num=sg, sem=<\P.(P argentina)>] -> 'Argentina' 33 | PropN[num=sg, sem=<\P.(P ariary)>] -> 'Ariary' 34 | PropN[num=sg, sem=<\P.(P asia)>] -> 'Asia' 35 | PropN[num=sg, sem=<\P.(P asuncion)>] -> 'Asuncion' 36 | PropN[num=sg, sem=<\P.(P athens)>] -> 'Athens' 37 | PropN[num=sg, sem=<\P.(P atlantic)>] -> 'Atlantic' 38 | PropN[num=sg, sem=<\P.(P australasia)>] -> 'Australasia' 39 | PropN[num=sg, sem=<\P.(P australia)>] -> 'Australia' 40 | PropN[num=sg, sem=<\P.(P australian_dollar)>] -> 'Australian_Dollar' 41 | PropN[num=sg, sem=<\P.(P austria)>] -> 'Austria' 42 | PropN[num=sg, sem=<\P.(P baghdad)>] -> 'Baghdad' 43 | PropN[num=sg, sem=<\P.(P bahamas)>] -> 'Bahamas' 44 | PropN[num=sg, sem=<\P.(P bahamian_dollar)>] -> 'Bahamian_Dollar' 45 | PropN[num=sg, sem=<\P.(P bahrain)>] -> 'Bahrain' 46 | PropN[num=sg, sem=<\P.(P baht)>] -> 'Baht' 47 | PropN[num=sg, sem=<\P.(P balboa)>] -> 'Balboa' 48 | PropN[num=sg, sem=<\P.(P baltic)>] -> 'Baltic' 49 | PropN[num=sg, sem=<\P.(P bamako)>] -> 'Bamako' 50 | PropN[num=sg, sem=<\P.(P bangkok)>] -> 'Bangkok' 51 | PropN[num=sg, sem=<\P.(P bangladesh)>] -> 'Bangladesh' 52 | PropN[num=sg, sem=<\P.(P bangui)>] -> 'Bangui' 53 | PropN[num=sg, sem=<\P.(P banjul)>] -> 'Banjul' 54 | PropN[num=sg, sem=<\P.(P barbados)>] -> 'Barbados' 55 | PropN[num=sg, sem=<\P.(P barcelona)>] -> 'Barcelona' 56 | PropN[num=sg, sem=<\P.(P beirut)>] -> 'Beirut' 57 | PropN[num=sg, sem=<\P.(P belgium)>] -> 'Belgium' 58 | PropN[num=sg, sem=<\P.(P belgrade)>] -> 'Belgrade' 59 | PropN[num=sg, sem=<\P.(P belize)>] -> 'Belize' 60 | PropN[num=sg, sem=<\P.(P belize_town)>] -> 'Belize_Town' 61 | PropN[num=sg, sem=<\P.(P berlin)>] -> 'Berlin' 62 | PropN[num=sg, sem=<\P.(P bern)>] -> 'Bern' 63 | PropN[num=sg, sem=<\P.(P bhutan)>] -> 'Bhutan' 64 | PropN[num=sg, sem=<\P.(P birmingham)>] -> 'Birmingham' 65 | PropN[num=sg, sem=<\P.(P bissau)>] -> 'Bissau' 66 | PropN[num=sg, sem=<\P.(P black_sea)>] -> 'Black_Sea' 67 | PropN[num=sg, sem=<\P.(P bogota)>] -> 'Bogota' 68 | PropN[num=sg, sem=<\P.(P 
bolivar)>] -> 'Bolivar' 69 | PropN[num=sg, sem=<\P.(P bolivia)>] -> 'Bolivia' 70 | PropN[num=sg, sem=<\P.(P bombay)>] -> 'Bombay' 71 | PropN[num=sg, sem=<\P.(P bonn)>] -> 'Bonn' 72 | PropN[num=sg, sem=<\P.(P botswana)>] -> 'Botswana' 73 | PropN[num=sg, sem=<\P.(P brahmaputra)>] -> 'Brahmaputra' 74 | PropN[num=sg, sem=<\P.(P brasilia)>] -> 'Brasilia' 75 | PropN[num=sg, sem=<\P.(P brazil)>] -> 'Brazil' 76 | PropN[num=sg, sem=<\P.(P brazzaville)>] -> 'Brazzaville' 77 | PropN[num=sg, sem=<\P.(P bridgetown)>] -> 'Bridgetown' 78 | PropN[num=sg, sem=<\P.(P brussels)>] -> 'Brussels' 79 | PropN[num=sg, sem=<\P.(P bucharest)>] -> 'Bucharest' 80 | PropN[num=sg, sem=<\P.(P budapest)>] -> 'Budapest' 81 | PropN[num=sg, sem=<\P.(P buenos_aires)>] -> 'Buenos_Aires' 82 | PropN[num=sg, sem=<\P.(P bujumbura)>] -> 'Bujumbura' 83 | PropN[num=sg, sem=<\P.(P bulgaria)>] -> 'Bulgaria' 84 | PropN[num=sg, sem=<\P.(P burma)>] -> 'Burma' 85 | PropN[num=sg, sem=<\P.(P burundi)>] -> 'Burundi' 86 | PropN[num=sg, sem=<\P.(P cairo)>] -> 'Cairo' 87 | PropN[num=sg, sem=<\P.(P calcutta)>] -> 'Calcutta' 88 | PropN[num=sg, sem=<\P.(P cambodia)>] -> 'Cambodia' 89 | PropN[num=sg, sem=<\P.(P cameroon)>] -> 'Cameroon' 90 | PropN[num=sg, sem=<\P.(P canada)>] -> 'Canada' 91 | PropN[num=sg, sem=<\P.(P canadian_dollar)>] -> 'Canadian_Dollar' 92 | PropN[num=sg, sem=<\P.(P canberra)>] -> 'Canberra' 93 | PropN[num=sg, sem=<\P.(P canton)>] -> 'Canton' 94 | PropN[num=sg, sem=<\P.(P caracas)>] -> 'Caracas' 95 | PropN[num=sg, sem=<\P.(P caribbean)>] -> 'Caribbean' 96 | PropN[num=sg, sem=<\P.(P caspian)>] -> 'Caspian' 97 | PropN[num=sg, sem=<\P.(P cayenne)>] -> 'Cayenne' 98 | PropN[num=sg, sem=<\P.(P cedi)>] -> 'Cedi' 99 | PropN[num=sg, sem=<\P.(P central_africa)>] -> 'Central_Africa' 100 | PropN[num=sg, sem=<\P.(P central_african_republic)>] -> 'Central_African_Republic' 101 | PropN[num=sg, sem=<\P.(P central_america)>] -> 'Central_America' 102 | PropN[num=sg, sem=<\P.(P cfa_franc)>] -> 'Cfa_Franc' 103 | PropN[num=sg, sem=<\P.(P chad)>] -> 'Chad' 104 | PropN[num=sg, sem=<\P.(P chicago)>] -> 'Chicago' 105 | PropN[num=sg, sem=<\P.(P chile)>] -> 'Chile' 106 | PropN[num=sg, sem=<\P.(P china)>] -> 'China' 107 | PropN[num=sg, sem=<\P.(P chungking)>] -> 'Chungking' 108 | PropN[num=sg, sem=<\P.(P colombia)>] -> 'Colombia' 109 | PropN[num=sg, sem=<\P.(P colombo)>] -> 'Colombo' 110 | PropN[num=sg, sem=<\P.(P colon)>] -> 'Colon' 111 | PropN[num=sg, sem=<\P.(P colorado)>] -> 'Colorado' 112 | PropN[num=sg, sem=<\P.(P conakry)>] -> 'Conakry' 113 | PropN[num=sg, sem=<\P.(P congo)>] -> 'Congo' 114 | PropN[num=sg, sem=<\P.(P congo_river)>] -> 'Congo_River' 115 | PropN[num=sg, sem=<\P.(P copenhagen)>] -> 'Copenhagen' 116 | PropN[num=sg, sem=<\P.(P cordoba)>] -> 'Cordoba' 117 | PropN[num=sg, sem=<\P.(P costa_rica)>] -> 'Costa_Rica' 118 | PropN[num=sg, sem=<\P.(P cruzeiro)>] -> 'Cruzeiro' 119 | PropN[num=sg, sem=<\P.(P cuba)>] -> 'Cuba' 120 | PropN[num=sg, sem=<\P.(P cubango)>] -> 'Cubango' 121 | PropN[num=sg, sem=<\P.(P cyprus)>] -> 'Cyprus' 122 | PropN[num=sg, sem=<\P.(P czechoslovakia)>] -> 'Czechoslovakia' 123 | PropN[num=sg, sem=<\P.(P dacca)>] -> 'Dacca' 124 | PropN[num=sg, sem=<\P.(P dahomey)>] -> 'Dahomey' 125 | PropN[num=sg, sem=<\P.(P dairen)>] -> 'Dairen' 126 | PropN[num=sg, sem=<\P.(P dakar)>] -> 'Dakar' 127 | PropN[num=sg, sem=<\P.(P dalasi)>] -> 'Dalasi' 128 | PropN[num=sg, sem=<\P.(P damascus)>] -> 'Damascus' 129 | PropN[num=sg, sem=<\P.(P danube)>] -> 'Danube' 130 | PropN[num=sg, sem=<\P.(P dar_es_salaam)>] -> 'Dar_Es_Salaam' 131 | PropN[num=sg, 
sem=<\P.(P ddr_mark)>] -> 'Ddr_Mark' 132 | PropN[num=sg, sem=<\P.(P delhi)>] -> 'Delhi' 133 | PropN[num=sg, sem=<\P.(P denmark)>] -> 'Denmark' 134 | PropN[num=sg, sem=<\P.(P detroit)>] -> 'Detroit' 135 | PropN[num=sg, sem=<\P.(P deutsche_mark)>] -> 'Deutsche_Mark' 136 | PropN[num=sg, sem=<\P.(P dinar)>] -> 'Dinar' 137 | PropN[num=sg, sem=<\P.(P dirham)>] -> 'Dirham' 138 | PropN[num=sg, sem=<\P.(P djibouti)>] -> 'Djibouti' 139 | PropN[num=sg, sem=<\P.(P doha)>] -> 'Doha' 140 | PropN[num=sg, sem=<\P.(P dollar)>] -> 'Dollar' 141 | PropN[num=sg, sem=<\P.(P dominican_republic)>] -> 'Dominican_Republic' 142 | PropN[num=sg, sem=<\P.(P don)>] -> 'Don' 143 | PropN[num=sg, sem=<\P.(P dong)>] -> 'Dong' 144 | PropN[num=sg, sem=<\P.(P drachma)>] -> 'Drachma' 145 | PropN[num=sg, sem=<\P.(P dublin)>] -> 'Dublin' 146 | PropN[num=sg, sem=<\P.(P east_africa)>] -> 'East_Africa' 147 | PropN[num=sg, sem=<\P.(P east_berlin)>] -> 'East_Berlin' 148 | PropN[num=sg, sem=<\P.(P east_caribbean_dollar)>] -> 'East_Caribbean_Dollar' 149 | PropN[num=sg, sem=<\P.(P east_carribean_dollar)>] -> 'East_Carribean_Dollar' 150 | PropN[num=sg, sem=<\P.(P east_germany)>] -> 'East_Germany' 151 | PropN[num=sg, sem=<\P.(P eastern_europe)>] -> 'Eastern_Europe' 152 | PropN[num=sg, sem=<\P.(P ecuador)>] -> 'Ecuador' 153 | PropN[num=sg, sem=<\P.(P egypt)>] -> 'Egypt' 154 | PropN[num=sg, sem=<\P.(P egyptian_pound)>] -> 'Egyptian_Pound' 155 | PropN[num=sg, sem=<\P.(P eire)>] -> 'Eire' 156 | PropN[num=sg, sem=<\P.(P el_salvador)>] -> 'El_Salvador' 157 | PropN[num=sg, sem=<\P.(P elbe)>] -> 'Elbe' 158 | PropN[num=sg, sem=<\P.(P equator)>] -> 'Equator' 159 | PropN[num=sg, sem=<\P.(P equatorial_guinea)>] -> 'Equatorial_Guinea' 160 | PropN[num=sg, sem=<\P.(P escudo)>] -> 'Escudo' 161 | PropN[num=sg, sem=<\P.(P ethiopean_dollar)>] -> 'Ethiopean_Dollar' 162 | PropN[num=sg, sem=<\P.(P ethiopia)>] -> 'Ethiopia' 163 | PropN[num=sg, sem=<\P.(P euphrates)>] -> 'Euphrates' 164 | PropN[num=sg, sem=<\P.(P europe)>] -> 'Europe' 165 | PropN[num=sg, sem=<\P.(P far_east)>] -> 'Far_East' 166 | PropN[num=sg, sem=<\P.(P fiji)>] -> 'Fiji' 167 | PropN[num=sg, sem=<\P.(P fiji_dollar)>] -> 'Fiji_Dollar' 168 | PropN[num=sg, sem=<\P.(P finland)>] -> 'Finland' 169 | PropN[num=sg, sem=<\P.(P forint)>] -> 'Forint' 170 | PropN[num=sg, sem=<\P.(P franc)>] -> 'Franc' 171 | PropN[num=sg, sem=<\P.(P franc_peseta)>] -> 'Franc_Peseta' 172 | PropN[num=sg, sem=<\P.(P france)>] -> 'France' 173 | PropN[num=sg, sem=<\P.(P freetown)>] -> 'Freetown' 174 | PropN[num=sg, sem=<\P.(P french_franc)>] -> 'French_Franc' 175 | PropN[num=sg, sem=<\P.(P french_guiana)>] -> 'French_Guiana' 176 | PropN[num=sg, sem=<\P.(P gabon)>] -> 'Gabon' 177 | PropN[num=sg, sem=<\P.(P gaborone)>] -> 'Gaborone' 178 | PropN[num=sg, sem=<\P.(P gambia)>] -> 'Gambia' 179 | PropN[num=sg, sem=<\P.(P ganges)>] -> 'Ganges' 180 | PropN[num=sg, sem=<\P.(P georgetown)>] -> 'Georgetown' 181 | PropN[num=sg, sem=<\P.(P ghana)>] -> 'Ghana' 182 | PropN[num=sg, sem=<\P.(P glasgow)>] -> 'Glasgow' 183 | PropN[num=sg, sem=<\P.(P gourde)>] -> 'Gourde' 184 | PropN[num=sg, sem=<\P.(P greece)>] -> 'Greece' 185 | PropN[num=sg, sem=<\P.(P greenland)>] -> 'Greenland' 186 | PropN[num=sg, sem=<\P.(P grenada)>] -> 'Grenada' 187 | PropN[num=sg, sem=<\P.(P guarani)>] -> 'Guarani' 188 | PropN[num=sg, sem=<\P.(P guatamala_city)>] -> 'Guatamala_City' 189 | PropN[num=sg, sem=<\P.(P guatemala)>] -> 'Guatemala' 190 | PropN[num=sg, sem=<\P.(P guilder)>] -> 'Guilder' 191 | PropN[num=sg, sem=<\P.(P guinea)>] -> 'Guinea' 192 | PropN[num=sg, sem=<\P.(P 
guinea_bissau)>] -> 'Guinea_Bissau' 193 | PropN[num=sg, sem=<\P.(P guyana)>] -> 'Guyana' 194 | PropN[num=sg, sem=<\P.(P guyana_dollar)>] -> 'Guyana_Dollar' 195 | PropN[num=sg, sem=<\P.(P haiti)>] -> 'Haiti' 196 | PropN[num=sg, sem=<\P.(P hamburg)>] -> 'Hamburg' 197 | PropN[num=sg, sem=<\P.(P hanoi)>] -> 'Hanoi' 198 | PropN[num=sg, sem=<\P.(P harbin)>] -> 'Harbin' 199 | PropN[num=sg, sem=<\P.(P havana)>] -> 'Havana' 200 | PropN[num=sg, sem=<\P.(P helsinki)>] -> 'Helsinki' 201 | PropN[num=sg, sem=<\P.(P honduras)>] -> 'Honduras' 202 | PropN[num=sg, sem=<\P.(P hongkong)>] -> 'Hongkong' 203 | PropN[num=sg, sem=<\P.(P hongkong_city)>] -> 'Hongkong_City' 204 | PropN[num=sg, sem=<\P.(P hungary)>] -> 'Hungary' 205 | PropN[num=sg, sem=<\P.(P hwang_ho)>] -> 'Hwang_Ho' 206 | PropN[num=sg, sem=<\P.(P hyderabad)>] -> 'Hyderabad' 207 | PropN[num=sg, sem=<\P.(P iceland)>] -> 'Iceland' 208 | PropN[num=sg, sem=<\P.(P india)>] -> 'India' 209 | PropN[num=sg, sem=<\P.(P indian_ocean)>] -> 'Indian_Ocean' 210 | PropN[num=sg, sem=<\P.(P indian_rupee)>] -> 'Indian_Rupee' 211 | PropN[num=sg, sem=<\P.(P indian_subcontinent)>] -> 'Indian_Subcontinent' 212 | PropN[num=sg, sem=<\P.(P indonesia)>] -> 'Indonesia' 213 | PropN[num=sg, sem=<\P.(P indus)>] -> 'Indus' 214 | PropN[num=sg, sem=<\P.(P iran)>] -> 'Iran' 215 | PropN[num=sg, sem=<\P.(P iraq)>] -> 'Iraq' 216 | PropN[num=sg, sem=<\P.(P irish_pound)>] -> 'Irish_Pound' 217 | PropN[num=sg, sem=<\P.(P irrawaddy)>] -> 'Irrawaddy' 218 | PropN[num=sg, sem=<\P.(P islamad)>] -> 'Islamad' 219 | PropN[num=sg, sem=<\P.(P israel)>] -> 'Israel' 220 | PropN[num=sg, sem=<\P.(P israeli_pound)>] -> 'Israeli_Pound' 221 | PropN[num=sg, sem=<\P.(P istanbul)>] -> 'Istanbul' 222 | PropN[num=sg, sem=<\P.(P italian_lira)>] -> 'Italian_Lira' 223 | PropN[num=sg, sem=<\P.(P italy)>] -> 'Italy' 224 | PropN[num=sg, sem=<\P.(P ivory_coast)>] -> 'Ivory_Coast' 225 | PropN[num=sg, sem=<\P.(P jakarta)>] -> 'Jakarta' 226 | PropN[num=sg, sem=<\P.(P jamaica)>] -> 'Jamaica' 227 | PropN[num=sg, sem=<\P.(P jamaican_dollar)>] -> 'Jamaican_Dollar' 228 | PropN[num=sg, sem=<\P.(P japan)>] -> 'Japan' 229 | PropN[num=sg, sem=<\P.(P jerusalem)>] -> 'Jerusalem' 230 | PropN[num=sg, sem=<\P.(P johannesburg)>] -> 'Johannesburg' 231 | PropN[num=sg, sem=<\P.(P jordan)>] -> 'Jordan' 232 | PropN[num=sg, sem=<\P.(P kabul)>] -> 'Kabul' 233 | PropN[num=sg, sem=<\P.(P kampala)>] -> 'Kampala' 234 | PropN[num=sg, sem=<\P.(P karachi)>] -> 'Karachi' 235 | PropN[num=sg, sem=<\P.(P katmandu)>] -> 'Katmandu' 236 | PropN[num=sg, sem=<\P.(P kenya)>] -> 'Kenya' 237 | PropN[num=sg, sem=<\P.(P kenya_shilling)>] -> 'Kenya_Shilling' 238 | PropN[num=sg, sem=<\P.(P khartoum)>] -> 'Khartoum' 239 | PropN[num=sg, sem=<\P.(P kiev)>] -> 'Kiev' 240 | PropN[num=sg, sem=<\P.(P kigali)>] -> 'Kigali' 241 | PropN[num=sg, sem=<\P.(P kingston)>] -> 'Kingston' 242 | PropN[num=sg, sem=<\P.(P kinshasa)>] -> 'Kinshasa' 243 | PropN[num=sg, sem=<\P.(P kip)>] -> 'Kip' 244 | PropN[num=sg, sem=<\P.(P kobe)>] -> 'Kobe' 245 | PropN[num=sg, sem=<\P.(P koruna)>] -> 'Koruna' 246 | PropN[num=sg, sem=<\P.(P kowloon)>] -> 'Kowloon' 247 | PropN[num=sg, sem=<\P.(P krona)>] -> 'Krona' 248 | PropN[num=sg, sem=<\P.(P krone)>] -> 'Krone' 249 | PropN[num=sg, sem=<\P.(P kuala_lumpa)>] -> 'Kuala_Lumpa' 250 | PropN[num=sg, sem=<\P.(P kuwait)>] -> 'Kuwait' 251 | PropN[num=sg, sem=<\P.(P kuwait_city)>] -> 'Kuwait_City' 252 | PropN[num=sg, sem=<\P.(P kuwaiti_dinar)>] -> 'Kuwaiti_Dinar' 253 | PropN[num=sg, sem=<\P.(P kwacha)>] -> 'Kwacha' 254 | PropN[num=sg, sem=<\P.(P kyat)>] -> 
'Kyat' 255 | PropN[num=sg, sem=<\P.(P kyoto)>] -> 'Kyoto' 256 | PropN[num=sg, sem=<\P.(P lagos)>] -> 'Lagos' 257 | PropN[num=sg, sem=<\P.(P laos)>] -> 'Laos' 258 | PropN[num=sg, sem=<\P.(P lebanese_pound)>] -> 'Lebanese_Pound' 259 | PropN[num=sg, sem=<\P.(P lebanon)>] -> 'Lebanon' 260 | PropN[num=sg, sem=<\P.(P lek)>] -> 'Lek' 261 | PropN[num=sg, sem=<\P.(P lempira)>] -> 'Lempira' 262 | PropN[num=sg, sem=<\P.(P lena)>] -> 'Lena' 263 | PropN[num=sg, sem=<\P.(P leningrad)>] -> 'Leningrad' 264 | PropN[num=sg, sem=<\P.(P leone)>] -> 'Leone' 265 | PropN[num=sg, sem=<\P.(P lesotho)>] -> 'Lesotho' 266 | PropN[num=sg, sem=<\P.(P leu)>] -> 'Leu' 267 | PropN[num=sg, sem=<\P.(P lev)>] -> 'Lev' 268 | PropN[num=sg, sem=<\P.(P liberia)>] -> 'Liberia' 269 | PropN[num=sg, sem=<\P.(P libreville)>] -> 'Libreville' 270 | PropN[num=sg, sem=<\P.(P libya)>] -> 'Libya' 271 | PropN[num=sg, sem=<\P.(P libyan_dinar)>] -> 'Libyan_Dinar' 272 | PropN[num=sg, sem=<\P.(P liechtenstein)>] -> 'Liechtenstein' 273 | PropN[num=sg, sem=<\P.(P lilageru)>] -> 'Lilageru' 274 | PropN[num=sg, sem=<\P.(P lima)>] -> 'Lima' 275 | PropN[num=sg, sem=<\P.(P limpopo)>] -> 'Limpopo' 276 | PropN[num=sg, sem=<\P.(P lira)>] -> 'Lira' 277 | PropN[num=sg, sem=<\P.(P lisbon)>] -> 'Lisbon' 278 | PropN[num=sg, sem=<\P.(P lome)>] -> 'Lome' 279 | PropN[num=sg, sem=<\P.(P london)>] -> 'London' 280 | PropN[num=sg, sem=<\P.(P los_angeles)>] -> 'Los_Angeles' 281 | PropN[num=sg, sem=<\P.(P luanda)>] -> 'Luanda' 282 | PropN[num=sg, sem=<\P.(P lusaka)>] -> 'Lusaka' 283 | PropN[num=sg, sem=<\P.(P luxembourg)>] -> 'Luxembourg' 284 | PropN[num=sg, sem=<\P.(P luxembourg_franc)>] -> 'Luxembourg_Franc' 285 | PropN[num=sg, sem=<\P.(P mackenzie)>] -> 'Mackenzie' 286 | PropN[num=sg, sem=<\P.(P madras)>] -> 'Madras' 287 | PropN[num=sg, sem=<\P.(P madrid)>] -> 'Madrid' 288 | PropN[num=sg, sem=<\P.(P malagasy)>] -> 'Malagasy' 289 | PropN[num=sg, sem=<\P.(P malawi)>] -> 'Malawi' 290 | PropN[num=sg, sem=<\P.(P malaysia)>] -> 'Malaysia' 291 | PropN[num=sg, sem=<\P.(P malaysian_dollar)>] -> 'Malaysian_Dollar' 292 | PropN[num=sg, sem=<\P.(P maldives)>] -> 'Maldives' 293 | PropN[num=sg, sem=<\P.(P male)>] -> 'Male' 294 | PropN[num=sg, sem=<\P.(P mali)>] -> 'Mali' 295 | PropN[num=sg, sem=<\P.(P mali_franc)>] -> 'Mali_Franc' 296 | PropN[num=sg, sem=<\P.(P malta)>] -> 'Malta' 297 | PropN[num=sg, sem=<\P.(P managua)>] -> 'Managua' 298 | PropN[num=sg, sem=<\P.(P manama)>] -> 'Manama' 299 | PropN[num=sg, sem=<\P.(P manila)>] -> 'Manila' 300 | PropN[num=sg, sem=<\P.(P maputo)>] -> 'Maputo' 301 | PropN[num=sg, sem=<\P.(P markka)>] -> 'Markka' 302 | PropN[num=sg, sem=<\P.(P masero)>] -> 'Masero' 303 | PropN[num=sg, sem=<\P.(P mauritania)>] -> 'Mauritania' 304 | PropN[num=sg, sem=<\P.(P mauritius)>] -> 'Mauritius' 305 | PropN[num=sg, sem=<\P.(P mbabane)>] -> 'Mbabane' 306 | PropN[num=sg, sem=<\P.(P mediterranean)>] -> 'the_Mediterranean' 307 | PropN[num=sg, sem=<\P.(P mekong)>] -> 'Mekong' 308 | PropN[num=sg, sem=<\P.(P melbourne)>] -> 'Melbourne' 309 | PropN[num=sg, sem=<\P.(P mexico)>] -> 'Mexico' 310 | PropN[num=sg, sem=<\P.(P mexico_city)>] -> 'Mexico_City' 311 | PropN[num=sg, sem=<\P.(P middle_east)>] -> 'Middle_East' 312 | PropN[num=sg, sem=<\P.(P milan)>] -> 'Milan' 313 | PropN[num=sg, sem=<\P.(P mississippi)>] -> 'Mississippi' 314 | PropN[num=sg, sem=<\P.(P mogadishu)>] -> 'Mogadishu' 315 | PropN[num=sg, sem=<\P.(P monaco)>] -> 'Monaco' 316 | PropN[num=sg, sem=<\P.(P mongolia)>] -> 'Mongolia' 317 | PropN[num=sg, sem=<\P.(P monrovia)>] -> 'Monrovia' 318 | PropN[num=sg, 
sem=<\P.(P montevideo)>] -> 'Montevideo' 319 | PropN[num=sg, sem=<\P.(P montreal)>] -> 'Montreal' 320 | PropN[num=sg, sem=<\P.(P morocco)>] -> 'Morocco' 321 | PropN[num=sg, sem=<\P.(P moscow)>] -> 'Moscow' 322 | PropN[num=sg, sem=<\P.(P mozambique)>] -> 'Mozambique' 323 | PropN[num=sg, sem=<\P.(P mukden)>] -> 'Mukden' 324 | PropN[num=sg, sem=<\P.(P murray)>] -> 'Murray' 325 | PropN[num=sg, sem=<\P.(P muscat)>] -> 'Muscat' 326 | PropN[num=sg, sem=<\P.(P n_djamena)>] -> 'N_Djamena' 327 | PropN[num=sg, sem=<\P.(P nagoya)>] -> 'Nagoya' 328 | PropN[num=sg, sem=<\P.(P naira)>] -> 'Naira' 329 | PropN[num=sg, sem=<\P.(P nairobi)>] -> 'Nairobi' 330 | PropN[num=sg, sem=<\P.(P nanking)>] -> 'Nanking' 331 | PropN[num=sg, sem=<\P.(P naples)>] -> 'Naples' 332 | PropN[num=sg, sem=<\P.(P nassau)>] -> 'Nassau' 333 | PropN[num=sg, sem=<\P.(P nepal)>] -> 'Nepal' 334 | PropN[num=sg, sem=<\P.(P nepalese_rupee)>] -> 'Nepalese_Rupee' 335 | PropN[num=sg, sem=<\P.(P netherlands)>] -> 'Netherlands' 336 | PropN[num=sg, sem=<\P.(P new_delhi)>] -> 'New_Delhi' 337 | PropN[num=sg, sem=<\P.(P new_york)>] -> 'New_York' 338 | PropN[num=sg, sem=<\P.(P new_zealand)>] -> 'New_Zealand' 339 | PropN[num=sg, sem=<\P.(P new_zealand_dollar)>] -> 'New_Zealand_Dollar' 340 | PropN[num=sg, sem=<\P.(P niamey)>] -> 'Niamey' 341 | PropN[num=sg, sem=<\P.(P nicaragua)>] -> 'Nicaragua' 342 | PropN[num=sg, sem=<\P.(P nicosia)>] -> 'Nicosia' 343 | PropN[num=sg, sem=<\P.(P niger)>] -> 'Niger' 344 | PropN[num=sg, sem=<\P.(P niger_river)>] -> 'Niger_River' 345 | PropN[num=sg, sem=<\P.(P nigeria)>] -> 'Nigeria' 346 | PropN[num=sg, sem=<\P.(P nile)>] -> 'Nile' 347 | PropN[num=sg, sem=<\P.(P north_africa)>] -> 'North_Africa' 348 | PropN[num=sg, sem=<\P.(P north_america)>] -> 'North_America' 349 | PropN[num=sg, sem=<\P.(P north_korea)>] -> 'North_Korea' 350 | PropN[num=sg, sem=<\P.(P northern_asia)>] -> 'Northern_Asia' 351 | PropN[num=sg, sem=<\P.(P norway)>] -> 'Norway' 352 | PropN[num=sg, sem=<\P.(P nouakchott)>] -> 'Nouakchott' 353 | PropN[num=sg, sem=<\P.(P nukualofa)>] -> 'Nukualofa' 354 | PropN[num=sg, sem=<\P.(P ob)>] -> 'Ob' 355 | PropN[num=sg, sem=<\P.(P oder)>] -> 'Oder' 356 | PropN[num=sg, sem=<\P.(P oman)>] -> 'Oman' 357 | PropN[num=sg, sem=<\P.(P orange)>] -> 'Orange' 358 | PropN[num=sg, sem=<\P.(P orinoco)>] -> 'Orinoco' 359 | PropN[num=sg, sem=<\P.(P osaka)>] -> 'Osaka' 360 | PropN[num=sg, sem=<\P.(P oslo)>] -> 'Oslo' 361 | PropN[num=sg, sem=<\P.(P ottawa)>] -> 'Ottawa' 362 | PropN[num=sg, sem=<\P.(P ouagadougou)>] -> 'Ouagadougou' 363 | PropN[num=sg, sem=<\P.(P ouguiya)>] -> 'Ouguiya' 364 | PropN[num=sg, sem=<\P.(P pa_anga)>] -> 'Pa_Anga' 365 | PropN[num=sg, sem=<\P.(P pacific)>] -> 'Pacific' 366 | PropN[num=sg, sem=<\P.(P pakistan)>] -> 'Pakistan' 367 | PropN[num=sg, sem=<\P.(P panama)>] -> 'Panama' 368 | PropN[num=sg, sem=<\P.(P papua_new_guinea)>] -> 'Papua_New_Guinea' 369 | PropN[num=sg, sem=<\P.(P paraguay)>] -> 'Paraguay' 370 | PropN[num=sg, sem=<\P.(P paramaribo)>] -> 'Paramaribo' 371 | PropN[num=sg, sem=<\P.(P parana)>] -> 'Parana' 372 | PropN[num=sg, sem=<\P.(P paris)>] -> 'Paris' 373 | PropN[num=sg, sem=<\P.(P pataca)>] -> 'Pataca' 374 | PropN[num=sg, sem=<\P.(P peking)>] -> 'Peking' 375 | PropN[num=sg, sem=<\P.(P persian_gulf)>] -> 'Persian_Gulf' 376 | PropN[num=sg, sem=<\P.(P peru)>] -> 'Peru' 377 | PropN[num=sg, sem=<\P.(P peseta)>] -> 'Peseta' 378 | PropN[num=sg, sem=<\P.(P peso)>] -> 'Peso' 379 | PropN[num=sg, sem=<\P.(P peveta)>] -> 'Peveta' 380 | PropN[num=sg, sem=<\P.(P philadelphia)>] -> 'Philadelphia' 381 | 
PropN[num=sg, sem=<\P.(P philippines)>] -> 'Philippines' 382 | PropN[num=sg, sem=<\P.(P phnom_penh)>] -> 'Phnom_Penh' 383 | PropN[num=sg, sem=<\P.(P piso)>] -> 'Piso' 384 | PropN[num=sg, sem=<\P.(P poland)>] -> 'Poland' 385 | PropN[num=sg, sem=<\P.(P port_au_prince)>] -> 'Port_Au_Prince' 386 | PropN[num=sg, sem=<\P.(P port_harcourt)>] -> 'Port_Harcourt' 387 | PropN[num=sg, sem=<\P.(P port_louis)>] -> 'Port_Louis' 388 | PropN[num=sg, sem=<\P.(P port_of_spain)>] -> 'Port_Of_Spain' 389 | PropN[num=sg, sem=<\P.(P porto_novo)>] -> 'Porto_Novo' 390 | PropN[num=sg, sem=<\P.(P portugal)>] -> 'Portugal' 391 | PropN[num=sg, sem=<\P.(P pound)>] -> 'Pound' 392 | PropN[num=sg, sem=<\P.(P prague)>] -> 'Prague' 393 | PropN[num=sg, sem=<\P.(P pretoria)>] -> 'Pretoria' 394 | PropN[num=sg, sem=<\P.(P pusan)>] -> 'Pusan' 395 | PropN[num=sg, sem=<\P.(P pvongvang)>] -> 'Pvongvang' 396 | PropN[num=sg, sem=<\P.(P qatar)>] -> 'Qatar' 397 | PropN[num=sg, sem=<\P.(P quetzal)>] -> 'Quetzal' 398 | PropN[num=sg, sem=<\P.(P quezon_city)>] -> 'Quezon_City' 399 | PropN[num=sg, sem=<\P.(P quito)>] -> 'Quito' 400 | PropN[num=sg, sem=<\P.(P rabat)>] -> 'Rabat' 401 | PropN[num=sg, sem=<\P.(P rand)>] -> 'Rand' 402 | PropN[num=sg, sem=<\P.(P rangoon)>] -> 'Rangoon' 403 | PropN[num=sg, sem=<\P.(P red_sea)>] -> 'Red_Sea' 404 | PropN[num=sg, sem=<\P.(P reykjavik)>] -> 'Reykjavik' 405 | PropN[num=sg, sem=<\P.(P rhine)>] -> 'Rhine' 406 | PropN[num=sg, sem=<\P.(P rhodesian_dollar)>] -> 'Rhodesian_Dollar' 407 | PropN[num=sg, sem=<\P.(P rhone)>] -> 'Rhone' 408 | PropN[num=sg, sem=<\P.(P rial)>] -> 'Rial' 409 | PropN[num=sg, sem=<\P.(P riel)>] -> 'Riel' 410 | PropN[num=sg, sem=<\P.(P rio_de_janeiro)>] -> 'Rio_De_Janeiro' 411 | PropN[num=sg, sem=<\P.(P rio_grande)>] -> 'Rio_Grande' 412 | PropN[num=sg, sem=<\P.(P riyadh)>] -> 'Riyadh' 413 | PropN[num=sg, sem=<\P.(P riyal)>] -> 'Riyal' 414 | PropN[num=sg, sem=<\P.(P riyal_omani)>] -> 'Riyal_Omani' 415 | PropN[num=sg, sem=<\P.(P romania)>] -> 'Romania' 416 | PropN[num=sg, sem=<\P.(P rome)>] -> 'Rome' 417 | PropN[num=sg, sem=<\P.(P ruble)>] -> 'Ruble' 418 | PropN[num=sg, sem=<\P.(P rupee)>] -> 'Rupee' 419 | PropN[num=sg, sem=<\P.(P rupiah)>] -> 'Rupiah' 420 | PropN[num=sg, sem=<\P.(P rwanda)>] -> 'Rwanda' 421 | PropN[num=sg, sem=<\P.(P rwanda_franc)>] -> 'Rwanda_Franc' 422 | PropN[num=sg, sem=<\P.(P saigon)>] -> 'Saigon' 423 | PropN[num=sg, sem=<\P.(P salisbury)>] -> 'Salisbury' 424 | PropN[num=sg, sem=<\P.(P salween)>] -> 'Salween' 425 | PropN[num=sg, sem=<\P.(P san_jose)>] -> 'San_Jose' 426 | PropN[num=sg, sem=<\P.(P san_marino)>] -> 'San_Marino' 427 | PropN[num=sg, sem=<\P.(P san_salvador)>] -> 'San_Salvador' 428 | PropN[num=sg, sem=<\P.(P sana)>] -> 'Sana' 429 | PropN[num=sg, sem=<\P.(P santa_domingo)>] -> 'Santa_Domingo' 430 | PropN[num=sg, sem=<\P.(P santa_isabel)>] -> 'Santa_Isabel' 431 | PropN[num=sg, sem=<\P.(P santiago)>] -> 'Santiago' 432 | PropN[num=sg, sem=<\P.(P sao_paulo)>] -> 'Sao_Paulo' 433 | PropN[num=sg, sem=<\P.(P saudi_arabia)>] -> 'Saudi_Arabia' 434 | PropN[num=sg, sem=<\P.(P scandinavia)>] -> 'Scandinavia' 435 | PropN[num=sg, sem=<\P.(P schilling)>] -> 'Schilling' 436 | PropN[num=sg, sem=<\P.(P senegal)>] -> 'Senegal' 437 | PropN[num=sg, sem=<\P.(P senegal_river)>] -> 'Senegal_River' 438 | PropN[num=sg, sem=<\P.(P seoul)>] -> 'Seoul' 439 | PropN[num=sg, sem=<\P.(P seychelles)>] -> 'Seychelles' 440 | PropN[num=sg, sem=<\P.(P shanghai)>] -> 'Shanghai' 441 | PropN[num=sg, sem=<\P.(P sian)>] -> 'Sian' 442 | PropN[num=sg, sem=<\P.(P sierra_leone)>] -> 'Sierra_Leone' 443 | 
PropN[num=sg, sem=<\P.(P singapore)>] -> 'Singapore' 444 | PropN[num=sg, sem=<\P.(P singapore_city)>] -> 'Singapore_City' 445 | PropN[num=sg, sem=<\P.(P singapore_dollar)>] -> 'Singapore_Dollar' 446 | PropN[num=sg, sem=<\P.(P sofia)>] -> 'Sofia' 447 | PropN[num=sg, sem=<\P.(P sol)>] -> 'Sol' 448 | PropN[num=sg, sem=<\P.(P somali_shilling)>] -> 'Somali_Shilling' 449 | PropN[num=sg, sem=<\P.(P somalia)>] -> 'Somalia' 450 | PropN[num=sg, sem=<\P.(P south_africa)>] -> 'South_Africa' 451 | PropN[num=sg, sem=<\P.(P south_african_rand)>] -> 'South_African_Rand' 452 | PropN[num=sg, sem=<\P.(P south_america)>] -> 'South_America' 453 | PropN[num=sg, sem=<\P.(P south_korea)>] -> 'South_Korea' 454 | PropN[num=sg, sem=<\P.(P south_yemen)>] -> 'South_Yemen' 455 | PropN[num=sg, sem=<\P.(P southeast_east)>] -> 'Southeast_East' 456 | PropN[num=sg, sem=<\P.(P southern_africa)>] -> 'Southern_Africa' 457 | PropN[num=sg, sem=<\P.(P southern_europe)>] -> 'Southern_Europe' 458 | PropN[num=sg, sem=<\P.(P southern_ocean)>] -> 'Southern_Ocean' 459 | PropN[num=sg, sem=<\P.(P soviet_union)>] -> 'Soviet_Union' 460 | PropN[num=sg, sem=<\P.(P spain)>] -> 'Spain' 461 | PropN[num=sg, sem=<\P.(P sri_lanka)>] -> 'Sri_Lanka' 462 | PropN[num=sg, sem=<\P.(P st_georges)>] -> 'St_Georges' 463 | PropN[num=sg, sem=<\P.(P stockholm)>] -> 'Stockholm' 464 | PropN[num=sg, sem=<\P.(P sucre)>] -> 'Sucre' 465 | PropN[num=sg, sem=<\P.(P sudan)>] -> 'Sudan' 466 | PropN[num=sg, sem=<\P.(P surinam)>] -> 'Surinam' 467 | PropN[num=sg, sem=<\P.(P suva)>] -> 'Suva' 468 | PropN[num=sg, sem=<\P.(P swaziland)>] -> 'Swaziland' 469 | PropN[num=sg, sem=<\P.(P sweden)>] -> 'Sweden' 470 | PropN[num=sg, sem=<\P.(P swiss_franc)>] -> 'Swiss_Franc' 471 | PropN[num=sg, sem=<\P.(P switzerland)>] -> 'Switzerland' 472 | PropN[num=sg, sem=<\P.(P sydney)>] -> 'Sydney' 473 | PropN[num=sg, sem=<\P.(P syli)>] -> 'Syli' 474 | PropN[num=sg, sem=<\P.(P syria)>] -> 'Syria' 475 | PropN[num=sg, sem=<\P.(P syrian_pound)>] -> 'Syrian_Pound' 476 | PropN[num=sg, sem=<\P.(P tagus)>] -> 'Tagus' 477 | PropN[num=sg, sem=<\P.(P taipei)>] -> 'Taipei' 478 | PropN[num=sg, sem=<\P.(P taiwan)>] -> 'Taiwan' 479 | PropN[num=sg, sem=<\P.(P taiwan_dollar)>] -> 'Taiwan_Dollar' 480 | PropN[num=sg, sem=<\P.(P taka)>] -> 'Taka' 481 | PropN[num=sg, sem=<\P.(P tala)>] -> 'Tala' 482 | PropN[num=sg, sem=<\P.(P tananarive)>] -> 'Tananarive' 483 | PropN[num=sg, sem=<\P.(P tanzania)>] -> 'Tanzania' 484 | PropN[num=sg, sem=<\P.(P tanzanian_shilling)>] -> 'Tanzanian_Shilling' 485 | PropN[num=sg, sem=<\P.(P tegucigalpa)>] -> 'Tegucigalpa' 486 | PropN[num=sg, sem=<\P.(P tehran)>] -> 'Tehran' 487 | PropN[num=sg, sem=<\P.(P thailand)>] -> 'Thailand' 488 | PropN[num=sg, sem=<\P.(P thimphu)>] -> 'Thimphu' 489 | PropN[num=sg, sem=<\P.(P tientsin)>] -> 'Tientsin' 490 | PropN[num=sg, sem=<\P.(P tighrik)>] -> 'Tighrik' 491 | PropN[num=sg, sem=<\P.(P tirana)>] -> 'Tirana' 492 | PropN[num=sg, sem=<\P.(P togo)>] -> 'Togo' 493 | PropN[num=sg, sem=<\P.(P tokyo)>] -> 'Tokyo' 494 | PropN[num=sg, sem=<\P.(P tonga)>] -> 'Tonga' 495 | PropN[num=sg, sem=<\P.(P toronto)>] -> 'Toronto' 496 | PropN[num=sg, sem=<\P.(P trinidad_and_tobago)>] -> 'Trinidad_And_Tobago' 497 | PropN[num=sg, sem=<\P.(P trinidad_and_tobago_dollar)>] -> 'Trinidad_And_Tobago_Dollar' 498 | PropN[num=sg, sem=<\P.(P tripoli)>] -> 'Tripoli' 499 | PropN[num=sg, sem=<\P.(P tropic_of_cancer)>] -> 'Tropic_Of_Cancer' 500 | PropN[num=sg, sem=<\P.(P tropic_of_capricorn)>] -> 'Tropic_Of_Capricorn' 501 | PropN[num=sg, sem=<\P.(P tunis)>] -> 'Tunis' 502 | 
PropN[num=sg, sem=<\P.(P tunisia)>] -> 'Tunisia' 503 | PropN[num=sg, sem=<\P.(P turkey)>] -> 'Turkey' 504 | PropN[num=sg, sem=<\P.(P uganda)>] -> 'Uganda' 505 | PropN[num=sg, sem=<\P.(P uganda_shilling)>] -> 'Uganda_Shilling' 506 | PropN[num=sg, sem=<\P.(P ulan_bator)>] -> 'Ulan_Bator' 507 | PropN[num=sg, sem=<\P.(P united_arab_emirates)>] -> 'United_Arab_Emirates' 508 | PropN[num=sg, sem=<\P.(P united_kingdom)>] -> 'United_Kingdom' 509 | PropN[num=sg, sem=<\P.(P united_states)>] -> 'United_States' 510 | PropN[num=sg, sem=<\P.(P upper_volta)>] -> 'Upper_Volta' 511 | PropN[num=sg, sem=<\P.(P uruguay)>] -> 'Uruguay' 512 | PropN[num=sg, sem=<\P.(P us_dollar)>] -> 'Us_Dollar' 513 | PropN[num=sg, sem=<\P.(P vaduz)>] -> 'Vaduz' 514 | PropN[num=sg, sem=<\P.(P valetta)>] -> 'Valetta' 515 | PropN[num=sg, sem=<\P.(P venezuela)>] -> 'Venezuela' 516 | PropN[num=sg, sem=<\P.(P victoria)>] -> 'Victoria' 517 | PropN[num=sg, sem=<\P.(P vienna)>] -> 'Vienna' 518 | PropN[num=sg, sem=<\P.(P vientiane)>] -> 'Vientiane' 519 | PropN[num=sg, sem=<\P.(P vietnam)>] -> 'Vietnam' 520 | PropN[num=sg, sem=<\P.(P vistula)>] -> 'Vistula' 521 | PropN[num=sg, sem=<\P.(P volga)>] -> 'Volga' 522 | PropN[num=sg, sem=<\P.(P volta)>] -> 'Volta' 523 | PropN[num=sg, sem=<\P.(P warsaw)>] -> 'Warsaw' 524 | PropN[num=sg, sem=<\P.(P washington)>] -> 'Washington' 525 | PropN[num=sg, sem=<\P.(P wellington)>] -> 'Wellington' 526 | PropN[num=sg, sem=<\P.(P west_africa)>] -> 'West_Africa' 527 | PropN[num=sg, sem=<\P.(P west_germany)>] -> 'West_Germany' 528 | PropN[num=sg, sem=<\P.(P western_europe)>] -> 'Western_Europe' 529 | PropN[num=sg, sem=<\P.(P western_samoa)>] -> 'Western_Samoa' 530 | PropN[num=sg, sem=<\P.(P won)>] -> 'Won' 531 | PropN[num=sg, sem=<\P.(P yangtze)>] -> 'Yangtze' 532 | PropN[num=sg, sem=<\P.(P yaounde)>] -> 'Yaounde' 533 | PropN[num=sg, sem=<\P.(P yemen)>] -> 'Yemen' 534 | PropN[num=sg, sem=<\P.(P yen)>] -> 'Yen' 535 | PropN[num=sg, sem=<\P.(P yenisei)>] -> 'Yenisei' 536 | PropN[num=sg, sem=<\P.(P yokohama)>] -> 'Yokohama' 537 | PropN[num=sg, sem=<\P.(P yuan)>] -> 'Yuan' 538 | PropN[num=sg, sem=<\P.(P yugoslavia)>] -> 'Yugoslavia' 539 | PropN[num=sg, sem=<\P.(P yukon)>] -> 'Yukon' 540 | PropN[num=sg, sem=<\P.(P zaire)>] -> 'Zaire' 541 | PropN[num=sg, sem=<\P.(P zambesi)>] -> 'Zambesi' 542 | PropN[num=sg, sem=<\P.(P zambia)>] -> 'Zambia' 543 | PropN[num=sg, sem=<\P.(P zimbabwe)>] -> 'Zimbabwe' 544 | PropN[num=sg, sem=<\P.(P zloty)>] -> 'Zloty' 545 | PropN[num=sg, sem=<\P.(P zomba)>] -> 'Zomba' 546 | --------------------------------------------------------------------------------
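The entries above are lexical rules of an NLTK feature-based CFG: each proper noun carries a type-raised semantics such as `<\P.(P paris)>`, so the noun supplies an individual constant to whatever property the rest of the sentence contributes. As a minimal sketch (not part of the repository), the fragment below borrows two PropN lines written in this style and adds hypothetical S, NP, VP and IV rules so the lexicon format can actually be parsed with NLTK's `FeatureGrammar` and `FeatureChartParser`; the sentence-level rules and the verb 'exists' are illustrative assumptions, not taken from the grammar file.

```python
from nltk.grammar import FeatureGrammar
from nltk.parse import FeatureChartParser

# Tiny illustrative fragment: the two PropN rules mirror the lexicon format
# shown above; the S/NP/VP/IV rules are hypothetical scaffolding added so
# that a sentence can be parsed and a semantics composed.
toy = FeatureGrammar.fromstring(r"""
% start S
S[sem=<?subj(?vp)>] -> NP[num=?n, sem=?subj] VP[num=?n, sem=?vp]
NP[num=?n, sem=?np] -> PropN[num=?n, sem=?np]
VP[num=?n, sem=?v] -> IV[num=?n, sem=?v]
IV[num=sg, sem=<\x.exist(x)>] -> 'exists'
PropN[num=sg, sem=<\P.(P paris)>] -> 'Paris'
PropN[num=sg, sem=<\P.(P london)>] -> 'London'
""")

parser = FeatureChartParser(toy)
for tree in parser.parse('Paris exists'.split()):
    # The root label carries the composed 'sem' feature; beta-reduction
    # of (\P.(P paris))(\x.exist(x)) yields exist(paris).
    print(tree.label()['sem'].simplify())
```

Run this way, the type-raised PropN semantics is applied to the VP's lambda term during parsing, and the printed result should be the reduced logical form `exist(paris)`.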