├── .editorconfig
├── .gitattributes
├── .gitignore
├── .travis.yml
├── LICENSE
├── Pipfile
├── Pipfile.lock
├── README.md
├── docs
│   ├── README.md
│   ├── __init__.py
│   ├── md_autogen.py
│   ├── mkdocs.yml
│   ├── templates
│   │   ├── css
│   │   │   └── extras.css
│   │   └── visualizations
│   │       ├── activation_maximization.md
│   │       ├── class_activation_maps.md
│   │       └── saliency.md
│   └── update_docs.py
├── examples
│   ├── cnn_imdb.py
│   └── fasttext_imdb.py
├── pytest.ini
├── requirements.txt
├── setup.cfg
├── setup.py
├── tests
│   ├── embeddings.py
│   ├── models
│   │   ├── test_sentence_model.py
│   │   └── test_token_model.py
│   ├── preprocessing.py
│   └── train.py
└── texcla
    ├── __init__.py
    ├── corpus.py
    ├── data.py
    ├── embeddings.py
    ├── experiment.py
    ├── libs
    │   ├── __init__.py
    │   ├── fastTextWikiTokenizer
    │   │   ├── __init__.py
    │   │   ├── subprocess_fix.py
    │   │   └── tokenize.py
    │   ├── ngrams
    │   │   ├── __init__.py
    │   │   └── ngrams.py
    │   └── twokenize
    │       ├── LICENSE
    │       ├── __init__.py
    │       └── twokenize.py
    ├── models
    │   ├── __init__.py
    │   ├── layers.py
    │   ├── sentence_model.py
    │   ├── sequence_encoders.py
    │   └── token_model.py
    ├── preprocessing
    │   ├── __init__.py
    │   ├── char_tokenizer.py
    │   ├── sentence_tokenizer.py
    │   ├── tokenizer.py
    │   ├── utils.py
    │   └── word_tokenizer.py
    └── utils
        ├── __init__.py
        ├── format.py
        ├── generators.py
        ├── io.py
        └── sampling.py
/.editorconfig:
--------------------------------------------------------------------------------
1 | # http://editorconfig.org
2 |
3 | root = true
4 |
5 | [*]
6 | indent_style = space
7 | indent_size = 4
8 | insert_final_newline = true
9 | trim_trailing_whitespace = true
10 | end_of_line = lf
11 | charset = utf-8
12 |
13 | [*.py]
14 | max_line_length = 119
15 |
16 | [*.md]
17 | insert_final_newline = false
18 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Count notebooks as python.
2 | *.ipynb linguist-language=Python
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # dev-env
2 | .idea/
3 | *.iml
4 |
5 | # docs
6 | docs/site/*
7 | docs/sources/*
8 |
9 | # test-related
10 | .coverage
11 | .cache
12 |
13 | text_classification_keras.egg-info
14 | dist/*
15 | build/*
16 | *.pyc
17 | .ipynb_checkpoints
18 |
19 | # Datasets
20 | examples/datasets/*
21 | .vscode
22 | .pytest_cache
23 |
24 | *.bin
25 | experiments
26 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | dist: trusty
2 | language: python
3 | cache:
4 | packages: true
5 | directories:
6 | - $HOME/.keras
7 | - $HOME/.cache/pip
8 |
9 | matrix:
10 | include:
11 | - python: 2.7
12 | env: KERAS_BACKEND=tensorflow TEST_MODE=PEP8
13 | - python: 2.7
14 | env: KERAS_BACKEND=tensorflow
15 | - python: 3.5
16 | env: KERAS_BACKEND=tensorflow
17 | - python: 3.6
18 | env: KERAS_BACKEND=tensorflow
19 |
20 | install:
21 | - pip install pytest-pep8 pytest-cov
22 |
23 | - pip install --upgrade pytest
24 |
25 | # install TensorFlow
26 | - pip install tensorflow
27 |
28 | # install package locally
29 | - pip install .[full]
30 |
31 | # download spacy model
32 | - "python -m spacy download en"
33 |
34 | script:
35 | # run keras backend init to initialize backend config
36 | - python -c "import keras.backend"
37 | # set up keras backend
38 | - sed -i -e 's/"backend":[[:space:]]*"[^"]*/"backend":\ "'$KERAS_BACKEND'/g' ~/.keras/keras.json;
39 | - echo -e "Running tests with the following config:\n$(cat ~/.keras/keras.json)"
40 | - if [[ "$TEST_MODE" == "PEP8" ]]; then
41 | PYTHONPATH=$PWD:$PYTHONPATH pytest --pep8 -m pep8;
42 | else
43 | PYTHONPATH=$PWD:$PYTHONPATH/tests pytest tests/*;
44 | fi
45 |
46 | notifications:
47 | email: false
48 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 Raghavendra Kotikalapudi, Johannes Filter
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Pipfile:
--------------------------------------------------------------------------------
1 | [[source]]
2 | url = "https://pypi.org/simple"
3 | verify_ssl = true
4 | name = "pypi"
5 |
6 | [packages]
7 | keras = "*"
8 | tensorflow = "*"
9 | six = "*"
10 | spacy = "*"
11 | joblib = "*"
12 | jsonpickle = "*"
13 | scikit-learn = "*"
14 | numpy = "*"
15 | deep-plots = "*"
16 | text-classification-keras = {editable = true,path = "."}
17 |
18 | [dev-packages]
19 | pylint = "*"
20 | autopep8 = "*"
21 | pytest = "*"
22 | mkdocs = "*"
23 | python-markdown-math = "*"
24 |
25 | [requires]
26 | python_version = "3.6"
27 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Text Classification Keras [](https://travis-ci.com/jfilter/text-classification-keras) [](https://pypi.org/project/text-classification-keras/) [](https://pypi.org/project/text-classification-keras/) [](https://gitter.im/text-classification-keras/Lobby)
2 |
3 | A high-level text classification library implementing various well-established models, with a clean and extendable interface for implementing custom architectures.
4 |
5 | ## Quick start
6 |
7 | ### Install
8 |
9 | ```bash
10 | pip install text-classification-keras[full]
11 | ```
12 |
13 | The `[full]` extra additionally installs [TensorFlow](https://github.com/tensorflow/tensorflow), [spaCy](https://github.com/explosion/spaCy), and [Deep Plots](https://github.com/jfilter/deep-plots). Choose this if you want to get started right away.
14 |
15 | ### Usage
16 |
17 | ```python
18 | from texcla import experiment, data
19 | from texcla.models import TokenModelFactory, YoonKimCNN
20 | from texcla.preprocessing import FastTextWikiTokenizer
21 |
22 | # input text
23 | X = ['some random text', 'another random text lala', 'peter', ...]
24 |
25 | # input labels
26 | y = ['a', 'b', 'a', ...]
27 |
28 | # use the special tokenizer used for constructing the embeddings
29 | tokenizer = FastTextWikiTokenizer()
30 |
31 | # preprocess data (once)
32 | experiment.setup_data(X, y, tokenizer, 'data.bin', max_len=100)
33 |
34 | # load data
35 | ds = data.Dataset.load('data.bin')
36 |
37 | # construct base
38 | factory = TokenModelFactory(
39 | ds.num_classes, ds.tokenizer.token_index, max_tokens=100,
40 | embedding_type='fasttext.wiki.simple', embedding_dims=300)
41 |
42 | # choose a model
43 | word_encoder_model = YoonKimCNN()
44 |
45 | # build a model
46 | model = factory.build_model(
47 | token_encoder_model=word_encoder_model, trainable_embeddings=False)
48 |
49 | # use experiment.train as wrapper for Keras.fit()
50 | experiment.train(x=ds.X, y=ds.y, validation_split=0.1, model=model,
51 | word_encoder_model=word_encoder_model)
52 | ```
53 |
54 | Check out more [examples](./examples).
55 |
56 | ## API Documentation
57 |
58 |
59 |
60 | ## Advanced
61 |
62 | ### Embeddings
63 |
64 | Choose a pre-trained word embedding by setting `embedding_type` and the corresponding `embedding_dims`. Set `embedding_type=None` to initialize the word embeddings randomly (but make sure to set `trainable_embeddings=True` so you actually train the embeddings); a sketch of that case follows the example below.
65 |
66 | ```python
67 | factory = TokenModelFactory(embedding_type='fasttext.wiki.simple', embedding_dims=300)
68 | ```
69 |
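If you instead go with randomly initialized embeddings, the call might look like this minimal sketch (reusing `ds` and `YoonKimCNN` from the quick-start example above; the concrete values are placeholders):

```python
from texcla.models import TokenModelFactory, YoonKimCNN

# randomly initialized embeddings: embedding_type=None, so keep them trainable
factory = TokenModelFactory(
    ds.num_classes, ds.tokenizer.token_index, max_tokens=100,
    embedding_type=None, embedding_dims=300)
model = factory.build_model(
    token_encoder_model=YoonKimCNN(), trainable_embeddings=True)
```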
70 | #### FastText
71 |
72 | Several pre-trained [FastText](https://fasttext.cc/) embeddings are included. For now, we only have the word embeddings and not the n-gram features. All embeddings have 300 dimensions.
73 |
74 | - [English Vectors](https://fasttext.cc/docs/en/english-vectors.html): e.g. `fasttext.wn.1M.300d`, [check out all available embeddings](https://github.com/jfilter/text-classification-keras/blob/master/texcla/embeddings.py#L19)
75 | - [Multilang Vectors](https://fasttext.cc/docs/en/crawl-vectors.html): in the format `fasttext.cc.LANG_CODE` e.g. `fasttext.cc.en`
76 | - [Wikipedia Vectors](https://fasttext.cc/docs/en/pretrained-vectors.html): in the format `fasttext.wiki.LANG_CODE` e.g. `fasttext.wiki.en`
77 |
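For instance, to use the German common-crawl vectors, the factory call from above becomes (a sketch; any identifier following the naming schemes listed above works the same way, assuming the language code is among the published ones):

```python
# 'fasttext.cc.de' follows the `fasttext.cc.LANG_CODE` scheme described above
factory = TokenModelFactory(ds.num_classes, ds.tokenizer.token_index, max_tokens=100,
                            embedding_type='fasttext.cc.de', embedding_dims=300)
```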
78 | #### GloVe
79 |
80 | The [GloVe](https://nlp.stanford.edu/projects/glove/) embeddings are a predecessor to FastText. In general, choose FastText embeddings over GloVe. The dimensions of the pre-trained embeddings vary.
81 |
82 | - e.g. `glove.6B.50d`, [check out all available embeddings](https://github.com/jfilter/text-classification-keras/blob/master/texcla/embeddings.py#L19)
83 |
84 | ### Tokenization
85 |
86 | - To work on token (or word) level, use a word-level tokenizer such as `TwokenizeTokenizer` or `SpacyTokenizer` (see the sketch below this list).
87 | - To work on token and sentence level, use `SpacySentenceTokenizer`.
88 | - To create a custom Tokenizer, extend `Tokenizer` and implement the `token_generator` method.
89 |
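A minimal sketch of the token-level workflow, mirroring `tests/preprocessing.py` (`X` is assumed to be a list of raw strings):

```python
from texcla.preprocessing import SpacyTokenizer

tokenizer = SpacyTokenizer()
tokenizer.build_vocab(X)            # build the vocabulary from the raw texts
X_enc = tokenizer.encode_texts(X)   # encode each text as a sequence of token indices
X_pad = tokenizer.pad_sequences(X_enc, fixed_token_seq_length=50)  # pad/truncate to a fixed length
```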
90 | #### Spacy
91 |
92 | You may use [spaCy](https://spacy.io/) for the tokenization. See instructions on how to
93 | [download model](https://spacy.io/docs/usage/models#download) for your target language. E.g. for English:
94 |
95 | ```bash
96 | python -m spacy download en
97 | ```
98 |
99 | ### Models
100 |
101 | #### Token-based Models
102 |
103 | When working on token level, use `TokenModelFactory`.
104 |
105 | ```python
106 | from texcla.models import TokenModelFactory, YoonKimCNN
107 |
108 | factory = TokenModelFactory(tokenizer.num_classes, tokenizer.token_index,
109 | max_tokens=100, embedding_type='glove.6B.100d')
110 | word_encoder_model = YoonKimCNN()
111 | model = factory.build_model(token_encoder_model=word_encoder_model)
112 | ```
113 |
114 | Currently supported models include:
115 |
116 | - [Yoon Kim CNN](https://arxiv.org/abs/1408.5882)
117 | - [Stacked RNNs](https://arxiv.org/abs/1312.6026)
118 | - [Attention (with/without context) based RNN encoders](https://www.cs.cmu.edu/~hovy/papers/16HLT-hierarchical-attention-networks.pdf)
119 |
120 | `TokenModelFactory.build_model` uses the provided word encoder and classifies its output via a [Dense](https://keras.io/layers/core/#dense) layer.
121 |
122 | #### Sentence-based Models
123 |
124 | When working on sentence level, use `SentenceModelFactory`.
125 |
126 | ```python
127 | # Pad max sentences per doc to 500 and max words per sentence to 200.
128 | # Can also use `max_sents=None` to allow variable sized max_sents per mini-batch.
129 |
130 | factory = SentenceModelFactory(10, tokenizer.token_index, max_sents=500,
131 | max_tokens=200, embedding_type='glove.6B.100d')
132 | word_encoder_model = AttentionRNN()
133 | sentence_encoder_model = AttentionRNN()
134 |
135 | # Allows you to compose arbitrary word encoders followed by sentence encoder.
136 | model = factory.build_model(word_encoder_model, sentence_encoder_model)
137 | ```
138 |
139 | - [Hierarchical attention networks](http://www.cs.cmu.edu/~./hovy/papers/16HLT-hierarchical-attention-networks.pdf)
140 | (HANs) can be built by composing two attention-based RNN models. This is useful when a document is very large.
141 | - For smaller documents, a reasonable way to encode sentences is to average the words within them. This can be done by using
142 | `token_encoder_model=AveragingEncoder()` (see the sketch below this list).
143 | - Mix and match encoders as you see fit for your problem.
144 |
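For example, a small-document setup could average the words per sentence and attend over the sentence encodings; this sketch reuses the factory arguments from the snippet above:

```python
from texcla.models import SentenceModelFactory, AveragingEncoder, AttentionRNN

factory = SentenceModelFactory(10, tokenizer.token_index, max_sents=500,
                               max_tokens=200, embedding_type='glove.6B.100d')
# average the words within each sentence, then attend over the sentence encodings
model = factory.build_model(AveragingEncoder(), AttentionRNN())
```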
145 | `SentenceModelFactory.build_model` creates a tiered model where the words within a sentence are first encoded using
146 | `word_encoder_model`. All such per-sentence encodings are then encoded using `sentence_encoder_model`.
147 |
148 | ## Related
149 |
150 | - https://github.com/brightmart/text_classification
151 | - https://github.com/allenai/allennlp
152 | - https://github.com/facebookresearch/pytext
153 | - https://docs.fast.ai/text.html
154 | - https://github.com/dkpro/dkpro-tc
155 |
156 | ## Contributing
157 |
158 | If you have a **question**, found a **bug** or want to propose a new **feature**, have a look at the [issues page](https://github.com/jfilter/text-classification-keras/issues).
159 |
160 | **Pull requests** are especially welcomed when they fix bugs or improve the code quality.
161 |
162 | ## Acknowledgements
163 |
164 | Built upon the work by Raghavendra Kotikalapudi: [keras-text](https://github.com/raghakot/keras-text).
165 |
166 | ## Citation
167 |
168 | If you find Text Classification Keras useful for an academic publication, then please use the following BibTeX to cite it:
169 |
170 | ```tex
171 | @misc{raghakotfiltertexclakeras,
172 | title={Text Classification Keras},
173 | author={Raghavendra Kotikalapudi and Johannes Filter and contributors},
174 | year={2018},
175 | publisher={GitHub},
176 | howpublished={\url{https://github.com/jfilter/text-classification-keras}},
177 | }
178 | ```
179 |
180 | ## License
181 |
182 | MIT.
183 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Documentation
2 |
3 | We use extended Markdown, as implemented by [MkDocs](http://mkdocs.org), for documentation.
4 | The API docs are autogenerated using `MarkdownAPIGenerator` in `md_autogen.py`.
5 |
6 | ## Dependencies
7 |
8 | ```bash
9 | sudo pip install mkdocs
10 | sudo pip install https://github.com/mitya57/python-markdown-math/archive/master.zip
11 | ```
12 |
13 | ## Updating Documentation
14 |
15 | Run `./update_docs.py [--live]`. It will
16 |
17 | - Copy `README.md` to `sources/index.md`
18 | - Generate API docs for the modules.
19 | - `mkdocs build` without `--live`: builds the site and starts an HTTP server at localhost:8000 to debug the docs
20 | - `mkdocs gh-deploy` with `--live`: deploys the docs live on GitHub
21 |
--------------------------------------------------------------------------------
/docs/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jfilter/text-classification-keras/85882306d00242a4e6ead90d94f8a1f98a86535a/docs/__init__.py
--------------------------------------------------------------------------------
/docs/md_autogen.py:
--------------------------------------------------------------------------------
1 | """
2 | Parses source code to generate API docs in markdown.
3 | """
4 |
5 | import os
6 | import re
7 | import inspect
8 | from inspect import getdoc, getargspec, getsourcefile, getsourcelines, getmembers
9 | from collections import defaultdict
10 |
11 | import sys
12 | if sys.version[0] == '2':
13 | reload(sys)
14 | sys.setdefaultencoding('utf8')
15 |
16 | _RE_BLOCKSTART = re.compile(r"(Args:|Arg:|Kwargs:|Returns:|Yields:|Raises:|Notes:|Note:|Examples:|Example:)",
17 | re.IGNORECASE)
18 | _RE_ARGSTART = re.compile(r"(\w*?)\s*?\((.*?)\):(.*)", re.IGNORECASE)
19 | _RE_EXCSTART = re.compile(r"(\w*?):(.*)", re.IGNORECASE)
20 | _RE_URL = re.compile(
21 | r"(https?:\/\/(.+?)(\/.*).*(\.html))", re.IGNORECASE)
22 |
23 |
24 | #
25 | # String templates
26 | #
27 |
28 | FUNC_TEMPLATE = """-------------------
29 |
30 | {section} [{header}]({path})
31 |
32 | ```python
33 | {funcdef}
34 | ```
35 |
36 | {doc}
37 |
38 | """
39 |
40 | CLASS_TEMPLATE = """-------------------
41 |
42 | {section} [{header}]({path})
43 |
44 | {doc}
45 |
46 | {variables}
47 |
48 | {init}
49 |
50 | {handlers}
51 |
52 | {methods}
53 |
54 | """
55 |
56 | MODULE_TEMPLATE = """
57 | **Source:** {path}
58 |
59 | {global_vars}
60 |
61 | {functions}
62 |
63 | {classes}
64 |
65 | """
66 |
67 |
68 | def make_iter(obj):
69 | """ Makes an iterable
70 | """
71 | return obj if hasattr(obj, '__iter__') else [obj]
72 |
73 |
74 | def order_by_line_nos(objs, line_nos):
75 | """Orders the set of `objs` by `line_nos`
76 | """
77 | ordering = sorted(range(len(line_nos)), key=line_nos.__getitem__)
78 | return [objs[i] for i in ordering]
79 |
80 |
81 | def to_md_file(string, filename, out_path="."):
82 | """Import a module path and create an api doc from it
83 |
84 | Args:
85 | string (str): string with line breaks to write to file.
86 | filename (str): filename without the .md
87 | out_path (str): The output directory
88 | """
89 | md_file = "%s.md" % filename
90 | with open(os.path.join(out_path, md_file), "w") as f:
91 | f.write(string)
92 | print("wrote {}.".format(md_file))
93 |
94 |
95 | class MarkdownAPIGenerator(object):
96 | def __init__(self, src_root, github_link):
97 | """Initializes the markdown api generator.
98 |
99 | Args:
100 | src_root: The root folder name containing all the sources.
101 | Ex: src
102 | github_link: The base github link. Should include branch name.
103 | Ex: https://github.com/raghakot/keras-vis/tree/master
104 | All source links are generated with this prefix.
105 | """
106 | self.src_root = src_root
107 | self.github_link = github_link
108 |
109 | def get_line_no(self, obj):
110 | """Gets the source line number of this object. None if `obj` code cannot be found.
111 | """
112 | try:
113 | lineno = getsourcelines(obj)[1]
114 | except:
115 | # no code found
116 | lineno = None
117 | return lineno
118 |
119 | def get_src_path(self, obj, append_base=True):
120 | """Creates a src path string with line info for use as markdown link.
121 | """
122 | path = getsourcefile(obj)
123 | if self.src_root not in path:
124 | # this can happen with e.g.
125 | # inlinefunc-wrapped functions
126 | if hasattr(obj, "__module__"):
127 | path = "%s.%s" % (obj.__module__, obj.__name__)
128 | else:
129 | path = obj.__name__
130 | path = path.replace(".", "/")
131 | pre, post = path.rsplit(self.src_root + "/", 1)
132 |
133 | lineno = self.get_line_no(obj)
134 | lineno = "" if lineno is None else "#L{}".format(lineno)
135 |
136 | path = self.src_root + "/" + post + lineno
137 | if append_base:
138 | path = os.path.join(self.github_link, path)
139 | return path
140 |
141 | def doc2md(self, func):
142 | """Parse docstring (parsed with getdoc) according to Google-style
143 | formatting and convert to markdown. We support the following
144 | Google style syntax:
145 |
146 | Args, Kwargs:
147 | argname (type): text
148 | freeform text
149 | Returns, Yields:
150 | retname (type): text
151 | freeform text
152 | Raises:
153 | exceptiontype: text
154 | freeform text
155 | Notes, Examples:
156 | freeform text
157 |
158 | """
159 | doc = getdoc(func) or ""
160 | blockindent = 0
161 | argindent = 1
162 | out = []
163 |
164 | for line in doc.split("\n"):
165 | indent = len(line) - len(line.lstrip())
166 | line = line.lstrip()
167 | if _RE_BLOCKSTART.match(line):
168 | # start of a new block
169 | blockindent = indent
170 | out.append("\n*{}*\n".format(line))
171 | elif indent > blockindent:
172 | if _RE_ARGSTART.match(line):
173 | # start of new argument
174 | out.append("\n" + " " * blockindent + " - " +
175 | _RE_ARGSTART.sub(r"**\1** (\2): \3", line))
176 | argindent = indent
177 | elif _RE_EXCSTART.match(line):
178 | # start of an exception-type block
179 | out.append("\n" + " " * blockindent + " - " +
180 | _RE_EXCSTART.sub(r"**\1**: \2", line))
181 | argindent = indent
182 | elif indent > argindent:
183 | out.append("\n" + " " * (blockindent + 2) + line)
184 | else:
185 | out.append("\n" + line)
186 | else:
187 | out.append("\n" + line)
188 |
189 | return "".join(out)
190 |
191 | def func2md(self, func, clsname=None, names=None, depth=3):
192 | """Takes a function (or method) and documents it.
193 |
194 | Args:
195 | clsname (str, optional): class name to prepend to funcname.
196 | depth (int, optional): number of ### to append to function name
197 |
198 | """
199 | section = "#" * depth
200 | if names is None:
201 | names = [func.__name__]
202 |
203 | funcname = ", ".join(names)
204 | escfuncname = ", ".join(["`%s`" % funcname if funcname.startswith(
205 | "_") else funcname for funcname in names])
206 | header = "%s%s" % ("%s." % clsname if clsname else "", escfuncname)
207 |
208 | path = self.get_src_path(func)
209 | doc = self.doc2md(func)
210 |
211 | args, kwargs = [], []
212 | spec = getargspec(func)
213 | vargsname, kwargsname = spec.varargs, spec.keywords
214 | vargs = list(make_iter(spec.args)) if spec.args else []
215 | defaults = list(make_iter(spec.defaults)) if spec.defaults else []
216 |
217 | while vargs:
218 | if vargs and vargs[0] == "self":
219 | args.append(vargs.pop(0))
220 | elif len(vargs) > len(defaults):
221 | args.append(vargs.pop(0))
222 | else:
223 | default = defaults.pop(0)
224 | if isinstance(default, str):
225 | default = "\"%s\"" % default
226 | else:
227 | default = "%s" % str(default)
228 |
229 | kwargs.append((vargs.pop(0), default))
230 |
231 | if args:
232 | args = ", ".join("%s" % arg for arg in args)
233 | if kwargs:
234 | kwargs = ", ".join("%s=%s" % kwarg for kwarg in kwargs)
235 | if args:
236 | kwargs = ", " + kwargs
237 | if vargsname:
238 | vargsname = "*%s" % vargsname
239 | if args or kwargs:
240 | vargsname = ", " + vargsname
241 | if kwargsname:
242 | kwargsname = "**%s" % kwargsname
243 | if args or kwargs or vargsname:
244 | kwargsname = ", " + kwargsname
245 |
246 | _FUNCDEF = "{funcname}({args}{kwargs}{vargs}{vkwargs})"
247 | funcdef = _FUNCDEF.format(funcname=funcname,
248 | args=args or "",
249 | kwargs=kwargs or "",
250 | vargs=vargsname or "",
251 | vkwargs=kwargsname or "")
252 |
253 | # split the function definition if it is too long
254 | lmax = 90
255 | if len(funcdef) > lmax:
256 | # wrap in the args list
257 | split = funcdef.split("(", 1)
258 | # we gradually build the string again
259 | rest = split[1]
260 | args = rest.split(", ")
261 |
262 | funcname = "(".join(split[:1]) + "("
263 | lline = len(funcname)
264 | parts = []
265 | for arg in args:
266 | larg = len(arg)
267 | if larg > lmax - 5:
268 | # not much to do if arg is so long
269 | parts.append(arg)
270 | elif lline + larg > lmax:
271 | # the next arg is too long, break the line
272 | parts.append("\\\n " + arg)
273 | lline = 0
274 | else:
275 | parts.append(arg)
276 | lline += len(parts[-1])
277 | funcdef = funcname + ", ".join(parts)
278 |
279 | # build the signature
280 | string = FUNC_TEMPLATE.format(section=section,
281 | header=header,
282 | funcdef=funcdef,
283 | path=path,
284 | doc=doc if doc else "*No documentation found.*")
285 | return string
286 |
287 | def class2md(self, cls, depth=2):
288 | """Takes a class and creates markdown text to document its methods and variables.
289 | """
290 |
291 | section = "#" * depth
292 | subsection = "#" * (depth + 2)
293 | clsname = cls.__name__
294 | modname = cls.__module__
295 | header = clsname
296 | path = self.get_src_path(cls)
297 | doc = self.doc2md(cls)
298 |
299 | try:
300 | init = self.func2md(cls.__init__, clsname=clsname)
301 | except (ValueError, TypeError):
302 | # this happens if __init__ is outside the repo
303 | init = ""
304 |
305 | variables = []
306 | for name, obj in getmembers(cls, lambda a: not (inspect.isroutine(a) or inspect.ismethod(a))):
307 | if not name.startswith("_") and type(obj) == property:
308 | comments = self.doc2md(obj) or inspect.getcomments(obj)
309 | comments = "\n %s" % comments if comments else ""
310 | variables.append("\n%s %s.%s%s\n" %
311 | (subsection, clsname, name, comments))
312 |
313 | handlers = []
314 | for name, obj in getmembers(cls, inspect.ismethoddescriptor):
315 | if not name.startswith("_") and hasattr(obj, "__module__") and obj.__module__ == modname:
316 | handlers.append("\n%s %s.%s\n *Handler*" %
317 | (subsection, clsname, name))
318 |
319 | methods = []
320 | for name, obj in getmembers(cls, inspect.ismethod):
321 | if not name.startswith("_") and hasattr(obj,
322 | "__module__") and obj.__module__ == modname and name not in handlers:
323 | methods.append(self.func2md(
324 | obj, clsname=clsname, depth=depth + 1))
325 |
326 | string = CLASS_TEMPLATE.format(section=section,
327 | header=header,
328 | path=path,
329 | doc=doc if doc else "",
330 | init=init,
331 | variables="".join(variables),
332 | handlers="".join(handlers),
333 | methods="".join(methods))
334 | return string
335 |
336 | def module2md(self, module):
337 | """Takes an imported module object and create a Markdown string containing functions and classes.
338 | """
339 | modname = module.__name__
340 | path = self.get_src_path(module, append_base=False)
341 | path = "[{}]({})".format(path, os.path.join(self.github_link, path))
342 | found = set()
343 |
344 | classes = []
345 | line_nos = []
346 | for name, obj in getmembers(module, inspect.isclass):
347 | # handle classes
348 | found.add(name)
349 | if not name.startswith("_") and hasattr(obj, "__module__") and obj.__module__ == modname:
350 | classes.append(self.class2md(obj))
351 | line_nos.append(self.get_line_no(obj) or 0)
352 | classes = order_by_line_nos(classes, line_nos)
353 |
354 | # Since functions can have multiple aliases.
355 | func2names = defaultdict(list)
356 | for name, obj in getmembers(module, inspect.isfunction):
357 | func2names[obj].append(name)
358 |
359 | functions = []
360 | line_nos = []
361 | for obj in func2names:
362 | names = func2names[obj]
363 | found.update(names)
364 |
365 | # Include if within module or included modules within __init__.py and exclude from global variables
366 | is_module_within_init = '__init__.py' in path and obj.__module__.startswith(
367 | modname)
368 | if is_module_within_init:
369 | found.add(obj.__module__.replace(modname + '.', ''))
370 |
371 | if hasattr(obj, "__module__") and (obj.__module__ == modname or is_module_within_init):
372 | names = list(
373 | filter(lambda name: not name.startswith("_"), names))
374 | if len(names) > 0:
375 | functions.append(self.func2md(obj, names=names))
376 | line_nos.append(self.get_line_no(obj) or 0)
377 | functions = order_by_line_nos(functions, line_nos)
378 |
379 | variables = []
380 | line_nos = []
381 | for name, obj in module.__dict__.items():
382 | if not name.startswith("_") and name not in found:
383 | if hasattr(obj, "__module__") and obj.__module__ != modname:
384 | continue
385 | if hasattr(obj, "__name__") and not obj.__name__.startswith(modname):
386 | continue
387 |
388 | comments = inspect.getcomments(obj)
389 | comments = ": %s" % comments if comments else ""
390 | variables.append("- **%s**%s" % (name, comments))
391 | line_nos.append(self.get_line_no(obj) or 0)
392 |
393 | variables = order_by_line_nos(variables, line_nos)
394 | if variables:
395 | new_list = ["**Global Variables**", "---------------"]
396 | new_list.extend(variables)
397 | variables = new_list
398 |
399 | string = MODULE_TEMPLATE.format(path=path,
400 | global_vars="\n".join(
401 | variables) if variables else "",
402 | functions="\n".join(
403 | functions) if functions else "",
404 | classes="".join(classes) if classes else "")
405 | string = _RE_URL.sub(r"<\1>", string)
406 | return string
407 |
--------------------------------------------------------------------------------
/docs/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: Documentation Text Classification Keras
2 | theme: readthedocs
3 | docs_dir: sources
4 | repo_url: http://github.com/jfilter/text-classification-keras
5 | edit_uri: blob/master/docs/templates
6 | site_description: 'Documentation Text Classification Keras - Text Classification Library for Keras'
7 |
8 | extra_javascript:
9 | - https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML
10 |
11 | extra_css:
12 | - css/extras.css
13 |
14 | markdown_extensions:
15 | - mdx_math
16 |
17 | pages:
18 | - Home: index.md
19 | - API Docs:
20 | - Models:
21 | - Sequence Processing Models: texcla.models.sequence_encoders.md
22 | - Sequence Model Builder Factory: texcla.models.token_model.md
23 | - Sentence Model Builder Factory: texcla.models.sentence_model.md
24 | - Custom Layers: texcla.models.layers.md
25 | - Preprocessing:
26 | - Word Tokenizer: texcla.preprocessing.word_tokenizer.md
27 | - Sentence Tokenizer: texcla.preprocessing.sentence_tokenizer.md
28 | - Tokenizer: texcla.preprocessing.tokenizer.md
29 | - Char Tokenizer: texcla.preprocessing.char_tokenizer.md
30 | - Utils: texcla.preprocessing.utils.md
31 | - Embeddings: texcla.embeddings.md
32 | - Experiment: texcla.experiment.md
33 | - Corpus: texcla.corpus.md
34 | - Data: texcla.data.md
35 | - Utils:
36 | - Format: texcla.utils.format.md
37 | - Generators: texcla.utils.generators.md
38 | - IO: texcla.utils.io.md
39 | - Sampling: texcla.utils.sampling.md
40 |
41 |
--------------------------------------------------------------------------------
/docs/templates/css/extras.css:
--------------------------------------------------------------------------------
1 | /*
2 | * Tweak the overall size to better match RTD.
3 | */
4 | body {
5 | font-size: 90%;
6 | }
7 |
8 | h1, h2, h3, h4, h5, h6, legend {
9 | font-family: "Source Sans Pro", "ff-tisa-web-pro", "Georgia", Arial, sans-serif;
10 | }
11 |
12 | /*
13 | * Sphinx doesn't have support for section dividers like we do in
14 | * MkDocs, this styles the section titles in the nav
15 | *
16 | * https://github.com/mkdocs/mkdocs/issues/175
17 | */
18 | .wy-menu-vertical span {
19 | line-height: 18px;
20 | padding: 0.4045em 1.618em;
21 | display: block;
22 | position: relative;
23 | font-size: 90%;
24 | color: #838383;
25 | }
26 |
27 | .wy-menu-vertical .subnav a {
28 | padding: 0.4045em 2.427em;
29 | }
30 |
31 | /*
32 | * Long navigations run off the bottom of the screen as the nav
33 | * area doesn't scroll.
34 | *
35 | * https://github.com/mkdocs/mkdocs/pull/202
36 | */
37 | .wy-nav-side {
38 | height: 100%;
39 | overflow-y: auto;
40 | }
41 |
42 | .wy-nav-content {
43 | max-width: 900px;
44 | }
45 |
46 | /*
47 | * readthedocs theme hides nav items when the window height is
48 | * too small to contain them.
49 | *
50 | * https://github.com/mkdocs/mkdocs/issues/348
51 | */
52 | .wy-menu-vertical ul {
53 | margin-bottom: 2em;
54 | }
55 |
56 | /*
57 | * Fix wrapping in the code highlighting
58 | *
59 | * https://github.com/mkdocs/mkdocs/issues/233
60 | */
61 | code {
62 | white-space: pre;
63 | font-size: 90%;
64 | color: #9E0F00;
65 | background: #FFFAFA;
66 | }
67 |
68 | /*
69 | * Wrap inline code samples otherwise they shoot off the side and
70 | * can't be read at all.
71 | *
72 | * https://github.com/mkdocs/mkdocs/issues/313
73 | */
74 | p code {
75 | word-wrap: break-word;
76 | }
77 |
78 | /*
79 | * The CSS classes from highlight.js seem to clash with the
80 | * ReadTheDocs theme causing some code to be incorrectly made
81 | * bold and italic.
82 | *
83 | * https://github.com/mkdocs/mkdocs/issues/411
84 | */
85 | code.cs, code.c {
86 | font-weight: inherit;
87 | font-style: inherit;
88 | }
89 |
90 | /*
91 | * Fix some issues with the theme and non-highlighted code
92 | * samples. Without any highlighting styles attached, the
93 | * formatting is broken.
94 | *
95 | * https://github.com/mkdocs/mkdocs/issues/319
96 | */
97 | .no-highlight {
98 | display: block;
99 | padding: 0.5em;
100 | color: #333;
101 | }
102 |
103 |
104 | /*
105 | * Additions specific to the search functionality provided by MkDocs
106 | */
107 |
108 | #mkdocs-search-results article h3
109 | {
110 | margin-top: 23px;
111 | border-top: 1px solid #E1E4E5;
112 | padding-top: 24px;
113 | }
114 |
115 | #mkdocs-search-results article:first-child h3 {
116 | border-top: none;
117 | }
118 |
119 | #mkdocs-search-query{
120 | width: 100%;
121 | border-radius: 50px;
122 | padding: 6px 12px;
123 | border-color: #D1D4D5;
124 | }
125 |
126 | .wy-menu-vertical li ul {
127 | display: inherit;
128 | }
129 |
130 | .wy-menu-vertical li ul.subnav ul.subnav{
131 | padding-left: 1em;
132 | }
133 |
--------------------------------------------------------------------------------
/docs/templates/visualizations/activation_maximization.md:
--------------------------------------------------------------------------------
1 | ## What is Activation Maximization?
2 |
3 | In a CNN, each Conv layer has several learned *template matching* filters that maximize their output when a similar
4 | template pattern is found in the input image. The first Conv layer is easy to interpret; simply visualize the weights as
5 | an image. To see what the Conv layer is doing, a simple option is to apply the filter over the raw input pixels.
6 | Subsequent Conv filters operate over the outputs of previous Conv filters (which indicate the presence or absence
7 | of some templates), making them hard to interpret.
8 |
9 | The idea behind activation maximization is simple in hindsight: generate an input image that maximizes the filter
10 | output activations, i.e., we compute
11 |
12 | $$\frac{\partial ActivationMaximizationLoss}{\partial input}$$
13 |
14 | and use that estimate to update the input. [ActivationMaximization](../vis.losses#activationmaximization) loss simply
15 | outputs small values for large filter activations (we are minimizing losses during gradient descent iterations).
16 | This allows us to understand what sort of input patterns activate a particular filter. For example, there could be
17 | an eye filter that activates for the presence of eye within the input image.
18 |
19 | ## Usage
20 |
21 | There are two APIs exposed to perform activation maximization.
22 |
23 | 1. [visualize_activation](../vis.visualization#visualize_activation): This is the general purpose API for visualizing
24 | activations.
25 | 2. [visualize_activation_with_losses](../vis.visualization#visualize_activation_with_losses): This is intended for
26 | research use-cases where some custom weighted losses can be minimized.
27 |
28 | See [examples/](https://github.com/raghakot/keras-vis/tree/master/examples) for code examples.
29 |
30 | ### Scenarios
31 |
32 | The API is very general purpose and can be used in a wide variety of scenarios. We will list the most common use-cases
33 | below:
34 |
35 | #### Categorical Output Dense layer visualization
36 |
37 | How can we assess whether a network is over/under fitting or generalizing well? Given an input image, a CNN can
38 | classify whether it is a cat, bird etc. How can we be sure that it is capturing the correct notion of what it means
39 | to be a bird?
40 |
41 | One way to answer these questions is to pose the reverse question:
42 | > Generate an input image that maximizes the final `Dense` layer output corresponding to bird class.
43 |
44 | This can be done by pointing `layer_idx` to final `Dense` layer, and setting `filter_indices` to the desired output
45 | category.
46 |
47 | - For multi-class classification, `filter_indices` can point to a single class. You could also point it to
48 | multiple categories to see what a cat-fish might look like, for example.
49 | - For multi-label classifier, simply set the appropriate `filter_indices`.
50 |
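A hedged sketch of what this typically looks like with the `visualize_activation` API referenced above (the `vis.*` import paths and the `find_layer_idx` helper follow keras-vis conventions and may differ between versions; `model`, the `'preds'` layer name, and `bird_class_idx` are assumed):

```python
from vis.utils import utils
from vis.visualization import visualize_activation

# point layer_idx at the final Dense layer and filter_indices at the target class
layer_idx = utils.find_layer_idx(model, 'preds')  # 'preds' is an assumed layer name
img = visualize_activation(model, layer_idx, filter_indices=bird_class_idx)
```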
51 | #### Regression Output Dense layer visualization
52 |
53 | Unlike class activation visualizations, for regression outputs, we could visualize input that
54 |
55 | - increases
56 | - decreases
57 |
58 | the regressed `filter_indices` output. For example, if you trained an apple counter model, increasing the regression
59 | output should correspond to more apples showing up in the input image. Similarly one could decrease the current output.
60 | This can be achieved by using `grad_modifier` option. As the name suggests, it is used to modify the gradient of losses
61 | with respect to inputs. By default, `ActivationMaximization` loss is used to increase the output. By setting
62 | `grad_modifier='negate'` you can negate the gradients, thus causing output values to decrease.
63 | [gradient_modifiers](../vis.grad_modifiers) are very powerful and show up in other visualization APIs as well.
64 |
65 |
66 | #### Conv filter visualization
67 |
68 | By pointing `layer_idx` to `Conv` layer, you can visualize what pattern activates a filter. This might help you discover
69 | what a filter might be computing. Here, `filter_indices` refers to the index of the `Conv` filter within the layer.
70 |
71 | ### Advanced usage
72 |
73 | [backprop_modifiers](../vis.backprop_modifiers) allow you to modify the backpropagation behavior. For example,
74 | you could tweak backprop to only propagate positive gradients by using `backprop_modifier='relu'`. This parameter also
75 | accepts a function and can be used to implement your crazy research idea :)
76 |
77 | ## Tips and tricks
78 |
79 | - If you get garbage visualization, try setting `verbose=True` to see various losses during gradient descent iterations.
80 | By default, `visualize_activation` uses `TotalVariation` and `LpNorm` regularization to enforce natural image prior. It
81 | is very likely that you will see the `ActivationMaximization Loss` bounce back and forth as it is dominated by the regularization
82 | loss weights. Try setting all weights to zero and gradually increase the total variation weight.
83 |
84 | - To get sharper looking images, use [Jitter](../vis.input_modifiers#jitter) input modifier.
85 |
86 | - Regression models usually do not provide enough gradient information to generate meaningful input images. Try seeding
87 | the input using `seed_input` and see if the modifications to the input make sense.
88 |
89 | - Consider submitting a PR to add more tips and tricks that you found useful.
90 |
--------------------------------------------------------------------------------
/docs/templates/visualizations/class_activation_maps.md:
--------------------------------------------------------------------------------
1 | ## What is a Class Activation Map?
2 |
3 | Class activation maps or grad-CAM is another way of visualizing attention over input. Instead of using gradients with
4 | respect to output (see [saliency](saliency)), grad-CAM uses penultimate (pre `Dense` layer) `Conv` layer output. The
5 | intuition is to use the nearest `Conv` layer to utilize spatial information that gets completely lost in `Dense` layers.
6 |
7 | In keras-vis, we use [grad-CAM](https://arxiv.org/pdf/1610.02391.pdf) as it's considered more general than
8 | [Class Activation maps](http://cnnlocalization.csail.mit.edu/).
9 |
10 | ## Usage
11 |
12 | There are two APIs exposed to visualize grad-CAM; they are almost identical to [saliency usage](saliency#Usage).
13 |
14 | 1. [visualize_cam](../vis.visualization#visualize_cam): This is the general purpose API for visualizing
15 | grad-CAM.
16 | 2. [visualize_cam_with_losses](../vis.visualization#visualize_cam_with_losses): This is intended for
17 | research use-cases where some custom weighted loss can be used.
18 |
19 | The only notable addition is the `penultimate_layer_idx` parameter. This can be used to specify the pre-layer
20 | whose output gradients are used. By default, keras-vis will search for the nearest layer with filters.
21 |
22 | ### Scenarios
23 |
24 | See [saliency scenarios](saliency#scenarios). Everything is identical except the added `penultimate_layer_idx` param; a sketch follows.
25 |
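A hedged sketch mirroring the saliency example, with the additional `penultimate_layer_idx` parameter (keras-vis style; exact signatures may vary between versions; `model`, `img`, `class_idx`, and the layer names are assumed):

```python
from vis.utils import utils
from vis.visualization import visualize_cam

layer_idx = utils.find_layer_idx(model, 'preds')               # assumed name of the final Dense layer
penultimate_idx = utils.find_layer_idx(model, 'block5_conv3')  # assumed name of a late Conv layer
heatmap = visualize_cam(model, layer_idx, filter_indices=class_idx,
                        seed_input=img, penultimate_layer_idx=penultimate_idx)
```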
26 | ## Gotchas
27 |
28 | grad-CAM only works well if the penultimate layer is close to the layer being visualized. This also applies to `Conv`
29 | filter visualizations. You are better off using saliency if this is not the case with your model.
30 |
--------------------------------------------------------------------------------
/docs/templates/visualizations/saliency.md:
--------------------------------------------------------------------------------
1 | ## What is Saliency?
2 |
3 | Suppose that all the training images of the *bird* class contain a tree with leaves. How do we know whether the CNN is
4 | using bird-related pixels, as opposed to some other features such as the tree or leaves in the image? This actually
5 | happens more often than you think and you should be especially suspicious if you have a small training set.
6 |
7 | Saliency maps were first introduced in the paper:
8 | [Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps](https://arxiv.org/pdf/1312.6034v2.pdf)
9 |
10 | The idea is pretty simple in hindsight. We compute the gradient of the output category with respect to the input image,
11 |
12 | $$\frac{\partial output}{\partial input}$$
13 |
14 | This should tell us how the output category value changes with respect to a small change in the input image pixels.
15 | All the positive values in the gradient tell us that a small change to that pixel will increase the output value.
16 | We can use these gradients to highlight the input regions that cause the most change in the output. Hence, visualizing
17 | these gradients, which have the same shape as the image, should highlight the salient image regions that contribute
18 | most towards the output and provide some intuition of attention.
19 |
24 |
25 | ## Usage
26 |
27 | There are two APIs exposed to visualize saliency.
28 |
29 | 1. [visualize_saliency](../vis.visualization#visualize_saliency): This is the general purpose API for visualizing
30 | saliency.
31 | 2. [visualize_saliency_with_losses](../vis.visualization#visualize_saliency_with_losses): This is intended for
32 | research use-cases where some custom weighted loss can be used.
33 |
34 | See [examples/](https://github.com/raghakot/keras-vis/tree/master/examples) for code examples.
35 |
36 | ### Scenarios
37 |
38 | The API is very general purpose and can be used in a wide variety of scenarios. We will list the most common use-cases
39 | below:
40 |
41 | #### Categorical Dense layer visualization
42 |
43 | By setting `layer_idx` to the final `Dense` layer, and `filter_indices` to the desired output category, we can visualize
44 | parts of the `seed_input` that contribute most towards activating the corresponding output nodes (see the sketch after this list).
45 |
46 | - For multi-class classification, `filter_indices` can point to a single class.
47 | - For multi-label classifier, simply set the appropriate `filter_indices`.
48 |
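A hedged sketch of the categorical case with `visualize_saliency` (keras-vis style; exact signatures may vary between versions; `model`, `img`, `class_idx`, and the `'preds'` layer name are assumed):

```python
from vis.utils import utils
from vis.visualization import visualize_saliency

layer_idx = utils.find_layer_idx(model, 'preds')  # assumed name of the final Dense layer
grads = visualize_saliency(model, layer_idx, filter_indices=class_idx, seed_input=img)
```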
49 | #### Regression Dense layer visualization
50 |
51 | For regression outputs, we could visualize attention over input that
52 |
53 | - increases
54 | - decreases
55 | - maintains
56 |
57 | the regressed `filter_indices` output. For example, consider a self-driving model with a continuous steering regression
58 | output. One could visualize parts of the `seed_input` that contribute towards an increase, decrease, or maintenance of the
59 | predicted output.
60 |
61 | By default, saliency tells us how to increase the output activations. For the self driving car case, this only tells
62 | us parts of the input image that contribute towards steering angle increase. Other use cases can be visualized by
63 | using `grad_modifier` option. As the name suggests, it is used to modify the gradient of losses with respect to inputs.
64 |
65 | - To visualize decrease in output, use `grad_modifier='negate'`. By default, `ActivationMaximization` loss yields
66 | positive gradients for inputs regions that increase the output. By setting `grad_modifier='negate'` you can treat negative
67 | gradients (which indicate the decrease) as positive and therefore visualize decrease use case.
68 |
69 | - To visualize what contributed to the predicted output, we want to consider gradients that have very low positive
70 | or negative values. This can be achieved by performing `grads = abs(1 / grads)`, which magnifies small gradients. Equivalently,
71 | you can use `grad_modifier='small_values'`, which does the same thing. [gradient_modifiers](../vis.grad_modifiers)
72 | are very powerful and show up in other visualization APIs as well.
73 |
74 | You can see a practical application for this in the
75 | [self driving car](https://github.com/raghakot/keras-vis/tree/master/applications/self_driving) example.
76 |
77 | #### Guided / rectified saliency
78 |
79 | Zeiler et al. proposed the idea of clipping negative gradients in the backprop step, i.e., only propagate positive gradient
80 | information that communicates the increase in output. We call this rectified or deconv saliency. Details can be found
81 | in the paper: [Visualizing and Understanding Convolutional Networks](https://arxiv.org/pdf/1311.2901.pdf).
82 |
83 | In guided saliency, the backprop step is modified to only propagate positive gradients for positive activations.
84 | For details see the paper: [Striving for Simplicity: The All Convolutional Net](https://arxiv.org/pdf/1412.6806.pdf).
85 |
86 | For both these cases, we can use `backprop_modifier='relu'` and `backprop_modifier='guided'` respectively. You
87 | can also implement your own [backprop_modifier](../vis.backprop_modifiers) to try your crazy research idea :)
88 |
89 | #### Conv filter saliency
90 |
91 | By pointing `layer_idx` to `Conv` layer, you can visualize parts of the image that influence the filter. This might
92 | help you discover what a filter cares about. Here, `filter_indices` refers to the index of the `Conv` filter within
93 | the layer.
94 |
--------------------------------------------------------------------------------
/docs/update_docs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import shutil
4 | import subprocess
5 | import sys
6 |
7 | from texcla import corpus, data, embeddings, experiment
8 | from texcla.models import layers, sentence_model, sequence_encoders, token_model
9 | from texcla.preprocessing import char_tokenizer, sentence_tokenizer, tokenizer, utils, word_tokenizer
10 | from texcla.utils import format, generators, io, sampling
11 | from md_autogen import MarkdownAPIGenerator, to_md_file
12 |
13 |
14 | def generate_api_docs():
15 | modules = [
16 | token_model,
17 | sentence_model,
18 | sequence_encoders,
19 | layers,
20 | data, corpus, embeddings, experiment, format, generators, io, sampling,
21 | char_tokenizer, sentence_tokenizer, tokenizer, utils, word_tokenizer
22 | ]
23 |
24 | md_gen = MarkdownAPIGenerator(
25 | "texcla", "https://github.com/jfilter/text-classification-keras/tree/master")
26 | for m in modules:
27 | md_string = md_gen.module2md(m)
28 | to_md_file(md_string, m.__name__, "sources")
29 |
30 |
31 | def update_index_md():
32 | shutil.copyfile('../README.md', 'sources/index.md')
33 |
34 |
35 | def copy_templates():
36 | shutil.rmtree('sources', ignore_errors=True)
37 | shutil.copytree('templates', 'sources')
38 |
39 |
40 | if __name__ == "__main__":
41 | copy_templates()
42 | update_index_md()
43 | generate_api_docs()
44 | if len(sys.argv) > 1 and sys.argv[1] == '--live':
45 | subprocess.run("mkdocs gh-deploy", shell=True, check=True)
46 | else:
47 | subprocess.run(
48 | "mkdocs build && cd site && python3 -m http.server", shell=True, check=True)
49 |
--------------------------------------------------------------------------------
/examples/cnn_imdb.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | from texcla import corpus, data, experiment
4 | from texcla.models import AlexCNN, AttentionRNN, BasicRNN, StackedRNN, TokenModelFactory, YoonKimCNN
5 | from texcla.preprocessing import FastTextWikiTokenizer
6 |
7 | # 1. `python cnn_imdb.py setup`: Setup and preprocess the data
8 | # 2. `python cnn_imdb.py train`: Load the setup data and train
9 |
10 | # truncate text input after 50 tokens (words)
11 | MAX_LEN = 50
12 |
13 |
14 | def setup():
15 | # limit to 5k pos. and 5k neg. samples (each for train and test)
16 | X_train, X_test, y_train, y_test = corpus.imdb(5000)
17 |
18 | # use the special tokenizer used for constructing the embeddings
19 | tokenizer = FastTextWikiTokenizer()
20 |
21 | # build vocabulary only on training data
22 | tokenizer = experiment.setup_data(
23 | X_train, y_train, tokenizer, 'imdb_train.bin', max_len=MAX_LEN)
24 | experiment.setup_data(X_test, y_test, tokenizer,
25 | 'imdb_test.bin', max_len=MAX_LEN)
26 |
27 |
28 | def train():
29 | ds_train = data.Dataset.load('imdb_train.bin')
30 | ds_val = data.Dataset.load('imdb_test.bin')
31 |
32 | # use the embedding trained on Simple English Wikipedia
33 | factory = TokenModelFactory(
34 | ds_train.num_classes, ds_train.tokenizer.token_index, max_tokens=MAX_LEN, embedding_type='fasttext.wiki.simple', embedding_dims=300)
35 |
36 | word_encoder_model = YoonKimCNN()
37 | # word_encoder_model = AttentionRNN()
38 | # word_encoder_model = StackedRNN()
39 | # word_encoder_model = BasicRNN()
40 |
41 | # freeze word embeddings
42 | model = factory.build_model(
43 | token_encoder_model=word_encoder_model, trainable_embeddings=False)
44 |
45 | # use experiment.train as wrapper for Keras.fit()
46 | experiment.train(x=ds_train.X, y=ds_train.y, validation_data=(ds_val.X, ds_val.y), model=model,
47 | word_encoder_model=word_encoder_model)
48 |
49 |
50 | if __name__ == '__main__':
51 | assert(len(sys.argv) == 2)
52 | if sys.argv[1] == 'setup':
53 | setup()
54 | if sys.argv[1] == 'train':
55 | train()
56 |
--------------------------------------------------------------------------------
/examples/fasttext_imdb.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | from texcla import corpus, data, experiment
4 | from texcla.models import TokenModelFactory, AveragingEncoder
5 | from texcla.preprocessing import SpacyTokenizer
6 |
7 |
8 | # FIXME: NOT WORKING. PLEASE FIX ME. There seems to be something wrong with the n-gram features.
9 |
10 | # 1. `python fasttext_imdb.py setup`: Setup and preprocess the data
11 | # 2. `python fasttext_imdb.py train`: Load the setup data and train
12 |
13 | # truncate text input after 400 tokens (words)
14 | MAX_LEN = 400
15 | N_GRAMS = 2
16 | EMB_DIMS = 50
17 | EPOCHS = 5
18 | WORDS_LIMIT = 20000
19 |
20 |
21 | def setup():
22 | # limit to 1k pos. and 1k neg. samples (each for train and test)
23 | X_train, X_test, y_train, y_test = corpus.imdb(1000)
24 |
25 | # use the special tokenizer used for constructing the embeddings
26 | tokenizer = SpacyTokenizer()
27 |
28 | # build vocabulary only on training data
29 | tokenizer = experiment.setup_data(
30 | X_train, y_train, tokenizer, 'imdb_train.bin', max_len=MAX_LEN, ngrams=N_GRAMS, limit_top_tokens=WORDS_LIMIT)
31 | experiment.setup_data(X_test, y_test, tokenizer,
32 | 'imdb_test.bin', max_len=MAX_LEN)
33 |
34 |
35 | def train():
36 | ds_train = data.Dataset.load('imdb_train.bin')
37 | ds_val = data.Dataset.load('imdb_test.bin')
38 |
39 | factory = TokenModelFactory(
40 | ds_train.num_classes, ds_train.tokenizer.token_index, max_tokens=MAX_LEN, embedding_dims=EMB_DIMS, embedding_type=None)
41 |
42 | word_encoder_model = AveragingEncoder()
43 |
44 | # freeze word embeddings
45 | model = factory.build_model(
46 | token_encoder_model=word_encoder_model, trainable_embeddings=True)
47 |
48 | # use experiment.train as wrapper for Keras.fit()
49 | experiment.train(x=ds_train.X, y=ds_train.y, validation_data=(ds_val.X, ds_val.y), model=model,
50 | word_encoder_model=word_encoder_model, epochs=EPOCHS)
51 |
52 |
53 | if __name__ == '__main__':
54 | assert(len(sys.argv) == 2)
55 | if sys.argv[1] == 'setup':
56 | setup()
57 | if sys.argv[1] == 'train':
58 | train()
59 |
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | # Configuration of py.test
2 | [pytest]
3 | addopts=-v
4 | --durations=10
5 |
6 | # Do not run tests in the build folder
7 | norecursedirs= build libs .vscode
8 |
9 | # PEP-8 The following are ignored:
10 | # E501 line too long (82 > 79 characters)
11 | # E402 module level import not at top of file - temporary measure to continue adding python packages in sys.path
12 | # E731 do not assign a lambda expression, use a def
13 |
14 | pep8ignore=* E501 \
15 | * E402 \
16 | * E731 \
17 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | .
2 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | description-file = README.md
3 |
4 | [bdist_wheel]
5 | universal=1
6 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | from setuptools import find_packages
3 |
4 | with open("README.md", "r") as fh:
5 | long_description = fh.read()
6 |
7 | classifiers = [
8 | 'Topic :: Scientific/Engineering :: Artificial Intelligence',
9 | 'Programming Language :: Python :: 3.5',
10 | 'Programming Language :: Python :: 3.6',
11 | 'Programming Language :: Python :: 2.7',
12 | 'License :: OSI Approved :: MIT License',
13 | 'Development Status :: 3 - Alpha'
14 | ]
15 |
16 | version = '0.1.4'
17 |
18 | setup(name='text-classification-keras',
19 | version=version,
20 | description='Text Classification Library for Keras',
21 | long_description=long_description,
22 | long_description_content_type="text/markdown",
23 | author='Raghavendra Kotikalapudi, Johannes Filter',
24 | author_email='ragha@outlook.com, hi@jfilter.de',
25 | url='https://github.com/jfilter/text-classification-keras',
26 | license='MIT',
27 | install_requires=['keras==2.*', 'six==1.*',
28 | 'scikit-learn==0.*', 'joblib==0.*',
29 | 'jsonpickle==0.*', 'numpy==1.*'],
30 | extras_require={'full': ['spacy==2.*',
31 | 'deep-plots==0.*', 'tensorflow==1.*']},
32 | include_package_data=True,
33 | classifiers=classifiers,
34 | packages=find_packages())
35 |
--------------------------------------------------------------------------------
/tests/embeddings.py:
--------------------------------------------------------------------------------
1 | from texcla.embeddings import get_embeddings_index
2 |
3 |
4 | def test_build_index_glove():
5 | index = get_embeddings_index('glove.6B.50d', 50, cache=False)
6 | assert(index['a'] is not None)
7 | assert(index['and'] is not None)
8 |
9 |
10 | def test_build_index_fasttext_en():
11 | index = get_embeddings_index('fasttext.wn.1M.300d', 300, cache=False)
12 | assert(index['a'] is not None)
13 | assert(index['and'] is not None)
14 |
15 |
16 | def test_build_index_fasttext_wiki():
17 | index = get_embeddings_index('fasttext.wiki.simple', 300, cache=False)
18 | assert(index['a'] is not None)
19 | assert(index['and'] is not None)
20 |
21 |
22 | def test_build_index_fasttext_cc():
23 | index = get_embeddings_index('fasttext.cc.en', 300, cache=False)
24 | assert(index['a'] is not None)
25 | assert(index['and'] is not None)
26 |
--------------------------------------------------------------------------------
/tests/models/test_sentence_model.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from texcla.models import AttentionRNN, AveragingEncoder, SentenceModelFactory, StackedRNN, YoonKimCNN
3 |
4 |
5 | def _test_build(token_encoder_model, sentence_encoder_model):
6 | test_index = {'hello': 1, 'kitty': 2}
7 |
8 | if sentence_encoder_model.allows_dynamic_length():
9 | factory = SentenceModelFactory(
10 | 10, test_index, max_sents=None, max_tokens=200, embedding_type=None)
11 | model = factory.build_model(
12 | token_encoder_model, sentence_encoder_model)
13 | model.compile(optimizer='adam', loss='categorical_crossentropy')
14 | model.summary()
15 | else:
16 | # Should fail since this model does not allow dynamic mini-batches.
17 | factory = SentenceModelFactory(
18 | 10, test_index, max_sents=None, max_tokens=200, embedding_type=None)
19 | with pytest.raises(ValueError):
20 | factory.build_model(token_encoder_model, sentence_encoder_model)
21 |
22 | factory = SentenceModelFactory(
23 | 10, test_index, max_sents=500, max_tokens=200, embedding_type=None)
24 | model = factory.build_model(
25 | token_encoder_model, sentence_encoder_model)
26 | model.compile(optimizer='adam', loss='categorical_crossentropy')
27 | model.summary()
28 |
29 |
30 | def test_hierarchical_attention_model():
31 | _test_build(AttentionRNN(), AttentionRNN())
32 |
33 |
34 | def test_combinations():
35 | encoders = [YoonKimCNN(), AttentionRNN(), StackedRNN(), AveragingEncoder()]
36 | for word_encoder in encoders:
37 | for sentence_encoder in encoders:
38 | print('Testing combination {}, {}'.format(
39 | word_encoder.__class__, sentence_encoder.__class__))
40 | _test_build(word_encoder, sentence_encoder)
41 |
42 |
43 | if __name__ == '__main__':
44 | pytest.main([__file__])
45 |
--------------------------------------------------------------------------------
/tests/models/test_token_model.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from texcla.models import AttentionRNN, StackedRNN, TokenModelFactory, YoonKimCNN
3 |
4 |
5 | def _test_build(token_encoder_model):
6 | test_index = {'hello': 1, 'kitty': 2}
7 |
8 | if token_encoder_model.allows_dynamic_length():
9 | factory = TokenModelFactory(
10 | 1, test_index, max_tokens=None, embedding_type=None)
11 | model = factory.build_model(token_encoder_model)
12 | model.compile(optimizer='adam', loss='categorical_crossentropy')
13 | model.summary()
14 | else:
15 | # Should fail since this model does not allow dynamic mini-batches.
16 | factory = TokenModelFactory(
17 | 1, test_index, max_tokens=None, embedding_type=None)
18 | with pytest.raises(ValueError):
19 | factory.build_model(token_encoder_model)
20 |
21 | factory = TokenModelFactory(
22 | 1, test_index, max_tokens=100, embedding_type=None)
23 | model = factory.build_model(token_encoder_model)
24 | model.compile(optimizer='adam', loss='categorical_crossentropy')
25 | model.summary()
26 |
27 |
28 | def test_yoon_kim_cnn():
29 | _test_build(YoonKimCNN())
30 |
31 |
32 | def test_attention_rnn():
33 | _test_build(AttentionRNN())
34 |
35 |
36 | def test_stacked_rnn():
37 | _test_build(StackedRNN())
38 |
39 |
40 | if __name__ == '__main__':
41 | pytest.main([__file__])
42 |
--------------------------------------------------------------------------------
/tests/preprocessing.py:
--------------------------------------------------------------------------------
1 | from __future__ import unicode_literals
2 |
3 | import keras
4 | import pytest
5 |
6 | from texcla.data import Dataset
7 | from texcla.preprocessing import *
8 | from texcla.preprocessing.utils import unicodify
9 |
10 |
11 | def test_token_preprocessing(tmpdir):
12 | tokenizer = SpacyTokenizer()
13 |
14 | X = ['hello', 'world', 'welcome', 'earth']
15 | y = [0, 1, 0, 1]
16 |
17 | tokenizer.build_vocab(X)
18 |
19 | assert(len(tokenizer.token_index) - len(tokenizer.special_token) == 4)
20 |
21 | X_enc = tokenizer.encode_texts(X)
22 | X_fin = tokenizer.pad_sequences(X_enc, fixed_token_seq_length=50)
23 |
24 | ds = Dataset(X_fin, y, tokenizer=tokenizer)
25 |
26 | path = str(tmpdir.mkdir("data").join("test"))
27 |
28 | ds.save(path)
29 |
30 | ds_new = Dataset.load(path)
31 |
32 | # only first word
33 | assert(all([a == b for a, b in zip(ds_new.X[0], X_fin[0])]))
34 |
35 |
36 | def test_sentence_tokenizer():
37 | texts = [
38 | "HELLO world hello. How are you today? Did you see the S.H.I.E.L.D?",
39 | "Quick brown fox. Ran over the, building 1234?",
40 | ]
41 |     texts = unicodify(texts)
42 | tokenizer = SpacySentenceTokenizer()
43 | tokenizer.build_vocab(texts)
44 | tokenizer.apply_encoding_options(limit_top_tokens=5)
45 | encoded = tokenizer.encode_texts(texts)
46 | decoded = tokenizer.decode_texts(encoded, inplace=False)
47 |
48 | assert(len(decoded) == 2)
49 |
50 | decoded_flat = sum(sum(decoded, []), [])
51 |
52 |     assert(len(set(decoded_flat)) == 6)  # 5 top tokens + 1 for the out-of-vocabulary token
53 |
54 |
55 | def test_padding():
56 | texts = [
57 | "HELLO world hello.",
58 | "Quick brown fox. Ran over the, building 1234?",
59 | "Peter is a cool guy.",
60 | ]
61 |
62 | texts = unicodify(texts)
63 | tokenizer = SpacyTokenizer()
64 | tokenizer.build_vocab(texts[:-1])
65 |
66 | encoded = tokenizer.encode_texts(texts)
67 | padded = tokenizer.pad_sequences(encoded, fixed_token_seq_length=7)
68 |
69 | assert(len(padded[0]) == 7)
70 | assert(len(padded[1]) == 7)
71 | assert(len(padded[2]) == 7)
72 |
73 | decoded = tokenizer.decode_texts(padded, inplace=False)
74 | print(decoded)
75 | assert('guy' not in decoded[-1])
76 |
77 |
78 | def test_twokenizer():
79 | texts = [
80 | "HELLO world hello.",
81 | "Quick brown fox. Ran over the, building 1234 1.2.3.5?",
82 | "Peter is a cool guy.",
83 | ]
84 | tokenizer = TwokenizeTokenizer()
85 | tokenizer.build_vocab(texts)
86 | assert('1.2.3.5' in tokenizer.token_index)
87 | assert('1' not in tokenizer.token_index)
88 |
89 |
90 | def test_simple_tokenizer():
91 | texts = [
92 | "HELLO world hello.",
93 | "Quick brown fox. Ran over the, building 1234 1.2.3.5?",
94 | "Peter is a cool guy.",
95 | ]
96 | tokenizer = SimpleTokenizer()
97 | tokenizer.build_vocab(texts)
98 | assert('fox.' in tokenizer.token_index)
99 | assert(' ' not in tokenizer.token_index)
100 |
101 |
102 | def test_fasttext_wiki_tokenizer():
103 | texts = [
104 | "HELLO world hello.",
105 | "Quick brown fox. Ran over the, building 1234 1.2.3.5?",
106 | "Peter is a cool guy.",
107 | ]
108 | tokenizer = FastTextWikiTokenizer()
109 | tokenizer.build_vocab(texts)
110 | assert('fox' in tokenizer.token_index)
111 | assert(' ' not in tokenizer.token_index)
112 |
--------------------------------------------------------------------------------
/tests/train.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 | import keras
5 | import numpy as np
6 | import pytest
7 |
8 | from texcla.corpus import imdb
9 | from texcla.data import Dataset
10 | from texcla.models import AttentionRNN, StackedRNN, TokenModelFactory, YoonKimCNN
11 | from texcla.preprocessing import SpacyTokenizer, SimpleTokenizer
12 | from texcla import experiment
13 |
14 | max_len = 50
15 |
16 |
17 | def test_train():
18 | X, y, _, _ = imdb(10)
19 |
20 | # use the special tokenizer used for constructing the embeddings
21 | tokenizer = SpacyTokenizer()
22 |
23 | # preprocess data (once)
24 | experiment.setup_data(X, y, tokenizer, 'data.bin', max_len=100)
25 |
26 | # load data
27 | ds = Dataset.load('data.bin')
28 |
29 | # construct base
30 | factory = TokenModelFactory(
31 | ds.num_classes, ds.tokenizer.token_index, max_tokens=100,
32 | embedding_type='glove.6B.50d', embedding_dims=50)
33 |
34 | # choose a model
35 | word_encoder_model = YoonKimCNN()
36 |
37 | # build a model
38 | model = factory.build_model(
39 | token_encoder_model=word_encoder_model, trainable_embeddings=False)
40 |
41 |     # use experiment.train as a thin wrapper around Keras' model.fit()
42 | experiment.train(x=ds.X, y=ds.y, validation_split=0.1, model=model,
43 | word_encoder_model=word_encoder_model, epochs=1, batch_size=32)
44 |
45 |
46 | def test_train_multi_label():
47 | X, y = ['what is up', 'yes yes', 'no no no'], [
48 | ["foo", "bar"], ["foo"], ["bar", "haha"]]
49 |
50 | # use the special tokenizer used for constructing the embeddings
51 | tokenizer = SimpleTokenizer()
52 |
53 | # preprocess data (once)
54 | experiment.setup_data(X, y, tokenizer, 'data.bin', max_len=100)
55 |
56 | # load data
57 | ds = Dataset.load('data.bin')
58 |
59 | # construct base
60 | factory = TokenModelFactory(
61 | ds.num_classes, ds.tokenizer.token_index, max_tokens=100,
62 | embedding_type='glove.6B.50d', embedding_dims=50)
63 |
64 | # choose a model
65 | word_encoder_model = YoonKimCNN()
66 |
67 | # build a model
68 | model = factory.build_model(
69 | token_encoder_model=word_encoder_model, trainable_embeddings=False, output_activation="sigmoid")
70 |
71 |     # use experiment.train as a thin wrapper around Keras' model.fit()
72 | experiment.train(x=ds.X, y=ds.y, validation_split=0.1, model=model,
73 | word_encoder_model=word_encoder_model, epochs=1, batch_size=32)
74 |
--------------------------------------------------------------------------------
/texcla/__init__.py:
--------------------------------------------------------------------------------
1 | import logging
2 | logging.basicConfig(level=logging.INFO)
3 |
--------------------------------------------------------------------------------
/texcla/corpus.py:
--------------------------------------------------------------------------------
1 | import os
2 | import io
3 |
4 | import keras
5 | import sklearn
6 |
7 |
8 | def read_folder(directory):
9 |     """Reads the text files in a directory and returns their contents as a list.
10 |
11 | Args:
12 | directory: where the text files are
13 |
14 | Returns:
15 |         List of the files' text contents
16 | """
17 | res = []
18 | for filename in os.listdir(directory):
19 | with io.open(os.path.join(directory, filename), encoding="utf-8") as f:
20 | content = f.read()
21 | res.append(content)
22 | return res
23 |
24 |
25 | def read_pos_neg_data(path, folder, limit):
26 | """returns array with positive and negative examples"""
27 | training_pos_path = os.path.join(path, folder, 'pos')
28 | training_neg_path = os.path.join(path, folder, 'neg')
29 |
30 | X_pos = read_folder(training_pos_path)
31 | X_neg = read_folder(training_neg_path)
32 |
33 | if limit is None:
34 | X = X_pos + X_neg
35 | else:
36 | X = X_pos[:limit] + X_neg[:limit]
37 |
38 | y = [1] * int(len(X) / 2) + [0] * int(len(X) / 2)
39 |
40 | return X, y
41 |
42 |
43 | def imdb(limit=None, shuffle=True):
44 |     """Downloads (and caches) the IMDB Movie Reviews dataset: 25k training samples, 25k test samples.
45 |
46 | Args:
47 | limit: get only first N items for each class
48 |
49 | Returns:
50 | [X_train, y_train, X_test, y_test]
51 | """
52 |
53 | movie_review_url = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
54 |
55 | # download and extract, thus remove the suffix '.tar.gz'
56 | path = keras.utils.get_file(
57 | 'aclImdb.tar.gz', movie_review_url, extract=True)[:-7]
58 |
59 | X_train, y_train = read_pos_neg_data(path, 'train', limit)
60 | X_test, y_test = read_pos_neg_data(path, 'test', limit)
61 |
62 | if shuffle:
63 | X_train, y_train = sklearn.utils.shuffle(X_train, y_train)
64 | X_test, y_test = sklearn.utils.shuffle(X_test, y_test)
65 |
66 |     return X_train, y_train, X_test, y_test
67 |
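A minimal usage sketch (not part of the repository), assuming the return order documented in the `imdb` docstring above:

    from texcla.corpus import imdb

    # download once (cached by Keras) and keep the first 100 reviews per class and split
    X_train, y_train, X_test, y_test = imdb(limit=100)
    print(len(X_train), len(y_train))  # 200 texts and 200 binary labels (1 = pos, 0 = neg)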
--------------------------------------------------------------------------------
/texcla/data.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import logging
4 |
5 | import numpy as np
6 | from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
7 |
8 | from .utils import io, sampling
9 |
10 | logger = logging.getLogger(__name__)
11 |
12 |
13 | class Dataset(object):
14 |
15 | def __init__(self, X, y, tokenizer=None):
16 | """Encapsulates all pieces of data to run an experiment. This is basically a bag of items that makes it
17 | easy to serialize and deserialize everything as a unit.
18 |
19 | Args:
20 |             X: The raw model inputs. This can be set to None if you don't want
21 |                 to serialize this value when you save the dataset.
22 |             y: The raw output labels.
23 |             tokenizer: The optional tokenizer that was used to encode `X`. Ideally, the same
24 |                 tokenizer (and hence the same vocabulary) should be reused across
25 |                 experiments to make results comparable; storing it with the dataset
26 |                 makes that easy.
27 | """
28 | self.X = np.array(X)
29 | self.y = np.array(y)
30 | self.tokenizer = tokenizer
31 |
32 | self.is_multi_label = isinstance(y[0], (set, list, tuple))
33 | if self.is_multi_label:
34 | self.label_encoder = MultiLabelBinarizer()
35 | self.y = self.label_encoder.fit_transform(self.y)
36 | else:
37 | self.label_encoder = LabelBinarizer()
38 | self.label_encoder = self.label_encoder.fit(self.y)
39 | if (len(self.labels) == 2):
40 | # https://stackoverflow.com/questions/31947140/sklearn-labelbinarizer-returns-vector-when-there-are-2-classes
41 | self.y = np.array(
42 | [[1, 0] if l == self.labels[0] else [0, 1] for l in self.y])
43 | else:
44 | self.y = self.label_encoder.transform(self.y)
45 |
46 | def save(self, file_path):
47 | """Serializes this dataset to a file.
48 |
49 | Args:
50 | file_path: The file path to use.
51 | """
52 | io.dump(self, file_path)
53 |
54 | @staticmethod
55 | def load(file_path):
56 | """Loads the dataset from a file.
57 |
58 | Args:
59 | file_path: The file path to use.
60 |
61 | Returns:
62 | The `Dataset` instance.
63 | """
64 | return io.load(file_path)
65 |
66 | @property
67 | def labels(self):
68 | return self.label_encoder.classes_
69 |
70 | @property
71 | def num_classes(self):
72 | if len(self.y.shape) == 1:
73 | return 1
74 | else:
75 | return len(self.labels)
76 |
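A small sketch (an assumption, not repository code) of how `Dataset` binarizes labels in the single-label and multi-label cases handled above:

    from texcla.data import Dataset

    # single-label with two classes: rows become one-hot, e.g. [1, 0] / [0, 1]
    ds = Dataset(X=[[1, 2, 0], [2, 1, 0]], y=['pos', 'neg'])
    print(ds.labels, ds.num_classes)   # ['neg' 'pos'] 2

    # multi-label (lists of labels per sample): MultiLabelBinarizer yields indicator rows
    ds_multi = Dataset(X=[[1, 2, 0], [2, 1, 0]], y=[['a', 'b'], ['b']])
    print(ds_multi.y)                  # [[1 1] [0 1]]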
--------------------------------------------------------------------------------
/texcla/embeddings.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import absolute_import, unicode_literals
3 |
4 | import gzip
5 | import io
6 | import logging
7 | import os
8 | from zipfile import ZipFile
9 |
10 | import numpy as np
11 | import six
12 | from keras.utils.data_utils import get_file
13 |
14 | logger = logging.getLogger(__name__)
15 | _EMBEDDINGS_CACHE = dict()
16 |
17 | # Add more types here as needed.
18 | # – fastText: https://fasttext.cc/docs/en/english-vectors.html
19 | # - glove: https://nlp.stanford.edu/projects/glove/
20 |
21 | _EMBEDDING_TYPES = {
22 | # 1 million word vectors trained on Wikipedia 2017, UMBC webbase corpus and statmt.org news dataset (16B tokens).
23 | 'fasttext.wn.1M.300d': {
24 | 'file': 'wiki-news-300d-1M.vec.zip',
25 | 'url': 'https://dl.fbaipublicfiles.com/fasttext/vectors-english/wiki-news-300d-1M.vec.zip'
26 | },
27 |     # 1 million word vectors trained with subword information on Wikipedia 2017, UMBC webbase corpus and statmt.org news dataset (16B tokens).
28 | 'fasttext.wn.1M.300d.subword': {
29 | 'file': 'wiki-news-300d-1M-subword.vec.zip',
30 | 'url': 'https://dl.fbaipublicfiles.com/fasttext/vectors-english/wiki-news-300d-1M-subword.vec.zip'
31 | },
32 | # 2 million word vectors trained on Common Crawl (600B tokens).
33 | 'fasttext.crawl.2M.300d.subword': {
34 |         'file': 'crawl-300d-2M.vec.zip',
35 | 'url': 'https://dl.fbaipublicfiles.com/fasttext/vectors-english/crawl-300d-2M.vec.zip'
36 | },
37 | # 42 Billion tokens Common Crawl
38 | 'glove.42B.300d': {
39 | 'file': 'glove.42B.300d.txt.zip',
40 | 'url': 'http://nlp.stanford.edu/data/glove.42B.300d.zip'
41 | },
42 | # 6 Billion tokens from Wikipedia 2014 + Gigaword 5
43 | 'glove.6B.50d': {
44 | 'file': 'glove.6B.zip',
45 | 'url': 'http://nlp.stanford.edu/data/glove.6B.zip',
46 | 'extract': False,
47 | 'file_in_zip': 'glove.6B.50d.txt'
48 | },
49 |
50 | 'glove.6B.100d': {
51 | 'file': 'glove.6B.zip',
52 | 'url': 'http://nlp.stanford.edu/data/glove.6B.zip',
53 | 'extract': False,
54 | 'file_in_zip': 'glove.6B.100d.txt'
55 | },
56 |
57 | 'glove.6B.200d': {
58 | 'file': 'glove.6B.zip',
59 | 'url': 'http://nlp.stanford.edu/data/glove.6B.zip',
60 | 'extract': False,
61 | 'file_in_zip': 'glove.6B.200d.txt'
62 | },
63 |
64 | 'glove.6B.300d': {
65 | 'file': 'glove.6B.zip',
66 | 'url': 'http://nlp.stanford.edu/data/glove.6B.zip',
67 | 'extract': False,
68 | 'file_in_zip': 'glove.6B.300d.txt'
69 | },
70 | # 840 Billion tokens Common Crawl
71 | 'glove.840B.300d': {
72 | 'file': 'glove.840B.300d.txt.zip',
73 | 'url': 'http://nlp.stanford.edu/data/glove.840B.300d.zip'
74 | },
75 | # 2 Billion tweets, 27 Billion tokens Twitter
76 | 'glove.twitter.27B.25d': {
77 | 'file': 'glove.twitter.27B.zip',
78 | 'url': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip',
79 | 'extract': False,
80 | 'file_in_zip': 'glove.twitter.27B.25d.txt'
81 | },
82 | 'glove.twitter.27B.50d': {
83 | 'file': 'glove.twitter.27B.zip',
84 | 'url': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip',
85 | 'extract': False,
86 | 'file_in_zip': 'glove.twitter.27B.50d.txt'
87 | },
88 | 'glove.twitter.27B.100d': {
89 | 'file': 'glove.twitter.27B.zip',
90 | 'url': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip',
91 | 'extract': False,
92 | 'file_in_zip': 'glove.twitter.27B.100d.txt'
93 | },
94 | 'glove.twitter.27B.200d': {
95 | 'file': 'glove.twitter.27B.zip',
96 | 'url': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip',
97 | 'extract': False,
98 | 'file_in_zip': 'glove.twitter.27B.200d.txt'
99 | },
100 | }
101 |
102 |
103 | def _build_line(embedding_dims, f, is_gzip=False):
104 | index = {}
105 |
106 | for line in f:
107 |
108 | # has to be done for gziped files
109 | if is_gzip and six.PY2:
110 | line = line.decode('utf-8')
111 |
112 | values = line.split()
113 | assert len(values) >= embedding_dims or len(
114 | values) == 2, 'is the file corrupted?'
115 |
116 | # some hack for fasttext vectors where the first line is (num_token, dimensions)
117 | if len(values) <= 2 and embedding_dims > 1:
118 | continue
119 |
120 | word = ' '.join(values[:-embedding_dims])
121 | floats = values[-embedding_dims:]
122 |
123 | if not isinstance(word, six.text_type):
124 | word = word.decode()
125 |
126 | vector = np.asarray(floats, dtype='float32')
127 | index[word] = vector
128 | return index
129 |
130 |
131 | def _build_embeddings_index(embeddings_path, embedding_dims):
132 | logger.info('Building embeddings index...')
133 | if embeddings_path.endswith('.gz'):
134 | with gzip.open(embeddings_path, 'rt') as f:
135 | index = _build_line(embedding_dims, f, is_gzip=True)
136 |
137 | else:
138 | # is ignoring errors a good idea? 🤔
139 | with io.open(embeddings_path, encoding="utf-8", errors='ignore') as f:
140 | index = _build_line(embedding_dims, f)
141 |
142 | logger.info('Done')
143 | return index
144 |
145 |
146 | def build_embedding_weights(word_index, embeddings_index):
147 | """Builds an embedding matrix for all words in vocab using embeddings_index
148 | """
149 | logger.info('Loading embeddings for all words in the corpus')
150 | embedding_dim = list(embeddings_index.values())[0].shape[-1]
151 |
152 | # setting special tokens such as UNK and PAD to 0
153 | # all other words are also set to 0.
154 | embedding_weights = np.zeros((len(word_index), embedding_dim))
155 |
156 | for word, i in word_index.items():
157 | word_vector = embeddings_index.get(word)
158 | if word_vector is not None:
159 | embedding_weights[i] = word_vector
160 |
161 | return embedding_weights
162 |
163 |
164 | def build_fasttext_wiki_embedding_obj(embedding_type):
165 |     """FastText pre-trained word vectors for 294 languages, with 300 dimensions, trained on Wikipedia. It's recommended to use the same tokenizer for your data that was used to construct the embeddings. It's implemented as `FastTextWikiTokenizer`. More information: https://fasttext.cc/docs/en/pretrained-vectors.html.
166 |
167 | Args:
168 |         embedding_type: A string in the format `fasttext.wiki.$LANG_CODE`, e.g. `fasttext.wiki.de` or `fasttext.wiki.es`
169 | Returns:
170 | Object with the URL and filename used later on for downloading the file.
171 | """
172 | lang = embedding_type.split('.')[2]
173 | return {
174 | 'file': 'wiki.{}.vec'.format(lang),
175 | 'url': 'https://dl.fbaipublicfiles.com/fasttext/vectors-wiki/wiki.{}.vec'.format(lang),
176 | 'extract': False,
177 | }
178 |
179 |
180 | def build_fasttext_cc_embedding_obj(embedding_type):
181 |     """FastText pre-trained word vectors for 157 languages, with 300 dimensions, trained on Common Crawl and Wikipedia. Released in 2018, they succeeded the 2017 FastText Wikipedia embeddings. It's recommended to use the same tokenizer for your data that was used to construct the embeddings. This information and more can be found on their website: https://fasttext.cc/docs/en/crawl-vectors.html.
182 |
183 | Args:
184 |         embedding_type: A string in the format `fasttext.cc.$LANG_CODE`, e.g. `fasttext.cc.de` or `fasttext.cc.es`
185 | Returns:
186 | Object with the URL and filename used later on for downloading the file.
187 | """
188 | lang = embedding_type.split('.')[2]
189 | return {
190 | 'file': 'cc.{}.300.vec.gz'.format(lang),
191 | 'url': 'https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.{}.300.vec.gz'.format(lang),
192 | 'extract': False
193 | }
194 |
195 |
196 | def get_embedding_type(embedding_type):
197 | if embedding_type.startswith('fasttext.wiki.'):
198 | return build_fasttext_wiki_embedding_obj(embedding_type)
199 | if embedding_type.startswith('fasttext.cc.'):
200 | return build_fasttext_cc_embedding_obj(embedding_type)
201 |
202 | data_obj = _EMBEDDING_TYPES.get(embedding_type)
203 |
204 | if data_obj is None:
205 | raise ValueError("Embedding type should be either `fasttext.wiki.$LANG_CODE`, `fasttext.cc.$LANG_CODE` or one of the English embeddings: '{}'".format(
206 | _EMBEDDING_TYPES.keys()))
207 |
208 | return data_obj
209 |
210 |
211 | def get_embeddings_index(embedding_type='glove.42B.300d', embedding_dims=None, embedding_path=None, cache=True):
212 | """Retrieves embeddings index from embedding name or path. Will automatically download and cache as needed.
213 |
214 | Args:
215 | embedding_type: The embedding type to load.
216 | embedding_path: Path to a local embedding to use instead of the embedding type. Ignores `embedding_type` if specified.
217 |
218 | Returns:
219 | The embeddings indexed by word.
220 | """
221 |
222 | if embedding_path is not None:
223 | embedding_type = embedding_path # identify embedding by path
224 |
225 | embeddings_index = _EMBEDDINGS_CACHE.get(embedding_type)
226 | if embeddings_index is not None:
227 | return embeddings_index
228 |
229 | if embedding_path is None:
230 | embedding_type_obj = get_embedding_type(embedding_type)
231 |
232 | # some very rough wrangling of zip files with the keras util `get_file`
233 | # a special problem: when multiple files are in one zip file
234 | extract = embedding_type_obj.get('extract', True)
235 | file_path = get_file(
236 | embedding_type_obj['file'], origin=embedding_type_obj['url'], extract=extract, cache_subdir='embeddings', file_hash=embedding_type_obj.get('file_hash',))
237 |
238 | if 'file_in_zip' in embedding_type_obj:
239 | zip_folder = file_path.split('.zip')[0]
240 | with ZipFile(file_path, 'r') as zf:
241 | zf.extractall(zip_folder)
242 | file_path = os.path.join(
243 | zip_folder, embedding_type_obj['file_in_zip'])
244 | else:
245 | if extract:
246 | if file_path.endswith('.zip'):
247 | file_path = file_path.split('.zip')[0]
248 | # if file_path.endswith('.gz'):
249 | # file_path = file_path.split('.gz')[0]
250 | else:
251 | file_path = embedding_path
252 |
253 | embeddings_index = _build_embeddings_index(file_path, embedding_dims)
254 |
255 | if cache:
256 | _EMBEDDINGS_CACHE[embedding_type] = embeddings_index
257 | return embeddings_index
258 |
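A usage sketch (an assumption, not part of the package) combining `get_embeddings_index` and `build_embedding_weights` with a hypothetical vocabulary; the first call downloads the roughly 800 MB `glove.6B.zip` archive once and caches it under `~/.keras/embeddings`:

    from texcla.embeddings import build_embedding_weights, get_embeddings_index

    # hypothetical token index as produced by a tokenizer (0 and 1 reserved for special tokens)
    token_index = {'<PAD>': 0, '<UNK>': 1, 'hello': 2, 'world': 3}

    index = get_embeddings_index('glove.6B.50d', embedding_dims=50)
    weights = build_embedding_weights(token_index, index)
    print(weights.shape)  # (4, 50); rows for tokens without a pre-trained vector stay zero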
--------------------------------------------------------------------------------
/texcla/experiment.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import datetime
3 | import inspect
4 | import os
5 | import pathlib
6 | from os import path
7 | from shutil import copyfile, move
8 |
9 | import deep_plots
10 | import keras
11 | import six
12 | from sklearn.model_selection import train_test_split
13 |
14 | from .data import Dataset
15 | from .utils.format import to_fixed_digits
16 |
17 |
18 | def create_experiment_folder(base_dir, model, lr, batch_size):
19 | if six.PY2:
20 | try:
21 | os.makedirs(base_dir)
22 | except:
23 | pass
24 | else:
25 | pathlib.Path(base_dir).mkdir(parents=True, exist_ok=True)
26 |
27 | num_folders = len(next(os.walk(base_dir))[1])
28 |
29 |     # 5 digits
30 | exp_id = "%05d" % num_folders
31 |
32 | filename = [exp_id, str(model), "lr",
33 | to_fixed_digits(lr), "bs", to_fixed_digits(batch_size)]
34 | filename = '_'.join(filename)
35 | filename = filename.replace('.', '_')
36 |
37 | exp_path = path.join(base_dir, filename)
38 | pathlib.Path(exp_path).mkdir(parents=True)
39 | return exp_path
40 |
41 |
42 | def copy_called_file(exp_path):
43 | # because it's called within train
44 | _, filename, _, _, _, _ = inspect.stack()[2]
45 | copyfile(filename, path.join(
46 | exp_path, filename.split('/')[-1])) # only last
47 |
48 |
49 | def create_callbacks(exp_path, patience):
50 | checkpoint = keras.callbacks.ModelCheckpoint(
51 | path.join(exp_path, 'best.hdf5'), monitor='val_acc', save_best_only=True, verbose=1)
52 | early_stop = keras.callbacks.EarlyStopping(
53 | monitor='val_loss', patience=patience, verbose=1)
54 | csv_logger = keras.callbacks.CSVLogger(
55 | path.join(exp_path, 'log.csv'), append=True, separator=';')
56 | return [checkpoint, early_stop, csv_logger]
57 |
58 |
59 | def train(model, word_encoder_model, lr=0.001, batch_size=64, epochs=50, patience=10, base_dir='experiments', **fit_args):
60 | optimizer = keras.optimizers.adam(lr=lr)
61 | model.compile(optimizer=optimizer,
62 | loss='categorical_crossentropy', metrics=['accuracy'])
63 |
64 | exp_path = create_experiment_folder(
65 | base_dir, word_encoder_model, lr=lr, batch_size=batch_size)
66 |
67 | copy_called_file(exp_path)
68 |
69 | model.summary()
70 |
71 | with open(path.join(exp_path, 'config.txt'), 'a') as the_file:
72 | the_file.write(
73 | '\n'.join([str(x) for x in [lr, word_encoder_model.dropout_rate, batch_size, datetime.datetime.now()]]))
74 |
75 | history = model.fit(epochs=epochs,
76 | batch_size=batch_size, callbacks=create_callbacks(exp_path, patience), **fit_args)
77 |
78 | best_acc = str(max(history.history['val_acc']))[:6]
79 |
80 | # append best acc
81 | deep_plots.from_keras_log(path.join(exp_path, 'log.csv'), exp_path)
82 | move(exp_path, exp_path + '_' + best_acc)
83 |
84 |
85 | def load_csv(data_path=None, text_col='text', class_col='class', limit=None):
86 | X = []
87 | y = []
88 |
89 | with open(data_path) as csvfile:
90 | reader = csv.DictReader(csvfile)
91 | if limit is not None:
92 | reader = list(reader)[:limit]
93 | for row in reader:
94 | try:
95 | new_x = row[text_col]
96 | X.append(new_x)
97 | raw_y = row[class_col]
98 | y.append(raw_y)
99 |
100 | except Exception as e:
101 | print(e)
102 |
103 | return X, y
104 |
105 |
106 | def process_save(X, y, tokenizer, proc_data_path, max_len=400, train=False, ngrams=None, limit_top_tokens=None):
107 | """Process text and save as Dataset
108 | """
109 | if train and limit_top_tokens is not None:
110 | tokenizer.apply_encoding_options(limit_top_tokens=limit_top_tokens)
111 |
112 | X_encoded = tokenizer.encode_texts(X)
113 |
114 | if ngrams is not None:
115 | X_encoded = tokenizer.add_ngrams(X_encoded, n=ngrams, train=train)
116 |
117 | X_padded = tokenizer.pad_sequences(
118 | X_encoded, fixed_token_seq_length=max_len)
119 |
120 | if train:
121 | ds = Dataset(X_padded,
122 | y, tokenizer=tokenizer)
123 | else:
124 | ds = Dataset(X_padded, y)
125 |
126 | ds.save(proc_data_path)
127 |
128 |
129 | def setup_data(X, y, tokenizer, proc_data_path, **kwargs):
130 | """Setup data
131 |
132 | Args:
133 | X: text data,
134 | y: data labels,
135 | tokenizer: A Tokenizer instance
136 | proc_data_path: Path for the processed data
137 | """
138 | # only build vocabulary once (e.g. training data)
139 | train = not tokenizer.has_vocab
140 | if train:
141 | tokenizer.build_vocab(X)
142 |
143 | process_save(X, y, tokenizer, proc_data_path,
144 | train=train, **kwargs)
145 | return tokenizer
146 |
147 |
148 | def split_data(X, y, ratio=(0.8, 0.1, 0.1)):
149 | """Splits data into a training, validation, and test set.
150 |
151 | Args:
152 | X: text data
153 | y: data labels
154 | ratio: the ratio for splitting. Default: (0.8, 0.1, 0.1)
155 |
156 | Returns:
157 | split data: X_train, X_val, X_test, y_train, y_val, y_test
158 | """
159 | assert(sum(ratio) == 1 and len(ratio) == 3)
160 | X_train, X_rest, y_train, y_rest = train_test_split(
161 | X, y, train_size=ratio[0])
162 | X_val, X_test, y_val, y_test = train_test_split(
163 |         X_rest, y_rest, train_size=ratio[1] / (ratio[1] + ratio[2]))
164 | return X_train, X_val, X_test, y_train, y_val, y_test
165 |
166 |
167 | def setup_data_split(X, y, tokenizer, proc_data_dir, **kwargs):
168 | """Setup data while splitting into a training, validation, and test set.
169 |
170 | Args:
171 | X: text data,
172 | y: data labels,
173 | tokenizer: A Tokenizer instance
174 | proc_data_dir: Directory for the split and processed data
175 | """
176 | X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y)
177 |
178 | # only build vocabulary on training data
179 | tokenizer.build_vocab(X_train)
180 |
181 | process_save(X_train, y_train, tokenizer, path.join(
182 | proc_data_dir, 'train.bin'), train=True, **kwargs)
183 | process_save(X_val, y_val, tokenizer, path.join(
184 | proc_data_dir, 'val.bin'), **kwargs)
185 | process_save(X_test, y_test, tokenizer, path.join(
186 | proc_data_dir, 'test.bin'), **kwargs)
187 |
188 |
189 | def load_data_split(proc_data_dir):
190 | """Loads a split dataset
191 |
192 | Args:
193 | proc_data_dir: Directory with the split and processed data
194 |
195 | Returns:
196 | (Training Data, Validation Data, Test Data)
197 | """
198 | ds_train = Dataset.load(path.join(proc_data_dir, 'train.bin'))
199 | ds_val = Dataset.load(path.join(proc_data_dir, 'val.bin'))
200 | ds_test = Dataset.load(path.join(proc_data_dir, 'test.bin'))
201 | return ds_train, ds_val, ds_test
202 |
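An end-to-end sketch (an assumption about intended usage, mirroring tests/train.py) that chains the helpers above; `data.csv` and its column names are hypothetical:

    import os

    from texcla import experiment
    from texcla.models import TokenModelFactory, YoonKimCNN
    from texcla.preprocessing import SpacyTokenizer

    X, y = experiment.load_csv('data.csv', text_col='text', class_col='class')

    os.makedirs('proc', exist_ok=True)  # setup_data_split writes train.bin/val.bin/test.bin here
    experiment.setup_data_split(X, y, SpacyTokenizer(), 'proc', max_len=100)
    ds_train, ds_val, ds_test = experiment.load_data_split('proc')

    factory = TokenModelFactory(ds_train.num_classes, ds_train.tokenizer.token_index,
                                max_tokens=100, embedding_type='glove.6B.50d', embedding_dims=50)
    word_encoder_model = YoonKimCNN()
    model = factory.build_model(token_encoder_model=word_encoder_model)

    # train() compiles the model, creates an experiment folder and wraps model.fit()
    experiment.train(x=ds_train.X, y=ds_train.y, validation_data=(ds_val.X, ds_val.y),
                     model=model, word_encoder_model=word_encoder_model, epochs=5, batch_size=32)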
--------------------------------------------------------------------------------
/texcla/libs/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jfilter/text-classification-keras/85882306d00242a4e6ead90d94f8a1f98a86535a/texcla/libs/__init__.py
--------------------------------------------------------------------------------
/texcla/libs/fastTextWikiTokenizer/__init__.py:
--------------------------------------------------------------------------------
1 | from .tokenize import tokenize
2 |
--------------------------------------------------------------------------------
/texcla/libs/fastTextWikiTokenizer/subprocess_fix.py:
--------------------------------------------------------------------------------
1 | # https://stackoverflow.com/a/30064888/4028896
2 | # make it work with python2.7
3 |
4 | from subprocess import Popen, PIPE, CalledProcessError
5 |
6 |
7 | def check_output_input(*popenargs, **kwargs):
8 | """Run command with arguments and return its output as a byte string.
9 |
10 | If the exit code was non-zero it raises a CalledProcessError. The
11 | CalledProcessError object will have the return code in the returncode
12 | attribute and output in the output attribute.
13 |
14 | The arguments are the same as for the Popen constructor. Example:
15 |
16 | >>> check_output(["ls", "-l", "/dev/null"])
17 | 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
18 |
19 | The stdout argument is not allowed as it is used internally.
20 | To capture standard error in the result, use stderr=STDOUT.
21 |
22 | >>> check_output(["/bin/sh", "-c",
23 | ... "ls -l non_existent_file ; exit 0"],
24 | ... stderr=STDOUT)
25 | 'ls: non_existent_file: No such file or directory\n'
26 |
27 | There is an additional optional argument, "input", allowing you to
28 | pass a string to the subprocess's stdin. If you use this argument
29 | you may not also use the Popen constructor's "stdin" argument, as
30 | it too will be used internally. Example:
31 |
32 | >>> check_output(["sed", "-e", "s/foo/bar/"],
33 | ... input=b"when in the course of fooman events\n")
34 | b'when in the course of barman events\n'
35 |
36 | If universal_newlines=True is passed, the return value will be a
37 | string rather than bytes.
38 |
39 | """
40 | if 'stdout' in kwargs:
41 | raise ValueError('stdout argument not allowed, it will be overridden.')
42 | if 'input' in kwargs:
43 | if 'stdin' in kwargs:
44 | raise ValueError('stdin and input arguments may not both be used.')
45 | inputdata = kwargs['input']
46 | del kwargs['input']
47 | kwargs['stdin'] = PIPE
48 | else:
49 | inputdata = None
50 | process = Popen(*popenargs, stdout=PIPE, **kwargs)
51 | try:
52 | output, unused_err = process.communicate(inputdata)
53 | except:
54 | process.kill()
55 | process.wait()
56 | raise
57 | retcode = process.poll()
58 | if retcode:
59 | cmd = kwargs.get("args")
60 | if cmd is None:
61 | cmd = popenargs[0]
62 | raise CalledProcessError(retcode, cmd, output=output)
63 | return output
64 |
--------------------------------------------------------------------------------
/texcla/libs/fastTextWikiTokenizer/tokenize.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # taken from: https://gist.github.com/jfilter/28e019b5d6c62ebdc87721862ea0867c
3 | # Taken from: https://gist.github.com/bittlingmayer/7139a6a75ba0dbbc3a06325394ae3a13
4 |
5 | # See https://github.com/facebookresearch/fastText/blob/master/get-wikimedia.sh
6 | #
7 | # From https://github.com/facebookresearch/fastText/issues/161:
8 | # See also: https://github.com/facebookresearch/fastText/issues/401
9 | #
10 | # We now have a script called 'get-wikimedia.sh', that you can use to download and
11 | # process a recent wikipedia dump of any language. This script applies the preprocessing
12 | # we used to create the published word vectors.
13 | #
14 | # The parameters we used to build the word vectors are the default skip-gram settings,
15 | # except with a dimensionality of 300 as indicated on the top of the list of word
16 | # vectors (we now understand that this could be more visible).
17 |
18 | # See also: known issues with the original script https://github.com/facebookresearch/fastText/issues/281, which unfortunately we must re-implement here.
19 |
20 | '''
21 | sed -e "s/’/'/g" -e "s/′/'/g" -e "s/''/ /g" -e "s/'/ ' /g" -e "s/“/\"/g" -e "s/”/\"/g" \
22 | -e 's/"/ " /g' -e 's/\./ \. /g' -e 's/<br \/>/ /g' -e 's/, / , /g' -e 's/(/ ( /g' -e 's/)/ ) /g' -e 's/\!/ \! /g' \
23 | -e 's/\?/ \? /g' -e 's/\;/ /g' -e 's/\:/ /g' -e 's/-/ - /g' -e 's/=/ /g' -e 's/=/ /g' -e 's/*/ /g' -e 's/|/ /g' \
24 | -e 's/«/ /g' | tr 0-9 " "
25 | '''
26 | import subprocess
27 |
28 | from . import subprocess_fix
29 |
30 | SUBEXES = ["s/’/'/g", "s/′/'/g", "s/''/ /g", "s/'/ ' /g", 's/“/"/g', 's/”/"/g', 's/"/ /g', "s/\\./ \\. /g", "s/<br \\/>/ /g", "s/, / , /g",
31 | "s/(/ ( /g", "s/)/ ) /g", "s/\\!/ \\! /g", "s/\\?/ \\? /g", "s/\\;/ /g", "s/\\:/ /g", "s/-/ - /g", "s/=/ /g", "s/=/ /g", "s/*/ /g", "s/|/ /g", "s/«/ /g"]
32 |
33 |
34 | def __normalize_text(s):
35 | commands = ['sed']
36 | for sb in SUBEXES:
37 | commands.append('-e')
38 | commands.append(sb)
39 |
40 | s = subprocess_fix.check_output_input(
41 | commands, input=s.encode()).decode("utf-8")
42 | return s
43 |
44 | # Program to filter Wikipedia XML dumps to "clean" text consisting only of lowercase
45 | # letters (a-z, converted from A-Z), and spaces (never consecutive)...
46 | # All other characters are converted to spaces. Only text which normally appears
47 | # in the web browser is displayed. Tables are removed. Image captions are
48 | # preserved. Links are converted to normal text. Digits are spelled out.
49 | # *** Modified to not spell digits or throw away non-ASCII characters ***
50 | # Written by Matt Mahoney, June 10, 2006. This program is released to the public domain.
51 |
52 |
53 | def __spaces(s):
54 | return ' '.join(s.split())
55 |
56 |
57 | def __digits(s):
58 | return ''.join(filter(lambda c: not c.isdigit(), s))
59 |
60 |
61 | def preproc(s):
62 | return __digits(__spaces(__normalize_text(s.lower())))
63 |
64 |
65 | def tokenize(s):
66 | return(preproc(s).split())
67 |
68 | # Example output:
69 | #
70 | # >>> preproc("Г. Шмидт, можно сказать «Давай давай!»?")
71 | # 'г . шмидт , можно сказать давай давай ! » ?'
72 | # >>> preproc('It won 1st place in the 3D film contest.')
73 | # 'it won st place in the d film contest .'
74 |
--------------------------------------------------------------------------------
/texcla/libs/ngrams/__init__.py:
--------------------------------------------------------------------------------
1 | from .ngrams import *
2 |
--------------------------------------------------------------------------------
/texcla/libs/ngrams/ngrams.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import unicode_literals
3 |
4 | # https://github.com/keras-team/keras/blob/master/examples/imdb_fasttext.py
5 |
6 | # COPYRIGHT
7 |
8 | # All contributions by François Chollet:
9 | # Copyright (c) 2015 - 2018, François Chollet.
10 | # All rights reserved.
11 |
12 | # All contributions by Google:
13 | # Copyright (c) 2015 - 2018, Google, Inc.
14 | # All rights reserved.
15 |
16 | # All contributions by Microsoft:
17 | # Copyright (c) 2017 - 2018, Microsoft, Inc.
18 | # All rights reserved.
19 |
20 | # All other contributions:
21 | # Copyright (c) 2015 - 2018, the respective contributors.
22 | # All rights reserved.
23 |
24 | # Each contributor holds copyright over their respective contributions.
25 | # The project versioning (Git) records all such contribution source information.
26 |
27 | # LICENSE
28 |
29 | # The MIT License (MIT)
30 |
31 | # Permission is hereby granted, free of charge, to any person obtaining a copy
32 | # of this software and associated documentation files (the "Software"), to deal
33 | # in the Software without restriction, including without limitation the rights
34 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
35 | # copies of the Software, and to permit persons to whom the Software is
36 | # furnished to do so, subject to the following conditions:
37 |
38 | # The above copyright notice and this permission notice shall be included in all
39 | # copies or substantial portions of the Software.
40 |
41 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
42 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
43 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
44 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
45 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
46 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
47 | # SOFTWARE.
48 |
49 |
50 | def create_ngram_set(input_list, ngram_value=2):
51 | """
52 | Extract a set of n-grams from a list of integers.
53 | >>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=2)
54 | {(4, 9), (4, 1), (1, 4), (9, 4)}
55 | >>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=3)
56 |     {(1, 4, 9), (4, 9, 4), (9, 4, 1), (4, 1, 4)}
57 | """
58 | return set(zip(*[input_list[i:] for i in range(ngram_value)]))
59 |
60 |
61 | def add_ngram(sequences, token_indice, ngram_range=2):
62 | """
63 | Augment the input list of list (sequences) by appending n-grams values.
64 | Example: adding bi-gram
65 | >>> sequences = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]
66 | >>> token_indice = {(1, 3): 1337, (9, 2): 42, (4, 5): 2017}
67 | >>> add_ngram(sequences, token_indice, ngram_range=2)
68 | [[1, 3, 4, 5, 1337, 2017], [1, 3, 7, 9, 2, 1337, 42]]
69 | Example: adding tri-gram
70 | >>> sequences = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]
71 | >>> token_indice = {(1, 3): 1337, (9, 2): 42, (4, 5): 2017, (7, 9, 2): 2018}
72 | >>> add_ngram(sequences, token_indice, ngram_range=3)
73 | [[1, 3, 4, 5, 1337, 2017], [1, 3, 7, 9, 2, 1337, 42, 2018]]
74 | """
75 | new_sequences = []
76 | for input_list in sequences:
77 | new_list = input_list[:]
78 | for ngram_value in range(2, ngram_range + 1):
79 | for i in range(len(new_list) - ngram_value + 1):
80 | ngram = tuple(new_list[i:i + ngram_value])
81 | if ngram in token_indice:
82 | new_list.append(token_indice[ngram])
83 | new_sequences.append(new_list)
84 | return new_sequences
85 |
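A brief sketch (an assumption, in the spirit of the Keras `imdb_fasttext` example this module is adapted from) of how the two helpers are typically combined: ids for the observed bi-grams are appended after the existing word ids.

    from texcla.libs.ngrams import add_ngram, create_ngram_set

    X_encoded = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]   # toy encoded sequences
    max_token_id = 10                              # hypothetical size of the word vocabulary

    # collect every bi-gram seen in the data and assign it a fresh id above the vocabulary
    ngram_set = set()
    for seq in X_encoded:
        ngram_set.update(create_ngram_set(seq, ngram_value=2))
    token_indice = {ngram: max_token_id + i + 1 for i, ngram in enumerate(ngram_set)}

    X_augmented = add_ngram(X_encoded, token_indice, ngram_range=2)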
--------------------------------------------------------------------------------
/texcla/libs/twokenize/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
--------------------------------------------------------------------------------
/texcla/libs/twokenize/__init__.py:
--------------------------------------------------------------------------------
1 | from .twokenize import tokenize
2 |
--------------------------------------------------------------------------------
/texcla/libs/twokenize/twokenize.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Twokenize -- a tokenizer designed for Twitter text in English and some other European languages.
5 | This tokenizer code has gone through a long history:
6 |
7 | (1) Brendan O'Connor wrote original version in Python, http://github.com/brendano/tweetmotif
8 | TweetMotif: Exploratory Search and Topic Summarization for Twitter.
9 | Brendan O'Connor, Michel Krieger, and David Ahn.
10 | ICWSM-2010 (demo track), http://brenocon.com/oconnor_krieger_ahn.icwsm2010.tweetmotif.pdf
11 | (2a) Kevin Gimpel and Daniel Mills modified it for POS tagging for the CMU ARK Twitter POS Tagger
12 | (2b) Jason Baldridge and David Snyder ported it to Scala
13 | (3) Brendan bugfixed the Scala port and merged with POS-specific changes
14 | for the CMU ARK Twitter POS Tagger
15 | (4) Tobi Owoputi ported it back to Java and added many improvements (2012-06)
16 |
17 | Current home is http://github.com/brendano/ark-tweet-nlp and http://www.ark.cs.cmu.edu/TweetNLP
18 |
19 | There have been at least 2 other Java ports, but they are not in the lineage for the code here.
20 |
21 | Ported to Python by Myle Ott : https://github.com/myleott/ark-twokenize-py
22 | """
23 | from __future__ import unicode_literals
24 |
25 | import operator
26 | import re
27 | import sys
28 |
29 | try:
30 | from html.parser import HTMLParser
31 | except ImportError:
32 | from HTMLParser import HTMLParser
33 |
34 | try:
35 | import html
36 | except ImportError:
37 | pass
38 |
39 | def regex_or(*items):
40 | return '(?:' + '|'.join(items) + ')'
41 |
42 | Contractions = re.compile(u"(?i)(\w+)(n['’′]t|['’′]ve|['’′]ll|['’′]d|['’′]re|['’′]s|['’′]m)$", re.UNICODE)
43 | Whitespace = re.compile(u"[\s\u0020\u00a0\u1680\u180e\u202f\u205f\u3000\u2000-\u200a]+", re.UNICODE)
44 |
45 | punctChars = r"['\"“”‘’.?!…,:;]"
46 | #punctSeq = punctChars+"+" #'anthem'. => ' anthem '.
47 | punctSeq = r"['\"“”‘’]+|[.?!,…]+|[:;]+" #'anthem'. => ' anthem ' .
48 | entity = r"&(?:amp|lt|gt|quot);"
49 | # URLs
50 |
51 |
52 | # BTO 2012-06: everyone thinks the daringfireball regex should be better, but they're wrong.
53 | # If you actually empirically test it the results are bad.
54 | # Please see https://github.com/brendano/ark-tweet-nlp/pull/9
55 |
56 | urlStart1 = r"(?:https?://|\bwww\.)"
57 | commonTLDs = r"(?:com|org|edu|gov|net|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|pro|tel|travel|xxx)"
58 | ccTLDs = r"(?:ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|" + \
59 | r"bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|" + \
60 | r"er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|" + \
61 | r"hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|" + \
62 | r"lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|" + \
63 | r"nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|sk|" + \
64 | r"sl|sm|sn|so|sr|ss|st|su|sv|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|" + \
65 | r"va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|za|zm|zw)" #TODO: remove obscure country domains?
66 | urlStart2 = r"\b(?:[A-Za-z\d-])+(?:\.[A-Za-z0-9]+){0,3}\." + regex_or(commonTLDs, ccTLDs) + r"(?:\."+ccTLDs+r")?(?=\W|$)"
67 | urlBody = r"(?:[^\.\s<>][^\s<>]*?)?"
68 | urlExtraCrapBeforeEnd = regex_or(punctChars, entity) + "+?"
69 | urlEnd = r"(?:\.\.+|[<>]|\s|$)"
70 | url = regex_or(urlStart1, urlStart2) + urlBody + "(?=(?:"+urlExtraCrapBeforeEnd+")?"+urlEnd+")"
71 |
72 |
73 | # Numeric
74 | timeLike = r"\d+(?::\d+){1,2}"
75 | #numNum = r"\d+\.\d+"
76 | numberWithCommas = r"(?:(?|>)[\._-]+(?:<|<|>|>)"
112 | s5 = "(?:[.][_]+[.])"
113 | # myleott: in Python the (?i) flag affects the whole expression
114 | #basicface = "(?:(?i)" +bfLeft+bfCenter+bfRight+ ")|" +s3+ "|" +s4+ "|" + s5
115 | basicface = "(?:" +bfLeft+bfCenter+bfRight+ ")|" +s3+ "|" +s4+ "|" + s5
116 |
117 | eeLeft = r"[\\\ƪԄ\((<>;ヽ\-=~\*]+"
118 | eeRight= u"[\\-=\\);'\u0022<>ʃ)//ノノ丿╯σっµ~\\*]+"
119 | eeSymbol = r"[^A-Za-z0-9\s\(\)\*:=-]"
120 | eastEmote = eeLeft + "(?:"+basicface+"|" +eeSymbol+")+" + eeRight
121 |
122 | oOEmote = r"(?:[oO]" + bfCenter + r"[oO])"
123 |
124 |
125 | emoticon = regex_or(
126 | # Standard version :) :( :] :D :P
127 | "(?:>|>)?" + regex_or(normalEyes, wink) + regex_or(noseArea,"[Oo]") + regex_or(tongue+r"(?=\W|$|RT|rt|Rt)", otherMouths+r"(?=\W|$|RT|rt|Rt)", sadMouths, happyMouths),
128 |
129 | # reversed version (: D: use positive lookbehind to remove "(word):"
130 | # because eyes on the right side is more ambiguous with the standard usage of : ;
131 | regex_or("(?<=(?: ))", "(?<=(?:^))") + regex_or(sadMouths,happyMouths,otherMouths) + noseArea + regex_or(normalEyes, wink) + "(?:<|<)?",
132 |
133 | #inspired by http://en.wikipedia.org/wiki/User:Scapler/emoticons#East_Asian_style
134 | eastEmote.replace("2", "1", 1), basicface,
135 | # iOS 'emoji' characters (some smileys, some symbols) [\ue001-\uebbb]
136 | # TODO should try a big precompiled lexicon from Wikipedia, Dan Ramage told me (BTO) he does this
137 |
138 | # myleott: o.O and O.o are two of the biggest sources of differences
139 | # between this and the Java version. One little hack won't hurt...
140 | oOEmote
141 | )
142 |
143 | Hearts = "(?:<+/?3+)+" #the other hearts are in decorations
144 |
145 | Arrows = regex_or(r"(?:<*[-―—=]*>+|<+[-―—=]*>*)", u"[\u2190-\u21ff]+")
146 |
147 | # BTO 2011-06: restored Hashtag, AtMention protection (dropped in original scala port) because it fixes
148 | # "hello (#hashtag)" ==> "hello (#hashtag )" WRONG
149 | # "hello (#hashtag)" ==> "hello ( #hashtag )" RIGHT
150 | # "hello (@person)" ==> "hello (@person )" WRONG
151 | # "hello (@person)" ==> "hello ( @person )" RIGHT
152 | # ... Some sort of weird interaction with edgepunct I guess, because edgepunct
153 | # has poor content-symbol detection.
154 |
155 | # This also gets #1 #40 which probably aren't hashtags .. but good as tokens.
156 | # If you want good hashtag identification, use a different regex.
157 | Hashtag = "#[a-zA-Z0-9_]+" #optional: lookbehind for \b
158 | #optional: lookbehind for \b, max length 15
159 | AtMention = "[@@][a-zA-Z0-9_]+"
160 |
161 | # I was worried this would conflict with at-mentions
162 | # but seems ok in sample of 5800: 7 changes all email fixes
163 | # http://www.regular-expressions.info/email.html
164 | Bound = r"(?:\W|^|$)"
165 | Email = regex_or("(?<=(?:\W))", "(?<=(?:^))") + r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,4}(?=" +Bound+")"
166 |
167 | # We will be tokenizing using these regexps as delimiters
168 | # Additionally, these things are "protected", meaning they shouldn't be further split themselves.
169 | Protected = re.compile(
170 | regex_or(
171 | Hearts,
172 | url,
173 | Email,
174 | timeLike,
175 | #numNum,
176 | numberWithCommas,
177 | numComb,
178 | emoticon,
179 | Arrows,
180 | entity,
181 | punctSeq,
182 | arbitraryAbbrev,
183 | separators,
184 | decorations,
185 | embeddedApostrophe,
186 | Hashtag,
187 | AtMention), re.UNICODE)
188 |
189 | # Edge punctuation
190 | # Want: 'foo' => ' foo '
191 | # While also: don't => don't
192 | # the first is considered "edge punctuation".
193 | # the second is word-internal punctuation -- don't want to mess with it.
194 | # BTO (2011-06): the edgepunct system seems to be the #1 source of problems these days.
195 | # I remember it causing lots of trouble in the past as well. Would be good to revisit or eliminate.
196 |
197 | # Note the 'smart quotes' (http://en.wikipedia.org/wiki/Smart_quotes)
198 | #edgePunctChars = r"'\"“”‘’«»{}\(\)\[\]\*&" #add \\p{So}? (symbols)
199 | edgePunctChars = u"'\"“”‘’«»{}\\(\\)\\[\\]\\*&" #add \\p{So}? (symbols)
200 | edgePunct = "[" + edgePunctChars + "]"
201 | notEdgePunct = "[a-zA-Z0-9]" # content characters
202 | offEdge = r"(^|$|:|;|\s|\.|,)" # colon here gets "(hello):" ==> "( hello ):"
203 | EdgePunctLeft = re.compile(offEdge + "("+edgePunct+"+)("+notEdgePunct+")", re.UNICODE)
204 | EdgePunctRight = re.compile("("+notEdgePunct+")("+edgePunct+"+)" + offEdge, re.UNICODE)
205 |
206 | def splitEdgePunct(input):
207 | input = EdgePunctLeft.sub(r"\1\2 \3", input)
208 | input = EdgePunctRight.sub(r"\1 \2\3", input)
209 | return input
210 |
211 | # The main work of tokenizing a tweet.
212 | def simpleTokenize(text):
213 |
214 | # Do the no-brainers first
215 | splitPunctText = splitEdgePunct(text)
216 |
217 | textLength = len(splitPunctText)
218 |
219 | # BTO: the logic here got quite convoluted via the Scala porting detour
220 | # It would be good to switch back to a nice simple procedural style like in the Python version
221 | # ... Scala is such a pain. Never again.
222 |
223 | # Find the matches for subsequences that should be protected,
224 | # e.g. URLs, 1.0, U.N.K.L.E., 12:53
225 | bads = []
226 | badSpans = []
227 | for match in Protected.finditer(splitPunctText):
228 | # The spans of the "bads" should not be split.
229 | if (match.start() != match.end()): #unnecessary?
230 | bads.append( [splitPunctText[match.start():match.end()]] )
231 | badSpans.append( (match.start(), match.end()) )
232 |
233 | # Create a list of indices to create the "goods", which can be
234 | # split. We are taking "bad" spans like
235 | # List((2,5), (8,10))
236 | # to create
237 | # List(0, 2, 5, 8, 10, 12)
238 | # where, e.g., "12" here would be the textLength
239 | # has an even length and no indices are the same
240 | indices = [0]
241 | for (first, second) in badSpans:
242 | indices.append(first)
243 | indices.append(second)
244 | indices.append(textLength)
245 |
246 | # Group the indices and map them to their respective portion of the string
247 | splitGoods = []
248 | for i in range(0, len(indices), 2):
249 | goodstr = splitPunctText[indices[i]:indices[i+1]]
250 | splitstr = goodstr.strip().split(" ")
251 | splitGoods.append(splitstr)
252 |
253 | # Reinterpolate the 'good' and 'bad' Lists, ensuring that
254 | # additional tokens from last good item get included
255 | zippedStr = []
256 | for i in range(len(bads)):
257 | zippedStr = addAllnonempty(zippedStr, splitGoods[i])
258 | zippedStr = addAllnonempty(zippedStr, bads[i])
259 | zippedStr = addAllnonempty(zippedStr, splitGoods[len(bads)])
260 |
261 | # BTO: our POS tagger wants "ur" and "you're" to both be one token.
262 | # Uncomment to get "you 're"
263 | #splitStr = []
264 | #for tok in zippedStr:
265 | # splitStr.extend(splitToken(tok))
266 | #zippedStr = splitStr
267 |
268 | return zippedStr
269 |
270 | def addAllnonempty(master, smaller):
271 | for s in smaller:
272 | strim = s.strip()
273 | if (len(strim) > 0):
274 | master.append(strim)
275 | return master
276 |
277 | # "foo bar " => "foo bar"
278 | def squeezeWhitespace(input):
279 | return Whitespace.sub(" ", input).strip()
280 |
281 | # Final pass tokenization based on special patterns
282 | def splitToken(token):
283 | m = Contractions.search(token)
284 | if m:
285 | return [m.group(1), m.group(2)]
286 | return [token]
287 |
288 | # Assume 'text' has no HTML escaping.
289 | def tokenize(text):
290 | return simpleTokenize(squeezeWhitespace(text))
291 |
292 |
293 | # Twitter text comes HTML-escaped, so unescape it.
294 | # We also first unescape &'s, in case the text has been buggily double-escaped.
295 | def normalizeTextForTagger(text):
296 | assert sys.version_info >= (3, 4), 'Python version >3.3 required'
297 | text = text.replace("&", "&")
298 | text = html.unescape(text)
299 | return text
300 |
301 | # This is intended for raw tweet text -- we do some HTML entity unescaping before running the tagger.
302 | #
303 | # This function normalizes the input text BEFORE calling the tokenizer.
304 | # So the tokens you get back may not exactly correspond to
305 | # substrings of the original text.
306 | def tokenizeRawTweetText(text):
307 | tokens = tokenize(normalizeTextForTagger(text))
308 | return tokens
309 |
310 |
311 | if __name__ == '__main__':
312 | for line in sys.stdin:
313 | print(' '.join(tokenizeRawTweetText(line)))
314 |
--------------------------------------------------------------------------------
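A quick usage sketch for the tokenizer above. The sample tweet is made up for illustration; the only assumption is that the full upstream module is importable via the `__init__.py` shown earlier.

from texcla.libs.twokenize import tokenize

# Protected spans (URLs, @-mentions, hashtags, emoticons, times like 12:53) come back as
# single tokens, while edge punctuation such as surrounding parentheses is split off.
tokens = tokenize("hello (@person) check http://example.com #nlp :)")
print(tokens)
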
/texcla/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .token_model import TokenModelFactory
2 | from .sentence_model import SentenceModelFactory
3 | from .sequence_encoders import *
4 |
--------------------------------------------------------------------------------
/texcla/models/layers.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import numpy as np
4 |
5 | from keras import backend as K
6 | from keras import constraints, initializers, regularizers
7 | from keras.layers import Layer
8 |
9 |
10 | def _softmax(x, dim):
11 | """Computes softmax along a specified dim. Keras currently lacks this feature.
12 | """
13 |
14 | if K.backend() == 'tensorflow':
15 | import tensorflow as tf
16 | return tf.nn.softmax(x, dim)
17 | elif K.backend() == 'cntk':
18 | import cntk
19 | return cntk.softmax(x, dim)
20 | elif K.backend() == 'theano':
21 | # Theano cannot softmax along an arbitrary dim.
22 | # So, we will shuffle `dim` to -1 and un-shuffle after softmax.
23 | perm = np.arange(K.ndim(x))
24 | perm[dim], perm[-1] = perm[-1], perm[dim]
25 | x_perm = K.permute_dimensions(x, perm)
26 | output = K.softmax(x_perm)
27 |
28 | # Permute back
29 | perm[dim], perm[-1] = perm[-1], perm[dim]
30 | output = K.permute_dimensions(output, perm)
31 | return output
32 | else:
33 | raise ValueError("Backend '{}' not supported".format(K.backend()))
34 |
35 |
36 | class AttentionLayer(Layer):
37 | """Attention layer that computes a learned attention over input sequence.
38 |
39 | For details, see papers:
40 | - https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf
41 | - http://colinraffel.com/publications/iclr2016feed.pdf (fig 1)
42 |
43 | Input:
44 | x: Input tensor of shape `(..., time_steps, features)` where `features` must be static (known).
45 |
46 | Output:
47 | 2D tensor of shape `(..., features)`. i.e., `time_steps` axis is attended over and reduced.
48 | """
49 |
50 | def __init__(self,
51 | kernel_initializer='he_normal',
52 | kernel_regularizer=None,
53 | kernel_constraint=None,
54 | use_bias=True,
55 | bias_initializer='zeros',
56 | bias_regularizer=None,
57 | bias_constraint=None,
58 | use_context=True,
59 | context_initializer='he_normal',
60 | context_regularizer=None,
61 | context_constraint=None,
62 | attention_dims=None,
63 | **kwargs):
64 | """
65 | Args:
66 | attention_dims: The dimensionality of the inner attention calculating neural network.
67 | For input `(32, 10, 300)`, with `attention_dims` of 100, the output is `(32, 10, 100)`.
68 | i.e., the attended words are 100 dimensional. This is then collapsed via summation to
69 | `(32, 10, 1)` to indicate the attention weights for 10 words.
70 | If set to None, `features` dims are used as `attention_dims`. (Default value: None)
71 | """
72 | if 'input_shape' not in kwargs and 'input_dim' in kwargs:
73 | kwargs['input_shape'] = (kwargs.pop('input_dim'),)
74 |
75 | super(AttentionLayer, self).__init__(**kwargs)
76 | self.kernel_initializer = initializers.get(kernel_initializer)
77 | self.kernel_regularizer = regularizers.get(kernel_regularizer)
78 | self.kernel_constraint = constraints.get(kernel_constraint)
79 |
80 | self.use_bias = use_bias
81 | self.bias_initializer = initializers.get(bias_initializer)
82 | self.bias_regularizer = regularizers.get(bias_regularizer)
83 | self.bias_constraint = constraints.get(bias_constraint)
84 |
85 | self.use_context = use_context
86 | self.context_initializer = initializers.get(context_initializer)
87 | self.context_regularizer = regularizers.get(context_regularizer)
88 | self.context_constraint = constraints.get(context_constraint)
89 |
90 | self.attention_dims = attention_dims
91 | self.supports_masking = True
92 |
93 | def build(self, input_shape):
94 | if len(input_shape) < 3:
95 | raise ValueError(
96 | "Expected input shape of `(..., time_steps, features)`, found `{}`".format(input_shape))
97 |
98 | attention_dims = input_shape[-1] if self.attention_dims is None else self.attention_dims
99 | self.kernel = self.add_weight(shape=(input_shape[-1], attention_dims),
100 | initializer=self.kernel_initializer,
101 | name='kernel',
102 | regularizer=self.kernel_regularizer,
103 | constraint=self.kernel_constraint)
104 |
105 | if self.use_bias:
106 | self.bias = self.add_weight(shape=(attention_dims, ),
107 | initializer=self.bias_initializer,
108 | name='bias',
109 | regularizer=self.bias_regularizer,
110 | constraint=self.bias_constraint)
111 | else:
112 | self.bias = None
113 |
114 | if self.use_context:
115 | self.context_kernel = self.add_weight(shape=(attention_dims, ),
116 | initializer=self.context_initializer,
117 | name='context_kernel',
118 | regularizer=self.context_regularizer,
119 | constraint=self.context_constraint)
120 | else:
121 | self.context_kernel = None
122 |
123 | super(AttentionLayer, self).build(input_shape)
124 |
125 | def call(self, x, mask=None):
126 | # x: [..., time_steps, features]
127 | # ut = [..., time_steps, attention_dims]
128 | ut = K.dot(x, self.kernel)
129 | if self.use_bias:
130 | ut = K.bias_add(ut, self.bias)
131 |
132 | ut = K.tanh(ut)
133 | if self.use_context:
134 | ut = ut * self.context_kernel
135 |
136 | # Collapse `attention_dims` to 1. This indicates the weight for each time_step.
137 | ut = K.sum(ut, axis=-1, keepdims=True)
138 |
139 | # Convert those weights into a distribution but along time axis.
140 | # i.e., sum of alphas along `time_steps` axis should be 1.
141 | self.at = _softmax(ut, dim=1)
142 | if mask is not None:
143 | self.at *= K.cast(K.expand_dims(mask, -1), K.floatx())
144 |
145 | # Weighted sum along `time_steps` axis.
146 | return K.sum(x * self.at, axis=-2)
147 |
148 | def compute_mask(self, input, input_mask=None):
149 | # do not pass the mask to the next layers
150 | return None
151 |
152 | def compute_output_shape(self, input_shape):
153 | return input_shape[0], input_shape[-1]
154 |
155 | def get_attention_tensor(self):
156 | if not hasattr(self, 'at'):
157 | raise ValueError(
158 | 'Attention tensor is only available after calling this layer with an input')
159 | return self.at
160 |
161 | def get_config(self):
162 | config = {
163 | 'kernel_initializer': initializers.serialize(self.kernel_initializer),
164 | 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
165 | 'kernel_constraint': constraints.serialize(self.kernel_constraint),
166 | 'bias_initializer': initializers.serialize(self.bias_initializer),
167 | 'bias_regularizer': regularizers.serialize(self.bias_regularizer),
168 | 'bias_constraint': constraints.serialize(self.bias_constraint),
169 | 'context_initializer': initializers.serialize(self.context_initializer),
170 | 'context_regularizer': regularizers.serialize(self.context_regularizer),
171 | 'context_constraint': constraints.serialize(self.context_constraint)
172 | }
173 | base_config = super(AttentionLayer, self).get_config()
174 | return dict(list(base_config.items()) + list(config.items()))
175 |
--------------------------------------------------------------------------------
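A minimal wiring sketch for `AttentionLayer`; the sequence length, feature size and LSTM width below are illustrative placeholders, not values taken from this repository.

from keras.layers import Input, LSTM, Bidirectional
from keras.models import Model

from texcla.models.layers import AttentionLayer

# The layer expects `(..., time_steps, features)`, so the RNN must return full sequences.
words = Input(shape=(50, 100))
seq = Bidirectional(LSTM(64, return_sequences=True))(words)
attention = AttentionLayer()
doc_vector = attention(seq)   # (batch, 128): the time_steps axis is attended over and reduced
model = Model(words, doc_vector)

# The per-timestep attention weights are available once the layer has been called:
weights = attention.get_attention_tensor()
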
/texcla/models/sentence_model.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from keras.layers import Dense, Embedding, Input, TimeDistributed
4 | from keras.models import Model
5 |
6 | from ..embeddings import build_embedding_weights, get_embeddings_index
7 | from .sequence_encoders import SequenceEncoderBase
8 |
9 |
10 | class SentenceModelFactory(object):
11 | def __init__(self, num_classes, token_index, max_sents, max_tokens,
12 | embedding_type='glove.6B.100d', embedding_dims=100):
13 | """Creates a `SentenceModelFactory` instance for building various models that operate over
14 | (samples, max_sentences, max_tokens) input.
15 |
16 | Args:
17 | num_classes: The number of output classes.
18 | token_index: The dictionary of token and its corresponding integer index value.
19 | max_sents: The max number of sentences in a document.
20 | max_tokens: The max number of tokens in a sentence.
21 | embedding_type: The embedding type to use. Set to None to use random embeddings.
22 | (Default value: 'glove.6B.100d')
23 | embedding_dims: The number of embedding dims to use for representing a word. This argument will be ignored
24 | when `embedding_type` is set. (Default value: 100)
25 | """
26 | self.num_classes = num_classes
27 | self.token_index = token_index
28 | self.max_sents = max_sents
29 | self.max_tokens = max_tokens
30 |
31 | # This is required to make TimeDistributed(word_encoder_model) work.
32 | # TODO: Get rid of this restriction when https://github.com/fchollet/keras/issues/6917 resolves.
33 | if self.max_tokens is None:
34 | raise ValueError('`max_tokens` should be provided.')
35 |
36 | if embedding_type is not None:
37 | self.embeddings_index = get_embeddings_index(
38 | embedding_type, embedding_dims)
39 | self.embedding_dims = list(self.embeddings_index.values())[
40 | 0].shape[-1]
41 | else:
42 | self.embeddings_index = None
43 | self.embedding_dims = embedding_dims
44 |
45 | def build_model(self, token_encoder_model, sentence_encoder_model,
46 | trainable_embeddings=True, output_activation='softmax'):
47 | """Builds a model that first encodes all words within sentences using `token_encoder_model`, followed by
48 | `sentence_encoder_model`.
49 |
50 | Args:
51 | token_encoder_model: An instance of `SequenceEncoderBase` for encoding tokens within sentences. This model
52 | will be applied across all sentences to create a sentence encoding.
53 | sentence_encoder_model: An instance of `SequenceEncoderBase` operating on sentence encoding generated by
54 | `token_encoder_model`. This encoding is then fed into a final `Dense` layer for classification.
55 | trainable_embeddings: Whether or not to fine tune embeddings.
56 | output_activation: The output activation to use. (Default value: 'softmax')
57 | Use:
58 | - `softmax` for binary or multi-class.
59 | - `sigmoid` for multi-label classification.
60 | - `linear` for regression output.
61 |
62 | Returns:
63 | The model output tensor.
64 | """
65 | if not isinstance(token_encoder_model, SequenceEncoderBase):
66 | raise ValueError("`token_encoder_model` should be an instance of `{}`".format(
67 | SequenceEncoderBase))
68 | if not isinstance(sentence_encoder_model, SequenceEncoderBase):
69 | raise ValueError("`sentence_encoder_model` should be an instance of `{}`".format(
70 | SequenceEncoderBase))
71 |
72 | if not sentence_encoder_model.allows_dynamic_length() and self.max_sents is None:
73 | raise ValueError("Sentence encoder model '{}' requires padding. "
74 | "You need to provide `max_sents`")
75 |
76 | if self.embeddings_index is None:
77 | # The +1 is for unknown token index 0.
78 | embedding_layer = Embedding(len(self.token_index),
79 | self.embedding_dims,
80 | input_length=self.max_tokens,
81 | mask_zero=token_encoder_model.allows_dynamic_length(),
82 | trainable=trainable_embeddings)
83 | else:
84 | embedding_layer = Embedding(len(self.token_index),
85 | self.embedding_dims,
86 | weights=[build_embedding_weights(
87 | self.token_index, self.embeddings_index)],
88 | input_length=self.max_tokens,
89 | mask_zero=token_encoder_model.allows_dynamic_length(),
90 | trainable=trainable_embeddings)
91 |
92 | word_input = Input(shape=(self.max_tokens,), dtype='int32')
93 | x = embedding_layer(word_input)
94 | word_encoding = token_encoder_model(x)
95 | token_encoder_model = Model(
96 | word_input, word_encoding, name='word_encoder')
97 |
98 | doc_input = Input(
99 | shape=(self.max_sents, self.max_tokens), dtype='int32')
100 | sent_encoding = TimeDistributed(token_encoder_model)(doc_input)
101 | x = sentence_encoder_model(sent_encoding)
102 |
103 | x = Dense(self.num_classes, activation=output_activation)(x)
104 | return Model(doc_input, x)
105 |
--------------------------------------------------------------------------------
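A hedged end-to-end sketch for `SentenceModelFactory`. The `token_index` vocabulary and the data below are placeholders; `YoonKimCNN` and `AttentionRNN` are the encoders defined in `sequence_encoders.py`, and `embedding_type=None` keeps the example self-contained by using random embeddings.

import numpy as np
from texcla.models import SentenceModelFactory, YoonKimCNN, AttentionRNN

token_index = {'the': 1, 'cat': 2, 'sat': 3}   # hypothetical vocabulary
factory = SentenceModelFactory(num_classes=2, token_index=token_index,
                               max_sents=10, max_tokens=20,
                               embedding_type=None, embedding_dims=50)
model = factory.build_model(token_encoder_model=YoonKimCNN(),
                            sentence_encoder_model=AttentionRNN())
model.compile(optimizer='adam', loss='categorical_crossentropy')

# Input is integer-encoded text of shape (samples, max_sents, max_tokens).
x = np.zeros((4, 10, 20), dtype='int32')
y = np.eye(2)[[0, 1, 0, 1]]
model.fit(x, y, epochs=1, verbose=0)
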
/texcla/models/sequence_encoders.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from keras.layers import LSTM, Bidirectional, Conv1D, Dropout, GlobalAveragePooling1D, GlobalMaxPooling1D, MaxPooling1D, Dense, Flatten, GRU
4 | from keras.layers.merge import Concatenate, concatenate
5 |
6 | from .layers import AttentionLayer
7 | from ..utils.format import to_fixed_digits
8 |
9 |
10 | class SequenceEncoderBase(object):
11 |
12 | def __init__(self, dropout_rate=0.5):
13 | """Creates a new instance of sequence encoder.
14 |
15 | Args:
16 | dropout_rate: The final encoded output dropout.
17 | """
18 | self.dropout_rate = dropout_rate
19 |
20 | def __call__(self, x):
21 | """Build the actual model here.
22 |
23 | Args:
24 | x: The encoded or embedded input sequence.
25 |
26 | Returns:
27 | The model output tensor.
28 | """
29 |
30 | x = self.build_model(x)
31 | if self.dropout_rate > 0:
32 | x = Dropout(self.dropout_rate)(x)
33 | return x
34 |
35 | def build_model(self, x):
36 | """Build your model graph here.
37 |
38 | Args:
39 | x: The encoded or embedded input sequence.
40 |
41 | Returns:
42 | The model output tensor without the classification block.
43 | """
44 | raise NotImplementedError()
45 |
46 | def allows_dynamic_length(self):
47 | """Return a boolean indicating whether this model is capable of handling variable time steps per mini-batch.
48 |
49 | For example, this should be True for RNN models since you can use them with variable time steps per mini-batch.
50 | CNNs on the other hand expect fixed time steps across all mini-batches.
51 | """
52 | # Assume default as False. Should be overridden as necessary.
53 | return False
54 |
55 |
56 | class YoonKimCNN(SequenceEncoderBase):
57 |
58 | def __init__(self, num_filters=64, filter_sizes=[3, 4, 5], dropout_rate=0.5, **conv_kwargs):
59 | """Yoon Kim's shallow cnn model: https://arxiv.org/pdf/1408.5882.pdf
60 |
61 | Args:
62 | num_filters: The number of filters to use per `filter_size`. (Default value = 64)
63 | filter_sizes: The filter sizes for each convolutional layer. (Default value = [3, 4, 5])
64 | **conv_kwargs: Additional args for building the `Conv1D` layer.
65 | """
66 | super(YoonKimCNN, self).__init__(dropout_rate)
67 | self.num_filters = num_filters
68 | self.filter_sizes = filter_sizes
69 | self.conv_kwargs = conv_kwargs
70 |
71 | def build_model(self, x):
72 | pooled_tensors = []
73 | for filter_size in self.filter_sizes:
74 | x_i = Conv1D(self.num_filters, filter_size,
75 | activation='elu', **self.conv_kwargs)(x)
76 | x_i = GlobalMaxPooling1D()(x_i)
77 | pooled_tensors.append(x_i)
78 |
79 | x = pooled_tensors[0] if len(
80 | self.filter_sizes) == 1 else concatenate(pooled_tensors, axis=-1)
81 | return x
82 |
83 | def __str__(self):
84 | conv_kwargs_str = str(self.conv_kwargs) if len(
85 | self.conv_kwargs) > 0 else ''
86 | filter_sizes_li = [str(x) for x in self.filter_sizes]
87 | li = ['cnn_kim', str(self.num_filters)] + filter_sizes_li + [
88 | 'do', to_fixed_digits(self.dropout_rate), conv_kwargs_str]
89 | return '_'.join(li)
90 |
91 |
92 | class AlexCNN(SequenceEncoderBase):
93 | def __init__(self, num_filters=20, filter_sizes=[3, 8], dropout_rate=[0.5, 0.8], hidden_dims=20, **conv_kwargs):
94 | """Alexander Rakhlin's CNN model: https://github.com/alexander-rakhlin/CNN-for-Sentence-Classification-in-Keras/
95 |
96 | Args:
97 | num_filters: The number of filters to use per `filter_size`. (Default value = 20)
98 | filter_sizes: The filter sizes for each convolutional layer. (Default value = [3, 8])
99 | dropout_rate: Array for one dropout layer after the embedding and one before the final dense layer (Default value = [0.5, 0.8])
100 | """
101 | super(AlexCNN, self).__init__(dropout_rate[0])
102 | self.num_filters = num_filters
103 | self.filter_sizes = filter_sizes
104 | self.dropout_rate = dropout_rate[0]
105 | self.dropout_rate2 = dropout_rate[1]
106 | self.hidden_dims = hidden_dims
107 | self.conv_kwargs = conv_kwargs
108 |
109 | def build_model(self, x):
110 | conv_blocks = []
111 | for sz in self.filter_sizes:
112 | conv = Conv1D(filters=self.num_filters,
113 | kernel_size=sz,
114 | padding="valid",
115 | activation="relu",
116 | strides=1, **self.conv_kwargs)(x)
117 | conv = MaxPooling1D(pool_size=2)(conv)
118 | conv = Flatten()(conv)
119 | conv_blocks.append(conv)
120 |
121 | x = Concatenate()(conv_blocks) if len(
122 | conv_blocks) > 1 else conv_blocks[0]
123 |
124 | x = Dropout(self.dropout_rate2)(x)
125 | x = Dense(self.hidden_dims, activation="relu")(x)
126 | return x
127 |
128 | def __str__(self):
129 | conv_kwargs_str = str(self.conv_kwargs) if len(
130 | self.conv_kwargs) > 0 else ''
131 | filter_sizes_li = [str(x) for x in self.filter_sizes]
132 | li = ['cnn_alex', str(self.num_filters)] + filter_sizes_li + [
133 | 'do', to_fixed_digits(self.dropout_rate), conv_kwargs_str]
134 | return '_'.join(li)
135 |
136 |
137 | class StackedRNN(SequenceEncoderBase):
138 | def __init__(self, rnn_class=LSTM, hidden_dims=[50, 50], bidirectional=True, dropout_rate=0.5, **rnn_kwargs):
139 | """Creates a stacked RNN.
140 |
141 | Args:
142 | rnn_class: The type of RNN to use. (Default Value = LSTM)
143 | hidden_dims: The number of hidden units for each stacked RNN layer. (Default Value: [50, 50])
144 | bidirectional: Whether to use bidirectional encoding. (Default Value = True)
145 | **rnn_kwargs: Additional args for building the RNN.
146 | """
147 | super(StackedRNN, self).__init__(dropout_rate)
148 | self.rnn_class = rnn_class
149 | self.hidden_dims = hidden_dims
150 | self.bidirectional = bidirectional
151 | self.rnn_kwargs = rnn_kwargs
152 |
153 | def build_model(self, x):
154 | for i, n in enumerate(self.hidden_dims):
155 | is_last_layer = i == len(self.hidden_dims) - 1
156 | rnn = self.rnn_class(
157 | n, return_sequences=not is_last_layer, **self.rnn_kwargs)
158 | if self.bidirectional:
159 | x = Bidirectional(rnn)(x)
160 | else:
161 | x = rnn(x)
162 | return x
163 |
164 | def allows_dynamic_length(self):
165 | return True
166 |
167 | def __str__(self):
168 | bi = 'bi' if self.bidirectional else 'nobi'
169 | rnn_classs_str = self.rnn_class.__name__
170 | rnn_kwargs_str = str(self.rnn_kwargs) if len(
171 | self.rnn_kwargs) > 0 else ''
172 | hidden_dims_li = [str(x) for x in self.hidden_dims]
173 | li = ['stacked', rnn_classs_str] + hidden_dims_li + [
174 | bi, 'do', to_fixed_digits(self.dropout_rate), rnn_kwargs_str]
175 | return '_'.join(li)
176 |
177 |
178 | class BasicRNN(StackedRNN):
179 | def __init__(self, rnn_class=LSTM, hidden_dims=50, bidirectional=True, dropout_rate=0.5, **rnn_kwargs):
180 | super(BasicRNN, self).__init__(rnn_class=rnn_class, hidden_dims=[
181 | hidden_dims], bidirectional=bidirectional, dropout_rate=dropout_rate, **rnn_kwargs)
182 |
183 | def __str__(self):
184 | bi = 'bi' if self.bidirectional else 'nobi'
185 | rnn_classs_str = self.rnn_class.__name__
186 | rnn_kwargs_str = str(self.rnn_kwargs) if len(
187 | self.rnn_kwargs) > 0 else ''
188 | hidden_dims_li = [str(x) for x in self.hidden_dims]
189 | li = ['basic', rnn_classs_str] + hidden_dims_li + [
190 | bi, 'do', to_fixed_digits(self.dropout_rate), rnn_kwargs_str]
191 | return '_'.join(li)
192 |
193 |
194 | class AttentionRNN(SequenceEncoderBase):
195 |
196 | def __init__(self, rnn_class=LSTM, encoder_dims=50, bidirectional=True, dropout_rate=0.5, **rnn_kwargs):
197 | """Creates an RNN model with attention. The attention mechanism is implemented as described
198 | in https://www.cs.cmu.edu/~hovy/papers/16HLT-hierarchical-attention-networks.pdf, but without
199 | sentence level attention.
200 |
201 | Args:
202 | rnn_class: The type of RNN to use. (Default Value = LSTM)
203 | encoder_dims: The number of hidden units of RNN. (Default Value: 50)
204 | bidirectional: Whether to use bidirectional encoding. (Default Value = True)
205 | **rnn_kwargs: Additional args for building the RNN.
206 | """
207 | super(AttentionRNN, self).__init__(dropout_rate)
208 | self.rnn_class = rnn_class
209 | self.encoder_dims = encoder_dims
210 | self.bidirectional = bidirectional
211 | self.rnn_kwargs = rnn_kwargs
212 |
213 | def build_model(self, x):
214 | rnn = self.rnn_class(
215 | self.encoder_dims, return_sequences=True, **self.rnn_kwargs)
216 | if self.bidirectional:
217 | word_activations = Bidirectional(rnn)(x)
218 | else:
219 | word_activations = rnn(x)
220 |
221 | attention_layer = AttentionLayer()
222 | doc_vector = attention_layer(word_activations)
223 | self.attention_tensor = attention_layer.get_attention_tensor()
224 | return doc_vector
225 |
226 | def get_attention_tensor(self):
227 | if not hasattr(self, 'attention_tensor'):
228 | raise ValueError('You need to build the model first')
229 | return self.attention_tensor
230 |
231 | def allows_dynamic_length(self):
232 | return True
233 |
234 | def __str__(self):
235 | bi = 'bi' if self.bidirectional else 'nobi'
236 | rnn_kwargs_str = str(self.rnn_kwargs) if len(
237 | self.rnn_kwargs) > 0 else ''
238 | li = ['attention', self.rnn_class.__name__, str(self.encoder_dims),
239 | bi, 'do', to_fixed_digits(self.dropout_rate), rnn_kwargs_str]
240 |
241 | return '_'.join(li)
242 |
243 |
244 | class AveragingEncoder(SequenceEncoderBase):
245 |
246 | def __init__(self, dropout_rate=0):
247 | """An encoder that averages sequence inputs.
248 | """
249 | super(AveragingEncoder, self).__init__(dropout_rate)
250 |
251 | def __str__(self):
252 | return 'avg_encoder'
253 |
254 | def build_model(self, x):
255 | x = GlobalAveragePooling1D()(x)
256 | return x
257 |
--------------------------------------------------------------------------------
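The encoders above are drop-in alternatives for the model factories; `str(encoder)` produces a compact identifier built with `to_fixed_digits`. A small illustrative sketch (parameter values are arbitrary):

from texcla.models import AveragingEncoder, StackedRNN, YoonKimCNN

encoders = [
    YoonKimCNN(num_filters=32, filter_sizes=[2, 3]),
    StackedRNN(hidden_dims=[64, 32], bidirectional=False),
    AveragingEncoder(),
]
for enc in encoders:
    # Prints something like 'cnn_kim_32_2_3_do_...' for the first encoder (exact format
    # depends on to_fixed_digits); allows_dynamic_length() tells the factories whether
    # fixed-length padding is required.
    print(str(enc), enc.allows_dynamic_length())
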
/texcla/models/token_model.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from keras.layers import Dense, Embedding, Input
4 | from keras.models import Model
5 |
6 | from ..embeddings import build_embedding_weights, get_embeddings_index
7 | from .sequence_encoders import SequenceEncoderBase
8 |
9 |
10 | class TokenModelFactory(object):
11 | def __init__(self, num_classes, token_index, max_tokens,
12 | embedding_type='glove.6B.100d', embedding_dims=100, embedding_path=None):
13 | """Creates a `TokenModelFactory` instance for building various models that operate over
14 | (samples, max_tokens) input. The token can be character, word or any other elementary token.
15 |
16 | Args:
17 | num_classes: The number of output classes.
18 | token_index: The dictionary of token and its corresponding integer index value.
19 | max_tokens: The max number of tokens across all documents. This can be set to None for models that
20 | allow different word lengths per mini-batch.
21 | embedding_type: The embedding type to use. Set to None to use random embeddings.
22 | (Default value: 'glove.6B.100d')
23 | embedding_dims: The number of embedding dims to use for representing a word. This argument will be ignored
24 | when `embedding_type` is set. (Default value: 100)
25 | """
26 | self.num_classes = num_classes
27 | self.token_index = token_index
28 | self.max_tokens = max_tokens
29 |
30 | if embedding_type is not None or embedding_path is not None:
31 | self.embeddings_index = get_embeddings_index(
32 | embedding_type, embedding_dims, embedding_path)
33 | self.embedding_dims = list(self.embeddings_index.values())[
34 | 0].shape[-1]
35 | else:
36 | self.embeddings_index = None
37 | self.embedding_dims = embedding_dims
38 |
39 | def build_model(self, token_encoder_model, trainable_embeddings=True, output_activation='softmax'):
40 | """Builds a model using the given `text_model`
41 |
42 | Args:
43 | token_encoder_model: An instance of `SequenceEncoderBase` for encoding all the tokens within a document.
44 | This encoding is then fed into a final `Dense` layer for classification.
45 | trainable_embeddings: Whether or not to fine tune embeddings.
46 | output_activation: The output activation to use. (Default value: 'softmax')
47 | Use:
48 | - `softmax` for binary or multi-class.
49 | - `sigmoid` for multi-label classification.
50 | - `linear` for regression output.
51 |
52 | Returns:
53 | The model output tensor.
54 | """
55 | if not isinstance(token_encoder_model, SequenceEncoderBase):
56 | raise ValueError("`token_encoder_model` should be an instance of `{}`".format(
57 | SequenceEncoderBase))
58 |
59 | if not token_encoder_model.allows_dynamic_length() and self.max_tokens is None:
60 | raise ValueError("The provided `token_encoder_model` does not allow variable length mini-batches. "
61 | "You need to provide `max_tokens`")
62 |
63 | if self.embeddings_index is None:
64 | # The +1 is for unknown token index 0.
65 | embedding_layer = Embedding(len(self.token_index),
66 | self.embedding_dims,
67 | input_length=self.max_tokens,
68 | mask_zero=token_encoder_model.allows_dynamic_length(),
69 | trainable=trainable_embeddings)
70 | else:
71 | embedding_layer = Embedding(len(self.token_index),
72 | self.embedding_dims,
73 | weights=[build_embedding_weights(
74 | self.token_index, self.embeddings_index)],
75 | input_length=self.max_tokens,
76 | mask_zero=token_encoder_model.allows_dynamic_length(),
77 | trainable=trainable_embeddings)
78 |
79 | sequence_input = Input(shape=(self.max_tokens,), dtype='int32')
80 | x = embedding_layer(sequence_input)
81 | x = token_encoder_model(x)
82 | x = Dense(self.num_classes, activation=output_activation)(x)
83 | return Model(sequence_input, x)
84 |
--------------------------------------------------------------------------------
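A minimal sketch for `TokenModelFactory` with a word-level encoder. The vocabulary and data shapes are placeholders, and `embedding_type=None` avoids downloading pre-trained embeddings.

import numpy as np
from texcla.models import TokenModelFactory, YoonKimCNN

token_index = {'good': 1, 'bad': 2, 'movie': 3}   # hypothetical vocabulary
factory = TokenModelFactory(num_classes=2, token_index=token_index, max_tokens=100,
                            embedding_type=None, embedding_dims=50)
model = factory.build_model(token_encoder_model=YoonKimCNN())
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

x = np.zeros((8, 100), dtype='int32')             # (samples, max_tokens), integer-encoded
y = np.eye(2)[np.random.randint(0, 2, size=8)]
model.fit(x, y, epochs=1, verbose=0)
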
/texcla/preprocessing/__init__.py:
--------------------------------------------------------------------------------
1 | from .char_tokenizer import *
2 | from .word_tokenizer import *
3 | from .sentence_tokenizer import *
4 |
--------------------------------------------------------------------------------
/texcla/preprocessing/char_tokenizer.py:
--------------------------------------------------------------------------------
1 | try:
2 | import spacy
3 | except ImportError:
4 | pass
5 |
6 | from .tokenizer import Tokenizer
7 | from . import utils
8 |
9 |
10 | class CharTokenizer(Tokenizer):
11 |
12 | def __init__(self,
13 | lang='en',
14 | lower=True,
15 | charset=None):
16 | """Encodes text into `(samples, characters)`
17 |
18 | Args:
19 | lang: The spacy language to use. (Default value: 'en')
20 | lower: Lower cases the tokens if True. (Default value: True)
21 | charset: The character set to use. For example `charset = 'abc123'`. If None, all characters will be used.
22 | (Default value: None)
23 | """
24 | super(CharTokenizer, self).__init__(lang, lower)
25 | self.charset = charset
26 |
27 | def token_generator(self, texts, **kwargs):
28 | """Yields tokens from texts as `(text_idx, character)`
29 | """
30 | for text_idx, text in enumerate(texts):
31 | if self.lower:
32 | text = text.lower()
33 | for char in text:
34 | yield text_idx, char
35 |
36 |
37 | class SentenceCharTokenizer(CharTokenizer):
38 |
39 | def __init__(self,
40 | lang='en',
41 | lower=True,
42 | charset=None):
43 | """Encodes text into `(samples, sentences, characters)`
44 |
45 | Args:
46 | lang: The spacy language to use. (Default value: 'en')
47 | lower: Lower cases the tokens if True. (Default value: True)
48 | charset: The character set to use. For example `charset = 'abc123'`. If None, all characters will be used.
49 | (Default value: None)
50 | """
51 | super(SentenceCharTokenizer, self).__init__(lang, lower, charset)
52 |
53 | def token_generator(self, texts, **kwargs):
54 | """Yields tokens from texts as `(text_idx, sent_idx, character)`
55 |
56 | Args:
57 | texts: The list of texts.
58 | **kwargs: Supported args include:
59 | n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default.
60 | batch_size: The number of texts to accumulate into a common working set before processing.
61 | (Default value: 1000)
62 | """
63 | # Perf optimization. Only process what is necessary.
64 | n_threads, batch_size = utils._parse_spacy_kwargs(**kwargs)
65 | nlp = spacy.load(self.lang)
66 |
67 | kwargs = {
68 | 'batch_size': batch_size,
69 | 'n_threads': n_threads,
70 | 'disable': ['ner']
71 | }
72 |
73 | # Perf optimization: Lower the entire text instead of individual tokens.
74 | texts_gen = utils._apply_generator(
75 | texts, lambda x: x.lower()) if self.lower else texts
76 | for text_idx, doc in enumerate(nlp.pipe(texts_gen, **kwargs)):
77 | for sent_idx, sent in enumerate(doc.sents):
78 | for word in sent:
79 | for char in word:
80 | yield text_idx, sent_idx, char
81 |
--------------------------------------------------------------------------------
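A short sketch of the character-level pipeline (hypothetical snippet, not taken from the repository):

from texcla.preprocessing import CharTokenizer

texts = ['Hello world!', 'Bye.']
tokenizer = CharTokenizer()
tokenizer.build_vocab(texts)

# Encode to character indices and pad/truncate each text to 20 characters.
encoded = tokenizer.encode_texts(texts, verbose=0)
padded = tokenizer.pad_sequences(encoded, fixed_token_seq_length=20)
print(padded.shape)  # (2, 20)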
/texcla/preprocessing/sentence_tokenizer.py:
--------------------------------------------------------------------------------
1 | try:
2 | import spacy
3 | except ImportError:
4 | pass
5 |
6 | from . import utils
7 | from .word_tokenizer import SpacyTokenizer
8 |
9 |
10 | class SpacySentenceTokenizer(SpacyTokenizer):
11 | def __init__(self,
12 | lang='en',
13 | lower=True,
14 | lemmatize=False,
15 | remove_punct=True,
16 | remove_digits=True,
17 | remove_stop_words=False,
18 | exclude_oov=False,
19 | exclude_pos_tags=None,
20 | exclude_entities=['PERSON']):
21 | """Encodes text into `(samples, sentences, words)`
22 |
23 | Args:
24 | lang: The spacy language to use. (Default value: 'en')
25 | lower: Lower cases the tokens if True. (Default value: True)
26 | lemmatize: Lemmatizes words when set to True. This also makes the word lower case
27 |                 irrespective of the `lower` setting. (Default value: False)
28 | remove_punct: Removes punct words if True. (Default value: True)
29 | remove_digits: Removes digit words if True. (Default value: True)
30 | remove_stop_words: Removes stop words if True. (Default value: False)
31 | exclude_oov: Exclude words that are out of spacy embedding's vocabulary.
32 |                 By default, spaCy's GloVe vectors (1 million words, 300 dimensions) are used. You can override the spacy vocabulary with a custom
33 | embedding to change this. (Default value: False)
34 | exclude_pos_tags: A list of parts of speech tags to exclude. Can be any of spacy.parts_of_speech.IDS
35 | (Default value: None)
36 | exclude_entities: A list of entity types to be excluded.
37 | Supported entity types can be found here: https://spacy.io/docs/usage/entity-recognition#entity-types
38 | (Default value: ['PERSON'])
39 | """
40 | super(SpacySentenceTokenizer, self).__init__(lang,
41 | lower,
42 | lemmatize,
43 | remove_punct,
44 | remove_digits,
45 | remove_stop_words,
46 | exclude_oov,
47 | exclude_pos_tags,
48 | exclude_entities)
49 |
50 | def token_generator(self, texts, **kwargs):
51 | """Yields tokens from texts as `(text_idx, sent_idx, word)`
52 |
53 | Args:
54 | texts: The list of texts.
55 | **kwargs: Supported args include:
56 | n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default.
57 | batch_size: The number of texts to accumulate into a common working set before processing.
58 | (Default value: 1000)
59 | """
60 | # Perf optimization. Only process what is necessary.
61 | n_threads, batch_size = utils._parse_spacy_kwargs(**kwargs)
62 | nlp = spacy.load(self.lang)
63 |
64 | disabled = []
65 | if len(self.exclude_entities) > 0:
66 | disabled.append('ner')
67 |
68 | kwargs = {
69 | 'batch_size': batch_size,
70 | 'n_threads': n_threads,
71 | 'disable': disabled
72 | }
73 |
74 | for text_idx, doc in enumerate(nlp.pipe(texts, **kwargs)):
75 | for sent_idx, sent in enumerate(doc.sents):
76 | for word in sent:
77 | processed_word = self._apply_options(word)
78 | if processed_word is not None:
79 | yield text_idx, sent_idx, processed_word
80 |
--------------------------------------------------------------------------------
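A sketch of sentence-level encoding with `SpacySentenceTokenizer` (assumes the spaCy `en` model is installed; the padding sizes are illustrative):

from texcla.preprocessing import SpacySentenceTokenizer

texts = ['This is great. I loved it.', 'Terrible film. Would not watch it again.']
tokenizer = SpacySentenceTokenizer()
tokenizer.build_vocab(texts)

# Encode to (samples, sentences, words) and pad to a fixed 3 x 10 grid per document.
encoded = tokenizer.encode_texts(texts, verbose=0)
padded = tokenizer.pad_sequences(encoded, fixed_sentences_seq_length=3, fixed_token_seq_length=10)
print(padded.shape)  # (2, 3, 10)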
/texcla/preprocessing/tokenizer.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, unicode_literals
2 |
3 | import abc
4 | import logging
5 | from collections import OrderedDict, defaultdict
6 | from copy import deepcopy
7 | from multiprocessing import cpu_count
8 |
9 | import numpy as np
10 | import six
11 | from keras.preprocessing.sequence import pad_sequences as keras_pad_sequences
12 | from keras.utils.generic_utils import Progbar
13 |
14 | from ..libs import ngrams
15 |
16 | from . import utils
17 | from ..utils import io
18 |
19 | try:
20 | import spacy
21 | except ImportError:
22 | pass
23 |
24 |
25 | logger = logging.getLogger(__name__)
26 |
27 |
28 | class Tokenizer(object):
29 |
30 | def __init__(self,
31 | lang='en',
32 | lower=True,
33 |                  special_token=['<PAD>', '<UNK>']):  # 0 - Pad, 1 - Unknown
34 | """Encodes text into `(samples, aux_indices..., token)` where each token is mapped to a unique index starting
35 | from `i`. `i` is the number of special tokens.
36 |
37 | Args:
38 | lang: The spacy language to use. (Default value: 'en')
39 | lower: Lower cases the tokens if True. (Default value: True)
40 |             special_token: The tokens that are reserved. Default: ['<PAD>', '<UNK>'], for the padding token and unknown words respectively.
41 |
42 | """
43 |
44 | self.lang = lang
45 | self.lower = lower
46 | self.special_token = special_token
47 |
48 | self._token2idx = dict()
49 | self._idx2token = dict()
50 | self._token_counts = defaultdict(int)
51 |
52 | self._num_texts = 0
53 | self._counts = None
54 |
55 | @abc.abstractmethod
56 | def token_generator(self, texts, **kwargs):
57 | """Generator for yielding tokens. You need to implement this method.
58 |
59 | Args:
60 | texts: list of text items to tokenize.
61 | **kwargs: The kwargs propagated from `build_vocab_and_encode` or `encode_texts` call.
62 |
63 | Returns:
64 | `(text_idx, aux_indices..., token)` where aux_indices are optional. For example, if you want to vectorize
65 |             `texts` as `(text_idx, sentences, words)`, you should return `(text_idx, sentence_idx, word_token)`.
66 | Similarly, you can include paragraph, page level information etc., if needed.
67 | """
68 | raise NotImplementedError()
69 |
70 | def create_token_indices(self, tokens):
71 | """If `apply_encoding_options` is inadequate, one can retrieve tokens from `self.token_counts`, filter with
72 | a desired strategy and regenerate `token_index` using this method. The token index is subsequently used
73 | when `encode_texts` or `decode_texts` methods are called.
74 | """
75 | start_index = len(self.special_token)
76 | indices = list(range(len(tokens) + start_index))
77 | # prepend because the special tokens come in the beginning
78 | tokens_with_special = self.special_token + list(tokens)
79 | self._token2idx = dict(list(zip(tokens_with_special, indices)))
80 | self._idx2token = dict(list(zip(indices, tokens_with_special)))
81 |
82 | def apply_encoding_options(self, min_token_count=1, limit_top_tokens=None):
83 | """Applies the given settings for subsequent calls to `encode_texts` and `decode_texts`. This allows you to
84 | play with different settings without having to re-run tokenization on the entire corpus.
85 |
86 | Args:
87 | min_token_count: The minimum token count (frequency) in order to include during encoding. All tokens
88 | below this frequency will be encoded to `0` which corresponds to unknown token. (Default value = 1)
89 |             limit_top_tokens: The maximum number of tokens to keep, based on their frequency. Only the most common `limit_top_tokens`
90 | tokens will be kept. Set to None to keep everything. (Default value: None)
91 | """
92 | if not self.has_vocab:
93 | raise ValueError("You need to build the vocabulary using `build_vocab` "
94 | "before using `apply_encoding_options`")
95 | if min_token_count < 1:
96 |             raise ValueError("`min_token_count` should be at least 1")
97 |
98 | # Remove tokens with freq < min_token_count
99 | token_counts = list(self._token_counts.items())
100 | token_counts = [x for x in token_counts if x[1] >= min_token_count]
101 |
102 | # Clip to max_tokens.
103 | if limit_top_tokens is not None:
104 | token_counts.sort(key=lambda x: x[1], reverse=True)
105 | filtered_tokens = list(zip(*token_counts))[0]
106 | filtered_tokens = filtered_tokens[:limit_top_tokens]
107 | else:
108 |             filtered_tokens = list(zip(*token_counts))[0]
109 |
110 | # Generate indices based on filtered tokens.
111 | self.create_token_indices(filtered_tokens)
112 |
113 |     def encode_texts(self, texts, unknown_token="<UNK>", verbose=1, **kwargs):
114 | """Encodes the given texts using internal vocabulary with optionally applied encoding options. See
115 |         `apply_encoding_options` to set various options.
116 |
117 | Args:
118 | texts: The list of text items to encode.
119 |             unknown_token: The token to replace words that are out of vocabulary. If None, those words are omitted. (Default value: "<UNK>")
120 | verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)
121 | **kwargs: The kwargs for `token_generator`.
122 |
123 | Returns:
124 | The encoded texts.
125 | """
126 | if not self.has_vocab:
127 | raise ValueError(
128 | "You need to build the vocabulary using `build_vocab` before using `encode_texts`")
129 |
130 | if unknown_token and unknown_token not in self.special_token:
131 | raise ValueError(
132 |                 "Your special token (" + unknown_token + ") to replace unknown words is not in the list of special tokens: " + str(self.special_token))
133 |
134 | progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
135 | encoded_texts = []
136 | for token_data in self.token_generator(texts, **kwargs):
137 | indices, token = token_data[:-1], token_data[-1]
138 |
139 | token_idx = self._token2idx.get(token)
140 | if token_idx is None and unknown_token:
141 | token_idx = self.special_token.index(unknown_token)
142 |
143 | if token_idx is not None:
144 | utils._append(encoded_texts, indices, token_idx)
145 |
146 | # Update progressbar per document level.
147 | progbar.update(indices[0])
148 |
149 | # All done. Finalize progressbar.
150 | progbar.update(len(texts))
151 | return encoded_texts
152 |
153 |     def decode_texts(self, encoded_texts, unknown_token="<UNK>", inplace=True):
154 | """Decodes the texts using internal vocabulary. The list structure is maintained.
155 |
156 | Args:
157 | encoded_texts: The list of texts to decode.
158 |             unknown_token: The placeholder value for unknown token. (Default value: "<UNK>")
159 | inplace: True to make changes inplace. (Default value: True)
160 |
161 | Returns:
162 | The decoded texts.
163 | """
164 | if len(self._token2idx) == 0:
165 | raise ValueError(
166 | "You need to build vocabulary using `build_vocab` before using `decode_texts`")
167 |
168 | if not isinstance(encoded_texts, list):
169 | # assume it's a numpy array
170 | encoded_texts = encoded_texts.tolist()
171 |
172 | if not inplace:
173 | encoded_texts = deepcopy(encoded_texts)
174 | utils._recursive_apply(encoded_texts,
175 | lambda token_id: self._idx2token.get(token_id) or unknown_token)
176 | return encoded_texts
177 |
178 | def add_tokens(self, ngram_set):
179 | start_index = len(self._token2idx) + 1
180 | print('start: ', start_index)
181 | tmp = {}
182 | for k, v in enumerate(ngram_set):
183 | # print(k, v)
184 | idx = k + start_index
185 | self._token2idx[v] = idx
186 | self._idx2token[idx] = v
187 |
188 | tmp[v] = idx
189 | # TODO: Counts?
190 | return tmp
191 |
192 | def add_ngrams(self, encoded_texts, train=False, n=2):
193 | if train:
194 | ngram_set = set()
195 | for input_list in encoded_texts:
196 | for i in range(2, n + 1):
197 | set_of_ngram = ngrams.create_ngram_set(
198 | input_list, ngram_value=i)
199 | ngram_set.update(set_of_ngram)
200 | print(list(ngram_set)[:1000])
201 | ngram_set = [x for x in ngram_set if 1 not in x]
202 | print(ngram_set[:1000])
203 | tmp = self.add_tokens(ngram_set)
204 |
205 | return ngrams.add_ngram(encoded_texts, token_indice=tmp, ngram_range=n)
206 |
207 | def build_vocab(self, texts, verbose=1, **kwargs):
208 | """Builds the internal vocabulary and computes various statistics.
209 |
210 | Args:
211 | texts: The list of text items to encode.
212 | verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)
213 | **kwargs: The kwargs for `token_generator`.
214 | """
215 | if self.has_vocab:
216 | logger.warn(
217 | "Tokenizer already has existing vocabulary. Overriding and building new vocabulary.")
218 |
219 | progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
220 | count_tracker = utils._CountTracker()
221 |
222 | self._token_counts.clear()
223 | self._num_texts = len(texts)
224 |
225 | for token_data in self.token_generator(texts, **kwargs):
226 | indices, token = token_data[:-1], token_data[-1]
227 | count_tracker.update(indices)
228 | self._token_counts[token] += 1
229 |
230 | # Update progressbar per document level.
231 | progbar.update(indices[0])
232 |
233 | # Generate token2idx and idx2token.
234 | self.create_token_indices(self._token_counts.keys())
235 |
236 | # All done. Finalize progressbar update and count tracker.
237 | count_tracker.finalize()
238 | self._counts = count_tracker.counts
239 | progbar.update(len(texts))
240 |
241 | def pad_sequences(self, sequences, fixed_sentences_seq_length=None, fixed_token_seq_length=None,
242 |                       padding='pre', truncating='post', padding_token="<PAD>"):
243 | """Pads each sequence to the same fixed length (length of the longest sequence or provided override).
244 |
245 | Args:
246 | sequences: list of list (samples, words) or list of list of list (samples, sentences, words)
247 |             fixed_sentences_seq_length: The fixed sentence sequence length to use. If None, the largest sentence length is used.
248 |             fixed_token_seq_length: The fixed token sequence length to use. If None, the largest word length is used.
249 | padding: 'pre' or 'post', pad either before or after each sequence.
250 | truncating: 'pre' or 'post', remove values from sequences larger than fixed_sentences_seq_length or fixed_token_seq_length
251 | either in the beginning or in the end of the sentence or word sequence respectively.
252 | padding_token: The token to add for padding.
253 |
254 | Returns:
255 | Numpy array of (samples, max_sentences, max_tokens) or (samples, max_tokens) depending on the sequence input.
256 |
257 | Raises:
258 | ValueError: in case of invalid values for `truncating` or `padding`.
259 | """
260 |         if padding_token not in self.special_token:
261 |             raise ValueError('The padding token "' + padding_token +
262 |                              '" is not in the special tokens of the tokenizer.')
263 |         value = self.special_token.index(padding_token)
264 | # Determine if input is (samples, max_sentences, max_tokens) or not.
265 | if isinstance(sequences[0][0], list):
266 | x = utils._pad_sent_sequences(sequences, fixed_sentences_seq_length,
267 | fixed_token_seq_length, padding, truncating, value)
268 | else:
269 | x = utils._pad_token_sequences(
270 | sequences, fixed_token_seq_length, padding, truncating, value)
271 | return np.array(x, dtype='int32')
272 |
273 | def get_counts(self, i):
274 | """Numpy array of count values for aux_indices. For example, if `token_generator` generates
275 | `(text_idx, sentence_idx, word)`, then `get_counts(0)` returns the numpy array of sentence lengths across
276 | texts. Similarly, `get_counts(1)` will return the numpy array of token lengths across sentences.
277 |
278 | This is useful to plot histogram or eyeball the distributions. For getting standard statistics, you can use
279 | `get_stats` method.
280 | """
281 | if not self.has_vocab:
282 | raise ValueError(
283 | "You need to build the vocabulary using `build_vocab` before using `get_counts`")
284 | return self._counts[i]
285 |
286 | def get_stats(self, i):
287 | """Gets the standard statistics for aux_index `i`. For example, if `token_generator` generates
288 | `(text_idx, sentence_idx, word)`, then `get_stats(0)` will return various statistics about sentence lengths
289 |         across texts. Similarly, `get_stats(1)` will return statistics of token lengths across sentences.
290 |
291 | This information can be used to pad or truncate inputs.
292 | """
293 | # OrderedDict to always show same order if printed.
294 | result = OrderedDict()
295 | result['min'] = np.min(self._counts[i])
296 | result['max'] = np.max(self._counts[i])
297 | result['std'] = np.std(self._counts[i])
298 | result['mean'] = np.mean(self._counts[i])
299 | return result
300 |
301 | def save(self, file_path):
302 | """Serializes this tokenizer to a file.
303 |
304 | Args:
305 | file_path: The file path to use.
306 | """
307 | io.dump(self, file_path)
308 |
309 | @staticmethod
310 | def load(file_path):
311 | """Loads the Tokenizer from a file.
312 |
313 | Args:
314 | file_path: The file path to use.
315 |
316 | Returns:
317 |             The `Tokenizer` instance.
318 | """
319 | return io.load(file_path)
320 |
321 | @property
322 | def has_vocab(self):
323 | return len(self._token_counts) > 0 and self._counts is not None
324 |
325 | @property
326 | def token_index(self):
327 | """Dictionary of token -> idx mappings. This can change with calls to `apply_encoding_options`.
328 | """
329 | return self._token2idx
330 |
331 | @property
332 | def token_counts(self):
333 | """Dictionary of token -> count values for the text corpus used to `build_vocab`.
334 | """
335 | return self._token_counts
336 |
337 | @property
338 | def num_tokens(self):
339 |         """Number of unique tokens for use in encoding/decoding.
340 | This can change with calls to `apply_encoding_options`.
341 | """
342 | return len(self._token2idx)
343 |
344 | @property
345 | def num_texts(self):
346 | """The number of texts used to build the vocabulary.
347 | """
348 | return self._num_texts
349 |
--------------------------------------------------------------------------------
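An end-to-end sketch of the base `Tokenizer` API, using the whitespace-based `SimpleTokenizer` from word_tokenizer.py so no spaCy model is required (hypothetical snippet):

from texcla.preprocessing import SimpleTokenizer

texts = ['the cat sat on the mat', 'the dog barked at the cat']
tokenizer = SimpleTokenizer()
tokenizer.build_vocab(texts)

# Re-index the vocabulary: keep only tokens that occur at least twice;
# everything else is encoded as the unknown token.
tokenizer.apply_encoding_options(min_token_count=2)

encoded = tokenizer.encode_texts(texts, verbose=0)
decoded = tokenizer.decode_texts(encoded, inplace=False)
print(tokenizer.num_tokens, tokenizer.get_stats(0))

# Serialization goes through texcla.utils.io (joblib for non-.json paths).
tokenizer.save('tokenizer.bin')
restored = SimpleTokenizer.load('tokenizer.bin')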
/texcla/preprocessing/utils.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, unicode_literals
2 |
3 | import abc
4 | import logging
5 | from collections import OrderedDict, defaultdict
6 | from copy import deepcopy
7 | from multiprocessing import cpu_count
8 |
9 | import numpy as np
10 | import six
11 | from keras.preprocessing.sequence import pad_sequences as keras_pad_sequences
12 | from keras.utils.generic_utils import Progbar
13 |
14 | try:
15 | import spacy
16 | except ImportError:
17 | pass
18 |
19 |
20 | logger = logging.getLogger(__name__)
21 |
22 |
23 | class _CountTracker(object):
24 | """Helper class to track counts of various document hierarchies in the corpus.
25 | For example, if the tokenizer can tokenize docs as (docs, paragraph, sentences, words), then this utility
26 | will track number of paragraphs, number of sentences within paragraphs and number of words within sentence.
27 | """
28 |
29 | def __init__(self):
30 | self._prev_indices = None
31 | self._local_counts = None
32 | self.counts = None
33 |
34 | def update(self, indices):
35 | """Updates counts based on indices. The algorithm tracks the index change at i and
36 | update global counts for all indices beyond i with local counts tracked so far.
37 | """
38 | # Initialize various lists for the first time based on length of indices.
39 | if self._prev_indices is None:
40 | self._prev_indices = indices
41 |
42 | # +1 to track token counts in the last index.
43 | self._local_counts = np.full(len(indices) + 1, 1)
44 | self._local_counts[-1] = 0
45 | self.counts = [[] for _ in range(len(self._local_counts))]
46 |
47 | has_reset = False
48 | for i in range(len(indices)):
49 | # index value changed. Push all local values beyond i to count and reset those local_counts.
50 | # For example, if document index changed, push counts on sentences and tokens and reset their local_counts
51 | # to indicate that we are tracking those for new document. We need to do this at all document hierarchies.
52 | if indices[i] > self._prev_indices[i]:
53 | self._local_counts[i] += 1
54 | has_reset = True
55 | for j in range(i + 1, len(self.counts)):
56 | self.counts[j].append(self._local_counts[j])
57 | self._local_counts[j] = 1
58 |
59 | # If none of the aux indices changed, update token count.
60 | if not has_reset:
61 | self._local_counts[-1] += 1
62 | self._prev_indices = indices[:]
63 |
64 | def finalize(self):
65 | """This will add the very last document to counts. We also get rid of counts[0] since that
66 |         represents the document level which doesn't come under anything else. We also convert all count
67 | values to numpy arrays so that stats can be computed easily.
68 | """
69 | for i in range(1, len(self._local_counts)):
70 | self.counts[i].append(self._local_counts[i])
71 | self.counts.pop(0)
72 |
73 | for i in range(len(self.counts)):
74 | self.counts[i] = np.array(self.counts[i])
75 |
76 |
77 | def _apply_generator(texts, apply_fn):
78 | for text in texts:
79 | yield apply_fn(text)
80 |
81 |
82 | def _append(lst, indices, value):
83 | """Adds `value` to `lst` list indexed by `indices`. Will create sub lists as required.
84 | """
85 | for i, idx in enumerate(indices):
86 | # We need to loop because sometimes indices can increment by more than 1 due to missing tokens.
87 | # Example: Sentence with no words after filtering words.
88 | while len(lst) <= idx:
89 | # Update max counts whenever a new sublist is created.
90 | # There is no need to worry about indices beyond `i` since they will end up creating new lists as well.
91 | lst.append([])
92 | lst = lst[idx]
93 |
94 | # Add token and update token max count.
95 | lst.append(value)
96 |
97 |
98 | def _recursive_apply(lst, apply_fn):
99 | if len(lst) > 0 and not isinstance(lst[0], list):
100 | for i in range(len(lst)):
101 | lst[i] = apply_fn(lst[i])
102 | else:
103 | for sub_list in lst:
104 | _recursive_apply(sub_list, apply_fn)
105 |
106 |
107 | def _to_unicode(text):
108 | if not isinstance(text, six.text_type):
109 | text = text.decode('utf-8')
110 | return text
111 |
112 |
113 | def _parse_spacy_kwargs(**kwargs):
114 | """Supported args include:
115 |
116 | Args:
117 | n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default.
118 | batch_size: The number of texts to accumulate into a common working set before processing.
119 | (Default value: 1000)
120 | """
121 | n_threads = kwargs.get('n_threads') or kwargs.get('num_threads')
122 | batch_size = kwargs.get('batch_size')
123 |
124 |     if n_threads is None or n_threads == -1:
125 |         n_threads = cpu_count() - 1
126 |     if batch_size is None or batch_size == -1:
127 | batch_size = 1000
128 | return n_threads, batch_size
129 |
130 |
131 | def _pad_token_sequences(sequences, max_tokens,
132 | padding, truncating, value):
133 | # TODO: better variable names (see below)
134 | return keras_pad_sequences(sequences, maxlen=max_tokens, padding=padding, truncating=truncating, value=value)
135 |
136 |
137 | def _pad_sent_sequences(sequences, max_sentences, max_tokens, padding, truncating, value):
138 | # TODO: better names (see below)
139 | # Infer max lengths if needed.
140 | if max_sentences is None or max_tokens is None:
141 | max_sentences_computed = 0
142 | max_tokens_computed = 0
143 | for sent_seq in sequences:
144 | max_sentences_computed = max(max_sentences_computed, len(sent_seq))
145 | max_tokens_computed = max(max_tokens_computed, np.max(
146 | [len(token_seq) for token_seq in sent_seq]))
147 |
148 | # Only use inferred values for None.
149 | if max_sentences is None:
150 | max_sentences = max_sentences_computed
151 |
152 | if max_tokens is None:
153 | max_tokens = max_tokens_computed
154 |
155 | result = np.ones(shape=(len(sequences), max_sentences, max_tokens)) * value
156 |
157 | for idx, sent_seq in enumerate(sequences):
158 | # empty list/array was found
159 | if not len(sent_seq):
160 | continue
161 | if truncating == 'pre':
162 | trunc = sent_seq[-max_sentences:]
163 | elif truncating == 'post':
164 | trunc = sent_seq[:max_sentences]
165 | else:
166 | raise ValueError(
167 | 'Truncating type "%s" not understood' % truncating)
168 |
169 | # Apply padding.
170 | if padding == 'post':
171 | result[idx, :len(trunc)] = _pad_token_sequences(
172 | trunc, max_tokens, padding, truncating, value)
173 | elif padding == 'pre':
174 | result[idx, -len(trunc):] = _pad_token_sequences(trunc,
175 | max_tokens, padding, truncating, value)
176 | else:
177 | raise ValueError('Padding type "%s" not understood' % padding)
178 | return result
179 |
180 |
181 | def unicodify(texts):
182 | """Encodes all text sequences as unicode. This is a python2 hassle.
183 |
184 | Args:
185 | texts: The sequence of texts.
186 |
187 | Returns:
188 | Unicode encoded sequences.
189 | """
190 | return [_to_unicode(text) for text in texts]
191 |
--------------------------------------------------------------------------------
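A quick illustration of the padding helpers' semantics (sketch only; these are private functions and the values are made up):

from texcla.preprocessing.utils import _pad_token_sequences, _pad_sent_sequences

token_seqs = [[1, 2, 3], [4]]
print(_pad_token_sequences(token_seqs, 4, 'pre', 'post', 0))
# [[0 1 2 3]
#  [0 0 0 4]]

# (samples, sentences, words): max lengths are inferred when None is passed.
sent_seqs = [[[1, 2], [3]], [[4, 5, 6]]]
padded = _pad_sent_sequences(sent_seqs, None, None, 'post', 'post', 0)
print(padded.shape)  # (2, 2, 3)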
/texcla/preprocessing/word_tokenizer.py:
--------------------------------------------------------------------------------
1 | try:
2 | import spacy
3 | except ImportError:
4 | pass
5 |
6 | from . import utils
7 | from ..libs import fastTextWikiTokenizer, twokenize
8 | from .tokenizer import Tokenizer
9 |
10 |
11 | class SpacyTokenizer(Tokenizer):
12 | def __init__(self,
13 | lang='en',
14 | lower=True,
15 | lemmatize=False,
16 | remove_punct=True,
17 | remove_digits=True,
18 | remove_stop_words=False,
19 | exclude_oov=False,
20 | exclude_pos_tags=None,
21 | exclude_entities=['PERSON']):
22 | """Encodes text into `(samples, words)`
23 |
24 | Args:
25 | lang: The spacy language to use. (Default value: 'en')
26 | lower: Lower cases the tokens if True. (Default value: True)
27 | lemmatize: Lemmatizes words when set to True. This also makes the word lower case
28 |                 irrespective of the `lower` setting. (Default value: False)
29 | remove_punct: Removes punct words if True. (Default value: True)
30 | remove_digits: Removes digit words if True. (Default value: True)
31 | remove_stop_words: Removes stop words if True. (Default value: False)
32 | exclude_oov: Exclude words that are out of spacy embedding's vocabulary.
33 |                 By default, spaCy's GloVe vectors (1 million words, 300 dimensions) are used. You can override the spacy vocabulary with a custom
34 | embedding to change this. (Default value: False)
35 | exclude_pos_tags: A list of parts of speech tags to exclude. Can be any of spacy.parts_of_speech.IDS
36 | (Default value: None)
37 | exclude_entities: A list of entity types to be excluded.
38 | Supported entity types can be found here: https://spacy.io/docs/usage/entity-recognition#entity-types
39 | (Default value: ['PERSON'])
40 | """
41 |
42 | super(SpacyTokenizer, self).__init__(lang, lower)
43 | self.lemmatize = lemmatize
44 | self.remove_punct = remove_punct
45 | self.remove_digits = remove_digits
46 | self.remove_stop_words = remove_stop_words
47 |
48 | self.exclude_oov = exclude_oov
49 | self.exclude_pos_tags = set(exclude_pos_tags or [])
50 | self.exclude_entities = set(exclude_entities or [])
51 |
52 | def _apply_options(self, token):
53 | """Applies various filtering and processing options on token.
54 |
55 | Returns:
56 | The processed token. None if filtered.
57 | """
58 |         # Apply word token filtering.
59 | if token.is_punct and self.remove_punct:
60 | return None
61 | if token.is_stop and self.remove_stop_words:
62 | return None
63 | if token.is_digit and self.remove_digits:
64 | return None
65 | if token.is_oov and self.exclude_oov:
66 | return None
67 | if token.pos_ in self.exclude_pos_tags:
68 | return None
69 | if token.ent_type_ in self.exclude_entities:
70 | return None
71 |
72 | # Lemmatized ones are already lowered.
73 | if self.lemmatize:
74 | return token.lemma_
75 | if self.lower:
76 | return token.lower_
77 | return token.orth_
78 |
79 | def token_generator(self, texts, **kwargs):
80 | """Yields tokens from texts as `(text_idx, word)`
81 |
82 | Args:
83 | texts: The list of texts.
84 | **kwargs: Supported args include:
85 | n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default.
86 | batch_size: The number of texts to accumulate into a common working set before processing.
87 | (Default value: 1000)
88 | """
89 | # Perf optimization. Only process what is necessary.
90 | n_threads, batch_size = utils._parse_spacy_kwargs(**kwargs)
91 | nlp = spacy.load(self.lang)
92 |
93 | disabled = ['parser']
94 | if len(self.exclude_entities) > 0:
95 | disabled.append('ner')
96 |
97 | kwargs = {
98 | 'batch_size': batch_size,
99 | 'n_threads': n_threads,
100 | 'disable': disabled
101 | }
102 |
103 | for text_idx, doc in enumerate(nlp.pipe(texts, **kwargs)):
104 | for word in doc:
105 | processed_word = self._apply_options(word)
106 | if processed_word is not None:
107 | yield text_idx, processed_word
108 |
109 |
110 | class TwokenizeTokenizer(Tokenizer):
111 | def __init__(self, lang='en', lower=True):
112 | super(TwokenizeTokenizer, self).__init__(lang, lower)
113 |
114 | def token_generator(self, texts):
115 | for id, text in enumerate(texts):
116 | if self.lower:
117 | text = text.lower()
118 | tokens = twokenize.tokenize(text)
119 | for t in tokens:
120 | yield id, t
121 |
122 |
123 | class SimpleTokenizer(Tokenizer):
124 | def __init__(self, lang='en', lower=True):
125 | super(SimpleTokenizer, self).__init__(lang, lower)
126 |
127 | def token_generator(self, texts):
128 | for id, text in enumerate(texts):
129 | if self.lower:
130 | text = text.lower()
131 | tokens = text.split()
132 | for t in tokens:
133 | yield id, t
134 |
135 |
136 | class FastTextWikiTokenizer(Tokenizer):
137 | def __init__(self, lang='en'):
138 | super(FastTextWikiTokenizer, self).__init__(lang, lower=True)
139 |
140 | def token_generator(self, texts):
141 | for id, text in enumerate(texts):
142 | tokens = fastTextWikiTokenizer.tokenize(text)
143 | for t in tokens:
144 | yield id, t
145 |
--------------------------------------------------------------------------------
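A small comparison of the non-spaCy word tokenizers (sketch; the exact tokens depend on the respective tokenizer rules):

from texcla.preprocessing import SimpleTokenizer, TwokenizeTokenizer

texts = ['Check out http://example.com :) #nlp']
print([t for _, t in SimpleTokenizer().token_generator(texts)])
print([t for _, t in TwokenizeTokenizer().token_generator(texts)])
# twokenize is tweet-aware and keeps emoticons, URLs and hashtags as single tokens.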
/texcla/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jfilter/text-classification-keras/85882306d00242a4e6ead90d94f8a1f98a86535a/texcla/utils/__init__.py
--------------------------------------------------------------------------------
/texcla/utils/format.py:
--------------------------------------------------------------------------------
1 | def to_fixed_digits(number):
2 | return "%.4f" % round(number, 4)
3 |
--------------------------------------------------------------------------------
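For reference, `to_fixed_digits` simply renders a number with four decimal places:

from texcla.utils.format import to_fixed_digits

print(to_fixed_digits(0.123456))  # '0.1235'
print(to_fixed_digits(2))         # '2.0000'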
/texcla/utils/generators.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import numpy as np
4 |
5 | from keras.utils import Sequence
6 |
7 |
8 | class ProcessingSequence(Sequence):
9 | def __init__(self, X, y, batch_size, process_fn=None):
10 | """A `Sequence` implementation that can pre-process a mini-batch via `process_fn`
11 |
12 | Args:
13 | X: The numpy array of inputs.
14 | y: The numpy array of targets.
15 | batch_size: The generator mini-batch size.
16 | process_fn: The preprocessing function to apply on `X`
17 | """
18 | self.X = X
19 | self.y = y
20 | self.batch_size = batch_size
21 | self.process_fn = process_fn or (lambda x: x)
22 |
23 | def __len__(self):
24 | return len(self.X) // self.batch_size
25 |
26 | def on_epoch_end(self):
27 | pass
28 |
29 | def __getitem__(self, batch_idx):
30 | batch_X = self.X[batch_idx *
31 | self.batch_size:(batch_idx + 1) * self.batch_size]
32 | batch_y = self.y[batch_idx *
33 | self.batch_size:(batch_idx + 1) * self.batch_size]
34 | return self.process_fn(batch_X), batch_y
35 |
36 |
37 | class BalancedSequence(Sequence):
38 | def __init__(self, X, y, batch_size, process_fn=None):
39 | """A `Sequence` implementation that returns balanced `y` by undersampling majority class.
40 |
41 | Args:
42 | X: The numpy array of inputs.
43 | y: The numpy array of targets.
44 | batch_size: The generator mini-batch size.
45 | process_fn: The preprocessing function to apply on `X`
46 | """
47 | self.X = X
48 | self.y = y
49 | self.batch_size = batch_size
50 | self.process_fn = process_fn or (lambda x: x)
51 |
52 | self.pos_indices = np.where(y == 1)[0]
53 | self.neg_indices = np.where(y == 0)[0]
54 | self.n = min(len(self.pos_indices), len(self.neg_indices))
55 | self._index_array = None
56 |
57 | def __len__(self):
58 | # Reset batch after we are done with minority class.
59 | return (self.n * 2) // self.batch_size
60 |
61 | def on_epoch_end(self):
62 | # Reset batch after all minority indices are covered.
63 | self._index_array = None
64 |
65 | def __getitem__(self, batch_idx):
66 | if self._index_array is None:
67 | pos_indices = self.pos_indices.copy()
68 | neg_indices = self.neg_indices.copy()
69 | np.random.shuffle(pos_indices)
70 | np.random.shuffle(neg_indices)
71 | self._index_array = np.concatenate(
72 | (pos_indices[:self.n], neg_indices[:self.n]))
73 | np.random.shuffle(self._index_array)
74 |
75 | indices = self._index_array[batch_idx *
76 | self.batch_size: (batch_idx + 1) * self.batch_size]
77 | return self.process_fn(self.X[indices]), self.y[indices]
78 |
--------------------------------------------------------------------------------
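A hypothetical training setup with `BalancedSequence` on an imbalanced binary problem (the model itself is elided; any compiled Keras model with a matching input shape would do):

import numpy as np

from texcla.utils.generators import BalancedSequence

X = np.random.randint(0, 1000, size=(500, 100))   # 500 encoded documents, 100 tokens each
y = np.concatenate([np.ones(50), np.zeros(450)])  # heavily imbalanced labels

seq = BalancedSequence(X, y, batch_size=32)
print(len(seq))  # number of balanced mini-batches per epoch
# Each epoch draws ~50 positive and ~50 negative samples in shuffled mini-batches:
# model.fit_generator(seq, epochs=5)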
/texcla/utils/io.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import pickle
4 |
5 | import numpy as np
6 |
7 | import joblib
8 | import jsonpickle
9 | from jsonpickle.ext import numpy as jsonpickle_numpy
10 |
11 | jsonpickle_numpy.register_handlers()
12 |
13 |
14 | def dump(obj, file_name):
15 | if file_name.endswith('.json'):
16 | with open(file_name, 'w') as f:
17 | f.write(jsonpickle.dumps(obj))
18 | return
19 |
20 | if isinstance(obj, np.ndarray):
21 | np.save(file_name, obj)
22 | return
23 |
24 | # Using joblib instead of pickle because of http://bugs.python.org/issue11564
25 | joblib.dump(obj, file_name, protocol=pickle.HIGHEST_PROTOCOL)
26 |
27 |
28 | def load(file_name):
29 | if file_name.endswith('.json'):
30 | with open(file_name, 'r') as f:
31 | return jsonpickle.loads(f.read())
32 |
33 | if file_name.endswith('.npy'):
34 | return np.load(file_name)
35 |
36 | return joblib.load(file_name)
37 |
--------------------------------------------------------------------------------
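A short sketch of the dump/load helpers; the file extension selects the backend (.json via jsonpickle, .npy via numpy, anything else via joblib):

import numpy as np

from texcla.utils import io

io.dump({'vocab_size': 1000}, 'config.json')  # human-readable JSON
io.dump(np.arange(10), 'weights.npy')         # numpy binary format
print(io.load('config.json'))
print(io.load('weights.npy'))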
/texcla/utils/sampling.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import logging
4 | from fractions import Fraction
5 |
6 | import numpy as np
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 |
11 | def equal_distribution_folds(y, folds=2):
12 | """Creates `folds` number of indices that has roughly balanced multi-label distribution.
13 |
14 | Args:
15 | y: The multi-label outputs.
16 | folds: The number of folds to create.
17 |
18 | Returns:
19 | `folds` number of indices that have roughly equal multi-label distributions.
20 | """
21 | n, classes = y.shape
22 |
23 | # Compute sample distribution over classes
24 | dist = y.sum(axis=0).astype('float')
25 | dist /= dist.sum()
26 |
27 | index_list = []
28 | fold_dist = np.zeros((folds, classes), dtype='float')
29 | for _ in range(folds):
30 | index_list.append([])
31 | for i in range(n):
32 | if i < folds:
33 | target_fold = i
34 | else:
35 | normed_folds = fold_dist.T / fold_dist.sum(axis=1)
36 | how_off = normed_folds.T - dist
37 | target_fold = np.argmin(
38 | np.dot((y[i] - .5).reshape(1, -1), how_off.T))
39 | fold_dist[target_fold] += y[i]
40 | index_list[target_fold].append(i)
41 |
42 | logger.debug("Fold distributions:")
43 | logger.debug(fold_dist)
44 | return index_list
45 |
46 |
47 | def multi_label_train_test_split(y, test_size=0.2):
48 | """Creates a test split with roughly the same multi-label distribution in `y`.
49 |
50 | Args:
51 | y: The multi-label outputs.
52 | test_size: The test size in [0, 1]
53 |
54 | Returns:
55 | The train and test indices.
56 | """
57 | if test_size <= 0 or test_size >= 1:
58 | raise ValueError("`test_size` should be between 0 and 1")
59 |
60 | # Find the smallest rational number.
61 | frac = Fraction(test_size).limit_denominator()
62 | test_folds, total_folds = frac.numerator, frac.denominator
63 | logger.warn('Inferring test_size as {}/{}. Generating {} folds. The algorithm might fail if denominator is large.'
64 | .format(test_folds, total_folds, total_folds))
65 |
66 | folds = equal_distribution_folds(y, folds=total_folds)
67 | test_indices = np.concatenate(folds[:test_folds])
68 | train_indices = np.concatenate(folds[test_folds:])
69 | return train_indices, test_indices
70 |
--------------------------------------------------------------------------------
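A sketch of creating a label-balanced split for multi-label targets (random data, for illustration only):

import numpy as np

from texcla.utils.sampling import multi_label_train_test_split

y = np.random.randint(0, 2, size=(100, 5))                  # 100 samples, 5 binary labels
y[np.arange(100), np.random.randint(0, 5, size=100)] = 1    # ensure every sample has at least one label
train_idx, test_idx = multi_label_train_test_split(y, test_size=0.2)
print(len(train_idx), len(test_idx))                        # roughly 80 / 20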