├── joelnet ├── __init__.py ├── tensor.py ├── optim.py ├── loss.py ├── train.py ├── nn.py ├── data.py └── layers.py ├── requirements.txt ├── .gitignore ├── xor.py ├── LICENSE ├── README.md ├── fizzbuzz.py └── .pylintrc /joelnet/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .mypy_cache 2 | *.pyc 3 | -------------------------------------------------------------------------------- /joelnet/tensor.py: -------------------------------------------------------------------------------- 1 | """ 2 | A tensor is just a n-dimensional array 3 | """ 4 | from numpy import ndarray as Tensor  # type alias: a joelnet Tensor is a plain numpy ndarray 5 | -------------------------------------------------------------------------------- /joelnet/optim.py: -------------------------------------------------------------------------------- 1 | """ 2 | We use an optimizer to adjust the parameters 3 | of our network based on the gradients computed 4 | during backpropagation 5 | """ 6 | from joelnet.nn import NeuralNet 7 | 8 | class Optimizer:  # abstract base: subclasses define how params are updated from grads 9 | def step(self, net: NeuralNet) -> None: 10 | raise NotImplementedError 11 | 12 | 13 | class SGD(Optimizer):  # vanilla stochastic gradient descent 14 | def __init__(self, lr: float = 0.01) -> None: 15 | self.lr = lr  # learning rate (step size) 16 | 17 | def step(self, net: NeuralNet) -> None: 18 | for param, grad in net.params_and_grads(): 19 | param -= self.lr * grad  # in-place update: mutates the layer's parameter array directly 20 | -------------------------------------------------------------------------------- /xor.py: -------------------------------------------------------------------------------- 1 | """ 2 | The canonical example of a function that can't be 3 | learned with a simple linear model is XOR 4
| import numpy as np 6 | 7 | from joelnet.train import train 8 | from joelnet.nn import NeuralNet 9 | from joelnet.layers import Linear, Tanh 10 | 11 | inputs = np.array([ 12 | [0, 0], 13 | [1, 0], 14 | [0, 1], 15 | [1, 1] 16 | ]) 17 | 18 | targets = np.array([  # one-hot targets: column 0 = "xor is 0", column 1 = "xor is 1" 19 | [1, 0], 20 | [0, 1], 21 | [0, 1], 22 | [1, 0] 23 | ]) 24 | 25 | net = NeuralNet([  # 2-2-2 net; the hidden Tanh nonlinearity is what makes XOR learnable 26 | Linear(input_size=2, output_size=2), 27 | Tanh(), 28 | Linear(input_size=2, output_size=2) 29 | ]) 30 | 31 | train(net, inputs, targets) 32 | 33 | for x, y in zip(inputs, targets): 34 | predicted = net.forward(x) 35 | print(x, predicted, y) 36 | -------------------------------------------------------------------------------- /joelnet/loss.py: -------------------------------------------------------------------------------- 1 | """ 2 | A loss function measures how good our predictions are, 3 | we can use this to adjust the parameters of our network 4 | """ 5 | import numpy as np 6 | 7 | from joelnet.tensor import Tensor 8 | 9 | class Loss: 10 | def loss(self, predicted: Tensor, actual: Tensor) -> float: 11 | raise NotImplementedError 12 | 13 | def grad(self, predicted: Tensor, actual: Tensor) -> Tensor:  # gradient of the loss w.r.t. predicted 14 | raise NotImplementedError 15 | 16 | 17 | class MSE(Loss): 18 | """ 19 | MSE is mean squared error, although we're 20 | just going to do total squared error 21 | """ 22 | def loss(self, predicted: Tensor, actual: Tensor) -> float: 23 | return np.sum((predicted - actual) ** 2)  # total (not mean) squared error 24 | 25 | def grad(self, predicted: Tensor, actual: Tensor) -> Tensor: 26 | return 2 * (predicted - actual)  # d/dpredicted of sum((predicted - actual)**2) 27 | -------------------------------------------------------------------------------- /joelnet/train.py: -------------------------------------------------------------------------------- 1 | """ 2 | Here's a function that can train a neural net 3 | """ 4 | 5 | from joelnet.tensor import Tensor 6 | from joelnet.nn import NeuralNet 7 | from joelnet.loss import Loss, MSE 8 | from joelnet.optim import Optimizer, SGD 9 | from joelnet.data import
DataIterator, BatchIterator 10 | 11 | 12 | def train(net: NeuralNet, 13 | inputs: Tensor, 14 | targets: Tensor, 15 | num_epochs: int = 5000, 16 | iterator: DataIterator = BatchIterator(),  # NOTE: default iterator/loss/optimizer instances are created once at import time and shared across calls 17 | loss: Loss = MSE(), 18 | optimizer: Optimizer = SGD()) -> None: 19 | for epoch in range(num_epochs): 20 | epoch_loss = 0.0 21 | for batch in iterator(inputs, targets): 22 | predicted = net.forward(batch.inputs) 23 | epoch_loss += loss.loss(predicted, batch.targets) 24 | grad = loss.grad(predicted, batch.targets)  # dLoss/dPredicted for this batch 25 | net.backward(grad)  # populates each layer's grads 26 | optimizer.step(net)  # apply the freshly computed gradients 27 | print(epoch, epoch_loss) 28 | -------------------------------------------------------------------------------- /joelnet/nn.py: -------------------------------------------------------------------------------- 1 | """ 2 | A NeuralNet is just a collection of layers. 3 | It behaves a lot like a layer itself, although 4 | we're not going to make it one. 5 | """ 6 | from typing import Sequence, Iterator, Tuple 7 | 8 | from joelnet.tensor import Tensor 9 | from joelnet.layers import Layer 10 | 11 | 12 | class NeuralNet: 13 | def __init__(self, layers: Sequence[Layer]) -> None: 14 | self.layers = layers 15 | 16 | def forward(self, inputs: Tensor) -> Tensor: 17 | for layer in self.layers:  # feed each layer's output into the next 18 | inputs = layer.forward(inputs) 19 | return inputs 20 | 21 | def backward(self, grad: Tensor) -> Tensor: 22 | for layer in reversed(self.layers):  # backprop visits layers in reverse order 23 | grad = layer.backward(grad) 24 | return grad 25 | 26 | def params_and_grads(self) -> Iterator[Tuple[Tensor, Tensor]]: 27 | for layer in self.layers: 28 | for name, param in layer.params.items(): 29 | grad = layer.grads[name]  # params and grads share keys by construction (see Layer) 30 | yield param, grad 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Joel Grus 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and
associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /joelnet/data.py: -------------------------------------------------------------------------------- 1 | """ 2 | We'll feed inputs into our network in batches. 3 | So here are some tools for iterating over data in batches. 
4 | """ 5 | from typing import Iterator, NamedTuple 6 | 7 | import numpy as np 8 | 9 | from joelnet.tensor import Tensor 10 | 11 | Batch = NamedTuple("Batch", [("inputs", Tensor), ("targets", Tensor)]) 12 | 13 | 14 | class DataIterator: 15 | def __call__(self, inputs: Tensor, targets: Tensor) -> Iterator[Batch]: 16 | raise NotImplementedError 17 | 18 | 19 | class BatchIterator(DataIterator): 20 | def __init__(self, batch_size: int = 32, shuffle: bool = True) -> None: 21 | self.batch_size = batch_size 22 | self.shuffle = shuffle 23 | 24 | def __call__(self, inputs: Tensor, targets: Tensor) -> Iterator[Batch]: 25 | starts = np.arange(0, len(inputs), self.batch_size) 26 | if self.shuffle: 27 | np.random.shuffle(starts) 28 | 29 | for start in starts: 30 | end = start + self.batch_size 31 | batch_inputs = inputs[start:end] 32 | batch_targets = targets[start:end] 33 | yield Batch(batch_inputs, batch_targets) 34 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Livecoding Madness: Let's Build a Deep Learning Library 2 | 3 | [video](https://www.youtube.com/watch?v=o64FV-ez6Gw) of the livecoding. 4 | 5 | [slides](https://docs.google.com/presentation/d/1y9aC4hbNbcpjw9r6BMSayBmEdmdUwR5us_26X3S2j2M/edit?usp=sharing) 6 | 7 | -------- 8 | 9 | [Joel Grus](http://joelgrus.com) 10 | 11 | [@joelgrus](https://twitter.com/joelgrus) 12 | 13 | research engineer, [Allen Institute for AI](http://allenai.org) 14 | 15 | author, [Data Science from Scratch](http://shop.oreilly.com/product/0636920033400.do) 16 | 17 | co-host, ["Adversarial Learning" podcast](http://adversariallearning.com) 18 | 19 | # Livecoding Madness: Let's Build a Deep Learning Library 20 | 21 | In this talk we'll build a deep learning library 22 | from scratch (well, from NumPy) in 45 minutes. 23 | 24 | How is that possible? By typing _really_ fast. 
25 | 26 | Along the way we'll talk about library design, 27 | type hinting, Python 3.6, and of course deep learning. 28 | 29 | 30 | # Here's the plan: 31 | 32 | 1. Tensors 33 | 2. Loss Functions 34 | 3. Layers 35 | 4. Neural Nets 36 | 5. Optimizers 37 | 6. Data 38 | 7. Training 39 | 8. XOR Example 40 | 9. FizzBuzz Example 41 | -------------------------------------------------------------------------------- /fizzbuzz.py: -------------------------------------------------------------------------------- 1 | """ 2 | FizzBuzz is the following problem: 3 | 4 | For each of the numbers 1 to 100: 5 | * if the number is divisible by 3, print "fizz" 6 | * if the number is divisible by 5, print "buzz" 7 | * if the number is divisible by 15, print "fizzbuzz" 8 | * otherwise, just print the number 9 | """ 10 | from typing import List 11 | 12 | import numpy as np 13 | 14 | from joelnet.train import train 15 | from joelnet.nn import NeuralNet 16 | from joelnet.layers import Linear, Tanh 17 | from joelnet.optim import SGD 18 | 19 | def fizz_buzz_encode(x: int) -> List[int]:  # one-hot [number, fizz, buzz, fizzbuzz]; 15 is checked first since it also divides by 3 and 5 20 | if x % 15 == 0: 21 | return [0, 0, 0, 1] 22 | elif x % 5 == 0: 23 | return [0, 0, 1, 0] 24 | elif x % 3 == 0: 25 | return [0, 1, 0, 0] 26 | else: 27 | return [1, 0, 0, 0] 28 | 29 | 30 | def binary_encode(x: int) -> List[int]: 31 | """ 32 | 10 digit binary encoding of x 33 | """ 34 | return [x >> i & 1 for i in range(10)]  # least-significant bit first 35 | 36 | inputs = np.array([ 37 | binary_encode(x) 38 | for x in range(101, 1024)  # train on 101..1023 so 1..100 (printed below) is held out 39 | ]) 40 | 41 | targets = np.array([ 42 | fizz_buzz_encode(x) 43 | for x in range(101, 1024) 44 | ]) 45 | 46 | net = NeuralNet([ 47 | Linear(input_size=10, output_size=50), 48 | Tanh(), 49 | Linear(input_size=50, output_size=4) 50 | ]) 51 | 52 | train(net, 53 | inputs, 54 | targets, 55 | num_epochs=5000, 56 | optimizer=SGD(lr=0.001)) 57 | 58 | for x in range(1, 101): 59 | predicted = net.forward(binary_encode(x)) 60 | predicted_idx = np.argmax(predicted) 61 | actual_idx = np.argmax(fizz_buzz_encode(x)) 62 |
labels = [str(x), "fizz", "buzz", "fizzbuzz"] 63 | print(x, labels[predicted_idx], labels[actual_idx]) 64 | -------------------------------------------------------------------------------- /joelnet/layers.py: -------------------------------------------------------------------------------- 1 | """ 2 | Our neural nets will be made up of layers. 3 | Each layer needs to pass its inputs forward 4 | and propagate gradients backward. For example, 5 | a neural net might look like 6 | 7 | inputs -> Linear -> Tanh -> Linear -> output 8 | """ 9 | from typing import Dict, Callable 10 | 11 | import numpy as np 12 | 13 | from joelnet.tensor import Tensor 14 | 15 | 16 | class Layer: 17 | def __init__(self) -> None: 18 | self.params: Dict[str, Tensor] = {}  # learnable tensors, keyed by name ("w", "b", ...) 19 | self.grads: Dict[str, Tensor] = {}  # gradients for those params, filled in by backward() 20 | 21 | def forward(self, inputs: Tensor) -> Tensor: 22 | """ 23 | Produce the outputs corresponding to these inputs 24 | """ 25 | raise NotImplementedError 26 | 27 | def backward(self, grad: Tensor) -> Tensor: 28 | """ 29 | Backpropagate this gradient through the layer 30 | """ 31 | raise NotImplementedError 32 | 33 | 34 | class Linear(Layer): 35 | """ 36 | computes output = inputs @ w + b 37 | """ 38 | def __init__(self, input_size: int, output_size: int) -> None: 39 | # inputs will be (batch_size, input_size) 40 | # outputs will be (batch_size, output_size) 41 | super().__init__() 42 | self.params["w"] = np.random.randn(input_size, output_size)  # standard-normal init 43 | self.params["b"] = np.random.randn(output_size) 44 | 45 | def forward(self, inputs: Tensor) -> Tensor: 46 | """ 47 | outputs = inputs @ w + b 48 | """ 49 | self.inputs = inputs  # stash inputs; backward() needs them for the weight gradient 50 | return inputs @ self.params["w"] + self.params["b"] 51 | 52 | def backward(self, grad: Tensor) -> Tensor: 53 | """ 54 | if y = f(x) and x = a * b + c 55 | then dy/da = f'(x) * b 56 | and dy/db = f'(x) * a 57 | and dy/dc = f'(x) 58 | 59 | if y = f(x) and x = a @ b + c 60 | then dy/da = f'(x) @ b.T 61 | and dy/db = a.T @ f'(x) 62 | and dy/dc = f'(x) 63 | """ 64 |
self.grads["b"] = np.sum(grad, axis=0) 65 | self.grads["w"] = self.inputs.T @ grad 66 | return grad @ self.params["w"].T 67 | 68 | 69 | F = Callable[[Tensor], Tensor] 70 | 71 | class Activation(Layer): 72 | """ 73 | An activation layer just applies a function 74 | elementwise to its inputs 75 | """ 76 | def __init__(self, f: F, f_prime: F) -> None: 77 | super().__init__() 78 | self.f = f 79 | self.f_prime = f_prime 80 | 81 | def forward(self, inputs: Tensor) -> Tensor: 82 | self.inputs = inputs 83 | return self.f(inputs) 84 | 85 | def backward(self, grad: Tensor) -> Tensor: 86 | """ 87 | if y = f(x) and x = g(z) 88 | then dy/dz = f'(x) * g'(z) 89 | """ 90 | return self.f_prime(self.inputs) * grad 91 | 92 | 93 | def tanh(x: Tensor) -> Tensor: 94 | return np.tanh(x) 95 | 96 | def tanh_prime(x: Tensor) -> Tensor: 97 | y = tanh(x) 98 | return 1 - y ** 2 99 | 100 | 101 | class Tanh(Activation): 102 | def __init__(self): 103 | super().__init__(tanh, tanh_prime) 104 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | 3 | # A comma-separated list of package or module names from where C extensions may 4 | # be loaded. Extensions are loading into the active Python interpreter and may 5 | # run arbitrary code 6 | extension-pkg-whitelist= 7 | 8 | # Add files or directories to the blacklist. They should be base names, not 9 | # paths. 10 | ignore=CVS 11 | 12 | # Add files or directories matching the regex patterns to the blacklist. The 13 | # regex matches against base names, not paths. 14 | ignore-patterns= 15 | 16 | # Python code to execute, usually for sys.path manipulation such as 17 | # pygtk.require(). 18 | #init-hook= 19 | 20 | # Use multiple processes to speed up Pylint. 21 | jobs=1 22 | 23 | # List of plugins (as comma separated values of python modules names) to load, 24 | # usually to register additional checkers. 
25 | load-plugins= 26 | 27 | # Pickle collected data for later comparisons. 28 | persistent=yes 29 | 30 | # Specify a configuration file. 31 | #rcfile= 32 | 33 | # Allow loading of arbitrary C extensions. Extensions are imported into the 34 | # active Python interpreter and may run arbitrary code. 35 | unsafe-load-any-extension=no 36 | 37 | 38 | [MESSAGES CONTROL] 39 | 40 | # Only show warnings with the listed confidence levels. Leave empty to show 41 | # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED 42 | confidence= 43 | 44 | # Disable the message, report, category or checker with the given id(s). You 45 | # can either give multiple identifiers separated by comma (,) or put this 46 | # option multiple times (only on the command line, not in the configuration 47 | # file where it should appear only once).You can also use "--disable=all" to 48 | # disable everything first and then reenable specific checks. For example, if 49 | # you want to run only the similarities checker, you can use "--disable=all 50 | # --enable=similarities". 
If you want to run only the classes checker, but have 51 | # no Warning level messages displayed, use"--disable=all --enable=classes 52 | # --disable=W" 53 | disable=print-statement,parameter-unpacking,unpacking-in-except,old-raise-syntax,backtick,long-suffix,old-ne-operator,old-octal-literal,import-star-module-level,raw-checker-failed,bad-inline-option,locally-disabled,locally-enabled,file-ignored,suppressed-message,useless-suppression,deprecated-pragma,apply-builtin,basestring-builtin,buffer-builtin,cmp-builtin,coerce-builtin,execfile-builtin,file-builtin,long-builtin,raw_input-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,no-absolute-import,old-division,dict-iter-method,dict-view-method,next-method-called,metaclass-assignment,indexing-exception,raising-string,reload-builtin,oct-method,hex-method,nonzero-method,cmp-method,input-builtin,round-builtin,intern-builtin,unichr-builtin,map-builtin-not-iterating,zip-builtin-not-iterating,range-builtin-not-iterating,filter-builtin-not-iterating,using-cmp-argument,eq-without-hash,div-method,idiv-method,rdiv-method,exception-message-attribute,invalid-str-codec,sys-max-int,bad-python3-import,deprecated-string-function,deprecated-str-translate-call,missing-docstring,too-many-arguments,invalid-name,too-few-public-methods 54 | 55 | # Enable the message, report, category or checker with the given id(s). You can 56 | # either give multiple identifier separated by comma (,) or put this option 57 | # multiple time (only on the command line, not in the configuration file where 58 | # it should appear only once). See also the "--disable" option for examples. 59 | enable= 60 | 61 | 62 | [REPORTS] 63 | 64 | # Python expression which should return a note less than 10 (10 is the highest 65 | # note). 
You have access to the variables errors warning, statement which 66 | # respectively contain the number of errors / warnings messages and the total 67 | # number of statements analyzed. This is used by the global evaluation report 68 | # (RP0004). 69 | evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) 70 | 71 | # Template used to display messages. This is a python new-style format string 72 | # used to format the message information. See doc for all details 73 | #msg-template= 74 | 75 | # Set the output format. Available formats are text, parseable, colorized, json 76 | # and msvs (visual studio).You can also give a reporter class, eg 77 | # mypackage.mymodule.MyReporterClass. 78 | output-format=text 79 | 80 | # Tells whether to display a full report or only the messages 81 | reports=no 82 | 83 | # Activate the evaluation score. 84 | score=yes 85 | 86 | 87 | [REFACTORING] 88 | 89 | # Maximum number of nested blocks for function / method body 90 | max-nested-blocks=5 91 | 92 | 93 | [BASIC] 94 | 95 | # Naming hint for argument names 96 | argument-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 97 | 98 | # Regular expression matching correct argument names 99 | argument-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 100 | 101 | # Naming hint for attribute names 102 | attr-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 103 | 104 | # Regular expression matching correct attribute names 105 | attr-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 106 | 107 | # Bad variable names which should always be refused, separated by a comma 108 | bad-names=foo,bar,baz,toto,tutu,tata 109 | 110 | # Naming hint for class attribute names 111 | class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ 112 | 113 | # Regular expression matching correct class attribute names 114 | class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ 115 | 116 | # Naming hint for class names 117 | class-name-hint=[A-Z_][a-zA-Z0-9]+$ 118 | 119 | # Regular 
expression matching correct class names 120 | class-rgx=[A-Z_][a-zA-Z0-9]+$ 121 | 122 | # Naming hint for constant names 123 | const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ 124 | 125 | # Regular expression matching correct constant names 126 | const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ 127 | 128 | # Minimum line length for functions/classes that require docstrings, shorter 129 | # ones are exempt. 130 | docstring-min-length=-1 131 | 132 | # Naming hint for function names 133 | function-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 134 | 135 | # Regular expression matching correct function names 136 | function-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 137 | 138 | # Good variable names which should always be accepted, separated by a comma 139 | good-names=i,j,k,ex,Run,_ 140 | 141 | # Include a hint for the correct naming format with invalid-name 142 | include-naming-hint=no 143 | 144 | # Naming hint for inline iteration names 145 | inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ 146 | 147 | # Regular expression matching correct inline iteration names 148 | inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ 149 | 150 | # Naming hint for method names 151 | method-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 152 | 153 | # Regular expression matching correct method names 154 | method-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 155 | 156 | # Naming hint for module names 157 | module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ 158 | 159 | # Regular expression matching correct module names 160 | module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ 161 | 162 | # Colon-delimited sets of names that determine each other's naming style when 163 | # the name regexes allow several styles. 164 | name-group= 165 | 166 | # Regular expression which should only match function or class names that do 167 | # not require a docstring. 168 | no-docstring-rgx=^_ 169 | 170 | # List of decorators that produce properties, such as abc.abstractproperty. 
Add 171 | # to this list to register other decorators that produce valid properties. 172 | property-classes=abc.abstractproperty 173 | 174 | # Naming hint for variable names 175 | variable-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 176 | 177 | # Regular expression matching correct variable names 178 | variable-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 179 | 180 | 181 | [FORMAT] 182 | 183 | # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. 184 | expected-line-ending-format= 185 | 186 | # Regexp for a line that is allowed to be longer than the limit. 187 | ignore-long-lines=^\s*(# )??$ 188 | 189 | # Number of spaces of indent required inside a hanging or continued line. 190 | indent-after-paren=4 191 | 192 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 193 | # tab). 194 | indent-string=' ' 195 | 196 | # Maximum number of characters on a single line. 197 | max-line-length=100 198 | 199 | # Maximum number of lines in a module 200 | max-module-lines=1000 201 | 202 | # List of optional constructs for which whitespace checking is disabled. `dict- 203 | # separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. 204 | # `trailing-comma` allows a space between comma and closing bracket: (a, ). 205 | # `empty-line` allows space-only lines. 206 | no-space-check=trailing-comma,dict-separator 207 | 208 | # Allow the body of a class to be on the same line as the declaration if body 209 | # contains single statement. 210 | single-line-class-stmt=no 211 | 212 | # Allow the body of an if to be on the same line as the test if there is no 213 | # else. 214 | single-line-if-stmt=no 215 | 216 | 217 | [LOGGING] 218 | 219 | # Logging modules to check that the string format arguments are in logging 220 | # function parameter format 221 | logging-modules=logging 222 | 223 | 224 | [MISCELLANEOUS] 225 | 226 | # List of note tags to take in consideration, separated by a comma. 
227 | notes=FIXME,XXX,TODO 228 | 229 | 230 | [SIMILARITIES] 231 | 232 | # Ignore comments when computing similarities. 233 | ignore-comments=yes 234 | 235 | # Ignore docstrings when computing similarities. 236 | ignore-docstrings=yes 237 | 238 | # Ignore imports when computing similarities. 239 | ignore-imports=no 240 | 241 | # Minimum lines number of a similarity. 242 | min-similarity-lines=4 243 | 244 | 245 | [SPELLING] 246 | 247 | # Spelling dictionary name. Available dictionaries: none. To make it working 248 | # install python-enchant package. 249 | spelling-dict= 250 | 251 | # List of comma separated words that should not be checked. 252 | spelling-ignore-words= 253 | 254 | # A path to a file that contains private dictionary; one word per line. 255 | spelling-private-dict-file= 256 | 257 | # Tells whether to store unknown words to indicated private dictionary in 258 | # --spelling-private-dict-file option instead of raising a message. 259 | spelling-store-unknown-words=no 260 | 261 | 262 | [TYPECHECK] 263 | 264 | # List of decorators that produce context managers, such as 265 | # contextlib.contextmanager. Add to this list to register other decorators that 266 | # produce valid context managers. 267 | contextmanager-decorators=contextlib.contextmanager 268 | 269 | # List of members which are set dynamically and missed by pylint inference 270 | # system, and so shouldn't trigger E1101 when accessed. Python regular 271 | # expressions are accepted. 272 | generated-members= 273 | 274 | # Tells whether missing members accessed in mixin class should be ignored. A 275 | # mixin class is detected if its name ends with "mixin" (case insensitive). 276 | ignore-mixin-members=yes 277 | 278 | # This flag controls whether pylint should warn about no-member and similar 279 | # checks whenever an opaque object is returned when inferring. 
The inference 280 | # can return multiple potential results while evaluating a Python object, but 281 | # some branches might not be evaluated, which results in partial inference. In 282 | # that case, it might be useful to still emit no-member and other checks for 283 | # the rest of the inferred objects. 284 | ignore-on-opaque-inference=yes 285 | 286 | # List of class names for which member attributes should not be checked (useful 287 | # for classes with dynamically set attributes). This supports the use of 288 | # qualified names. 289 | ignored-classes=optparse.Values,thread._local,_thread._local 290 | 291 | # List of module names for which member attributes should not be checked 292 | # (useful for modules/projects where namespaces are manipulated during runtime 293 | # and thus existing member attributes cannot be deduced by static analysis. It 294 | # supports qualified module names, as well as Unix pattern matching. 295 | ignored-modules=numpy,numpy.random 296 | 297 | # Show a hint with possible names when a member name was not found. The aspect 298 | # of finding the hint is based on edit distance. 299 | missing-member-hint=yes 300 | 301 | # The minimum edit distance a name should have in order to be considered a 302 | # similar match for a missing member name. 303 | missing-member-hint-distance=1 304 | 305 | # The total number of similar names that should be taken in consideration when 306 | # showing a hint for a missing member. 307 | missing-member-max-choices=1 308 | 309 | 310 | [VARIABLES] 311 | 312 | # List of additional names supposed to be defined in builtins. Remember that 313 | # you should avoid to define new builtins when possible. 314 | additional-builtins= 315 | 316 | # Tells whether unused global variables should be treated as a violation. 317 | allow-global-unused-variables=yes 318 | 319 | # List of strings which can identify a callback function by name. A callback 320 | # name must start or end with one of those strings. 
321 | callbacks=cb_,_cb 322 | 323 | # A regular expression matching the name of dummy variables (i.e. expectedly 324 | # not used). 325 | dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ 326 | 327 | # Argument names that match this expression will be ignored. Default to name 328 | # with leading underscore 329 | ignored-argument-names=_.*|^ignored_|^unused_ 330 | 331 | # Tells whether we should check for unused import in __init__ files. 332 | init-import=no 333 | 334 | # List of qualified module names which can have objects that can redefine 335 | # builtins. 336 | redefining-builtins-modules=six.moves,future.builtins 337 | 338 | 339 | [CLASSES] 340 | 341 | # List of method names used to declare (i.e. assign) instance attributes. 342 | defining-attr-methods=__init__,__new__,setUp 343 | 344 | # List of member names, which should be excluded from the protected access 345 | # warning. 346 | exclude-protected=_asdict,_fields,_replace,_source,_make 347 | 348 | # List of valid names for the first argument in a class method. 349 | valid-classmethod-first-arg=cls 350 | 351 | # List of valid names for the first argument in a metaclass class method. 352 | valid-metaclass-classmethod-first-arg=mcs 353 | 354 | 355 | [DESIGN] 356 | 357 | # Maximum number of arguments for function / method 358 | max-args=5 359 | 360 | # Maximum number of attributes for a class (see R0902). 361 | max-attributes=7 362 | 363 | # Maximum number of boolean expressions in a if statement 364 | max-bool-expr=5 365 | 366 | # Maximum number of branch for function / method body 367 | max-branches=12 368 | 369 | # Maximum number of locals for function / method body 370 | max-locals=15 371 | 372 | # Maximum number of parents for a class (see R0901). 373 | max-parents=7 374 | 375 | # Maximum number of public methods for a class (see R0904). 
376 | max-public-methods=20 377 | 378 | # Maximum number of return / yield for function / method body 379 | max-returns=6 380 | 381 | # Maximum number of statements in function / method body 382 | max-statements=50 383 | 384 | # Minimum number of public methods for a class (see R0903). 385 | min-public-methods=2 386 | 387 | 388 | [IMPORTS] 389 | 390 | # Allow wildcard imports from modules that define __all__. 391 | allow-wildcard-with-all=no 392 | 393 | # Analyse import fallback blocks. This can be used to support both Python 2 and 394 | # 3 compatible code, which means that the block might have code that exists 395 | # only in one or another interpreter, leading to false positives when analysed. 396 | analyse-fallback-blocks=no 397 | 398 | # Deprecated modules which should not be used, separated by a comma 399 | deprecated-modules=optparse,tkinter.tix 400 | 401 | # Create a graph of external dependencies in the given file (report RP0402 must 402 | # not be disabled) 403 | ext-import-graph= 404 | 405 | # Create a graph of every (i.e. internal and external) dependencies in the 406 | # given file (report RP0402 must not be disabled) 407 | import-graph= 408 | 409 | # Create a graph of internal dependencies in the given file (report RP0402 must 410 | # not be disabled) 411 | int-import-graph= 412 | 413 | # Force import order to recognize a module as part of the standard 414 | # compatibility libraries. 415 | known-standard-library= 416 | 417 | # Force import order to recognize a module as part of a third party library. 418 | known-third-party=enchant 419 | 420 | 421 | [EXCEPTIONS] 422 | 423 | # Exceptions that will emit a warning when being caught. Defaults to 424 | # "Exception" 425 | overgeneral-exceptions=Exception 426 | --------------------------------------------------------------------------------