');
54 | for(var key in obj) {
55 |     var a = $('<a></a>');  // NOTE: the HTML string literal here was stripped during extraction; an anchor element is assumed
56 |     a.html(key + ':');
57 |     block.append(a);
58 |     block.append(obj[key]);
59 | }
60 | customIndex.append(block);
61 | });
62 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/_static/sanitize_desc_name.js:
--------------------------------------------------------------------------------
1 | //File: sanitize_desc_name.js
2 |
3 | $(function (){
4 |     var selected = $('div.section>dl>dt>code.descclassname');
5 |     selected.each(function(_, e) {
6 |         var text = e.innerText;
7 |         if (text.startsWith('tensorpack.')) {
8 |             text = text.substr(11);  // drop the 'tensorpack.' prefix (11 characters)
9 |             e.innerText = text;
10 |         }
11 |     });
12 | });
13 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/_templates/layout.html:
--------------------------------------------------------------------------------
1 | {% extends "!layout.html" %}
2 |
3 | {%- block extrahead %}
4 |
7 |
10 |
11 |
20 | {% endblock %}
21 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/casestudies/index.rst:
--------------------------------------------------------------------------------
1 | Case Studies
2 | --------------------
3 |
4 | .. toctree::
5 |     :maxdepth: 1
6 |
7 |
8 |     colorize
9 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/index.rst:
--------------------------------------------------------------------------------
1 | Tensorpack Documentation
2 | ==============================
3 |
4 | .. image:: ../.github/tensorpack.png
5 |
6 | Tensorpack is a **training interface** based on TensorFlow.
7 |
8 | It's Yet Another TF wrapper, but it differs in the following ways:
9 |
10 | - Focus on **training speed**.
11 |
12 |   - Speed comes for free with tensorpack -- it uses TensorFlow in an
13 |     **efficient way** with no extra overhead. On various CNNs, it runs 1.5~1.7x faster than the equivalent Keras code.
14 |
15 |   - Data-parallel multi-GPU training is available off-the-shelf. It is as fast as Google's
16 |     `official benchmark <https://www.tensorflow.org/performance/benchmarks>`_.
17 |     You cannot beat its speed unless you're a TensorFlow expert.
18 |
19 |   - See `tensorpack/benchmarks <https://github.com/tensorpack/benchmarks>`_ for some benchmark scripts.
20 |
21 | - Focus on large datasets.
22 |
23 |   - It's painful to read/preprocess data through TF. Tensorpack helps you load large datasets (e.g. ImageNet) in
24 |     **pure Python** with autoparallelization.
25 |
26 | - It's not a model wrapper.
27 |
28 |   - There are already too many symbolic function wrappers.
29 |     Tensorpack includes only a few common models, but you can use any other wrappers within tensorpack, including sonnet/Keras/slim/tflearn/tensorlayer/....
30 |
31 | See :doc:`tutorial/index` to learn more about these features:
32 |
33 |
34 | .. toctree::
35 |     :maxdepth: 3
36 |
37 |     tutorial/index
38 |     modules/index
39 | .. casestudies/index
40 |
41 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/modules/callbacks.rst:
--------------------------------------------------------------------------------
1 | tensorpack.callbacks package
2 | ============================
3 |
4 | .. container:: custom-index
5 |
6 |     .. raw:: html
7 |
8 |
9 |
10 |
11 | .. automodule:: tensorpack.callbacks
12 |     :members:
13 |     :no-undoc-members:
14 |     :show-inheritance:
15 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/modules/dataflow.dataset.rst:
--------------------------------------------------------------------------------
1 | tensorpack.dataflow.dataset package
2 | ===================================
3 |
4 | .. container:: custom-index
5 |
6 |     .. raw:: html
7 |
8 |
9 |
10 |
11 | .. automodule:: tensorpack.dataflow.dataset
12 |     :members:
13 |     :undoc-members:
14 |     :show-inheritance:
15 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/modules/dataflow.imgaug.rst:
--------------------------------------------------------------------------------
1 | tensorpack.dataflow.imgaug package
2 | ==================================
3 |
4 | .. container:: custom-index
5 |
6 |     .. raw:: html
7 |
8 |
9 |
10 | .. automodule:: tensorpack.dataflow.imgaug
11 |     :members:
12 |     :undoc-members:
13 |     :show-inheritance:
14 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/modules/dataflow.rst:
--------------------------------------------------------------------------------
1 | tensorpack.dataflow package
2 | ===========================
3 |
4 | .. container:: custom-index
5 |
6 |     .. raw:: html
7 |
8 |
9 |
10 |
11 | .. automodule:: tensorpack.dataflow
12 |     :members:
13 |     :undoc-members:
14 |     :show-inheritance:
15 |
16 | tensorpack.dataflow.dftools module
17 | ----------------------------------
18 |
19 | .. automodule:: tensorpack.dataflow.dftools
20 |     :members:
21 |     :undoc-members:
22 |     :show-inheritance:
23 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/modules/graph_builder.rst:
--------------------------------------------------------------------------------
1 | tensorpack.graph_builder package
2 | ================================
3 |
4 | .. automodule:: tensorpack.graph_builder
5 |     :members:
6 |     :undoc-members:
7 |     :show-inheritance:
8 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/modules/index.rst:
--------------------------------------------------------------------------------
1 | API Documentation
2 | --------------------
3 |
4 | .. toctree::
5 |     :maxdepth: 1
6 |
7 |
8 |     dataflow
9 |     dataflow.dataset
10 |     dataflow.imgaug
11 |     input_source
12 |     models
13 |     callbacks
14 |     graph_builder
15 |     train
16 |     predict
17 |     tfutils
18 |     utils
19 |
20 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/modules/input_source.rst:
--------------------------------------------------------------------------------
1 | tensorpack.input_source package
2 | ================================
3 |
4 | .. automodule:: tensorpack.input_source
5 |     :members:
6 |     :undoc-members:
7 |     :show-inheritance:
8 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/modules/models.rst:
--------------------------------------------------------------------------------
1 | tensorpack.models package
2 | =========================
3 |
4 | .. container:: custom-index
5 |
6 |     .. raw:: html
7 |
8 |
9 |
10 |
11 | .. automodule:: tensorpack.models
12 |     :members:
13 |     :undoc-members:
14 |     :show-inheritance:
15 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/modules/predict.rst:
--------------------------------------------------------------------------------
1 | tensorpack.predict package
2 | ==========================
3 |
4 | .. automodule:: tensorpack.predict
5 |     :members:
6 |     :undoc-members:
7 |     :show-inheritance:
8 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/modules/tfutils.rst:
--------------------------------------------------------------------------------
1 | tensorpack.tfutils package
2 | ==========================
3 |
4 | .. container:: custom-index
5 |
6 |     .. raw:: html
7 |
8 |
9 |
10 | tensorpack.tfutils.collection module
11 | ------------------------------------
12 |
13 | .. automodule:: tensorpack.tfutils.collection
14 |     :members:
15 |     :undoc-members:
16 |     :show-inheritance:
17 |
18 | tensorpack.tfutils.gradproc module
19 | ------------------------------------
20 |
21 | .. automodule:: tensorpack.tfutils.gradproc
22 |     :members:
23 |     :undoc-members:
24 |     :show-inheritance:
25 |
26 | tensorpack.tfutils.tower module
27 | ------------------------------------
28 |
29 | .. automodule:: tensorpack.tfutils.tower
30 |     :members:
31 |     :undoc-members:
32 |     :show-inheritance:
33 |
34 | tensorpack.tfutils.scope_utils module
35 | --------------------------------------
36 |
37 | .. automodule:: tensorpack.tfutils.scope_utils
38 |     :members:
39 |     :undoc-members:
40 |     :show-inheritance:
41 |
42 | tensorpack.tfutils.optimizer module
43 | ------------------------------------
44 |
45 | .. automodule:: tensorpack.tfutils.optimizer
46 |     :members:
47 |     :undoc-members:
48 |     :show-inheritance:
49 |
50 | tensorpack.tfutils.sesscreate module
51 | ------------------------------------
52 |
53 | .. automodule:: tensorpack.tfutils.sesscreate
54 |     :members:
55 |     :undoc-members:
56 |     :show-inheritance:
57 |
58 | tensorpack.tfutils.sessinit module
59 | ------------------------------------
60 |
61 | .. automodule:: tensorpack.tfutils.sessinit
62 |     :members:
63 |     :undoc-members:
64 |     :show-inheritance:
65 |
66 | tensorpack.tfutils.summary module
67 | ---------------------------------
68 |
69 | .. automodule:: tensorpack.tfutils.summary
70 |     :members:
71 |     :undoc-members:
72 |     :show-inheritance:
73 |
74 | tensorpack.tfutils.varmanip module
75 | ----------------------------------
76 |
77 | .. automodule:: tensorpack.tfutils.varmanip
78 |     :members:
79 |     :undoc-members:
80 |     :show-inheritance:
81 |
82 | tensorpack.tfutils.varreplace module
83 | ------------------------------------
84 |
85 | .. automodule:: tensorpack.tfutils.varreplace
86 |     :members:
87 |     :undoc-members:
88 |     :show-inheritance:
89 |
90 | Other functions in tensorpack.tfutils module
91 | ---------------------------------------------
92 |
93 | .. autofunction:: tensorpack.tfutils.get_default_sess_config
94 | .. autofunction:: tensorpack.tfutils.get_global_step_var
95 | .. autofunction:: tensorpack.tfutils.get_global_step_value
96 | .. autofunction:: tensorpack.tfutils.argscope
97 | .. autofunction:: tensorpack.tfutils.get_arg_scope
98 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/modules/train.rst:
--------------------------------------------------------------------------------
1 | tensorpack.train package
2 | ========================
3 |
4 | .. container:: custom-index
5 |
6 |     .. raw:: html
7 |
8 |
9 |
10 | .. automodule:: tensorpack.train
11 |     :members:
12 |     :undoc-members:
13 |     :show-inheritance:
14 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/modules/utils.rst:
--------------------------------------------------------------------------------
1 | tensorpack.utils package
2 | ========================
3 |
4 | tensorpack.utils.argtools module
5 | --------------------------------
6 |
7 | .. automodule:: tensorpack.utils.argtools
8 |     :members:
9 |     :undoc-members:
10 |     :show-inheritance:
11 |
12 | tensorpack.utils.concurrency module
13 | -----------------------------------
14 |
15 | .. automodule:: tensorpack.utils.concurrency
16 |     :members:
17 |     :undoc-members:
18 |     :show-inheritance:
19 |
20 |
21 | tensorpack.utils.fs module
22 | --------------------------
23 |
24 | .. automodule:: tensorpack.utils.fs
25 |     :members:
26 |     :undoc-members:
27 |     :show-inheritance:
28 |
29 | tensorpack.utils.loadcaffe module
30 | ---------------------------------
31 |
32 | .. automodule:: tensorpack.utils.loadcaffe
33 |     :members:
34 |     :undoc-members:
35 |     :show-inheritance:
36 |
37 | tensorpack.utils.logger module
38 | ------------------------------
39 |
40 | .. automodule:: tensorpack.utils.logger
41 |     :members:
42 |     :undoc-members:
43 |     :show-inheritance:
44 |
45 |
46 | tensorpack.utils.serialize module
47 | ---------------------------------
48 |
49 | .. automodule:: tensorpack.utils.serialize
50 |     :members:
51 |     :undoc-members:
52 |     :show-inheritance:
53 |
54 | tensorpack.utils.stats module
55 | -----------------------------
56 |
57 | .. automodule:: tensorpack.utils.stats
58 |     :members:
59 |     :undoc-members:
60 |     :show-inheritance:
61 |
62 | tensorpack.utils.timer module
63 | -----------------------------
64 |
65 | .. automodule:: tensorpack.utils.timer
66 |     :members:
67 |     :undoc-members:
68 |     :show-inheritance:
69 |
70 | tensorpack.utils.viz module
71 | ---------------------------
72 |
73 | .. automodule:: tensorpack.utils.viz
74 |     :members:
75 |     :undoc-members:
76 |     :show-inheritance:
77 |
78 | tensorpack.utils.gpu module
79 | ---------------------------
80 |
81 | .. automodule:: tensorpack.utils.gpu
82 |     :members:
83 |     :undoc-members:
84 |     :show-inheritance:
85 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/tutorial/callback.md:
--------------------------------------------------------------------------------
1 |
2 | # Callbacks
3 |
4 | A callback is an interface to do __everything else__ besides the training iterations.
5 |
6 | Apart from the actual training iterations that minimize the cost,
7 | you almost surely want to do other things during training.
8 | There are several places where they could happen:
9 |
10 | * Before the training has started (e.g. initialize the saver, dump the graph)
11 | * Along with each training iteration (e.g. run some other operations in the graph)
12 | * Between training iterations (e.g. update the progress bar, update hyperparameters)
13 | * Between epochs (e.g. save the model, run some validation)
14 | * After the training (e.g. send the model somewhere, send a message to your phone)
15 |
16 | Traditionally, people tend to write the training loop together with these extra features.
17 | This makes the loop lengthy, and the code for one feature gets scattered (imagine a
18 | feature which needs initialization at the beginning and then some actual work between iterations).
19 |
20 | If you instead write each feature as a callback, tensorpack trainers
21 | will invoke it at the proper time.
22 | Therefore these features can be reused with a single line, as long as you are using tensorpack trainers.
23 |
24 | For example, these are the callbacks I used when training a ResNet:
25 |
26 | ```python
27 | callbacks=[
28 |     # save the model every epoch
29 |     ModelSaver(),
30 |     # backup the model with best validation error
31 |     MinSaver('val-error-top1'),
32 |     # run inference on another Dataflow every epoch, compute classification error and log to monitors
33 |     InferenceRunner(dataset_val, [
34 |         ClassificationError('wrong-top1', 'val-error-top1'),
35 |         ClassificationError('wrong-top5', 'val-error-top5')]),
36 |     # schedule the learning rate based on epoch number
37 |     ScheduledHyperParamSetter('learning_rate',
38 |                               [(30, 1e-2), (60, 1e-3), (85, 1e-4), (95, 1e-5)]),
39 |     # can manually change the learning rate through a file, without interrupting training
40 |     HumanHyperParamSetter('learning_rate'),
41 |     # send validation error to my phone through pushbullet
42 |     SendStat('curl -u your_id_xxx: https://api.pushbullet.com/v2/pushes \
43 |               -d type=note -d title="validation error" \
44 |               -d body={val-error-top1} > /dev/null 2>&1',
45 |              'val-error-top1'),
46 |     # record GPU utilizations during training
47 |     GPUUtilizationTracker(),
48 |     # touch a file to pause the training and start a debug shell, to observe what's going on
49 |     InjectShell(shell='ipython'),
50 |     # estimate time until completion
51 |     EstimatedTimeLeft()
52 | ] + [  # these callbacks are enabled by default already, though you can customize them
53 |     # maintain those moving average summaries defined in the model (e.g. training loss, training error)
54 |     MovingAverageSummary(),
55 |     # draw a progress bar
56 |     ProgressBar(),
57 |     # run `tf.summary.merge_all` every epoch and log to monitors
58 |     MergeAllSummaries(),
59 |     # run ops in GraphKeys.UPDATE_OPS collection along with training, if any
60 |     RunUpdateOps(),
61 | ],
62 | monitors=[  # monitors are a special kind of callbacks. these are also enabled by default
63 |     # write everything to tensorboard
64 |     TFEventWriter(),
65 |     # write all scalar data to a json file, for easy parsing
66 |     JSONWriter(),
67 |     # print all scalar data every epoch (can be configured differently)
68 |     ScalarPrinter(),
69 | ]
70 | ```
71 |
72 | Notice that callbacks cover every detail of training, ranging from graph operations to the progress bar.
73 | This means you can customize every part of the training to your preference, e.g. display something
74 | different in the progress bar, evaluate part of the summaries at a different frequency, etc.
75 |
76 | These features may not always be useful, but think about how messy the main loop would look if you
77 | were to write this logic inside the loop, and how easy your life would be if you could enable
78 | these features with one line when you need them.
79 |
80 | See [Write a callback](extend/callback.html)
81 | for details on how callbacks work, what they can do, and how to write them.
82 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/tutorial/dataflow.md:
--------------------------------------------------------------------------------
1 |
2 | # DataFlow
3 |
4 | ### What is DataFlow
5 |
6 | DataFlow is a library to build Python iterators for efficient data loading.
7 |
8 | **Definition**: A DataFlow is something that has a `get_data()` generator method,
9 | which yields `datapoints`.
10 | A datapoint is a **list** of Python objects which are called the `components` of a datapoint.
11 |
12 | **Example**: to train on the MNIST dataset, you may need a DataFlow with a `get_data()` method
13 | that yields datapoints (lists) of two components:
14 | a numpy array of shape (64, 28, 28), and an array of shape (64,).
15 |
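As a minimal sketch, such a DataFlow could be written as follows (the class
and the random data are made up for illustration):

```python
import numpy as np
from tensorpack.dataflow import DataFlow

class FakeMNIST(DataFlow):
    def get_data(self):
        # each datapoint is a list of two components:
        # a batch of "images" and a batch of "labels"
        while True:
            yield [np.random.rand(64, 28, 28),
                   np.random.randint(10, size=(64,))]
```
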
16 | ### Composition of DataFlow
17 | One good thing about having a standard interface is that it enables great
18 | code reusability.
19 | There are a lot of existing DataFlow utilities in tensorpack, which you can use to compose
20 | complex DataFlow with a long data pipeline. A common pipeline usually
21 | would __read from disk (or other sources), apply transformations, group into batches,
22 | prefetch data__, etc. A simple example is as follows:
23 |
24 | ````python
25 | # a DataFlow you implement to produce [tensor1, tensor2, ..] lists from whatever sources:
26 | df = MyDataFlow(dir='/my/data', shuffle=True)
27 | # resize the image component of each datapoint
28 | df = AugmentImageComponent(df, [imgaug.Resize((225, 225))])
29 | # group data into batches of size 128
30 | df = BatchData(df, 128)
31 | # start 3 processes to run the dataflow in parallel
32 | df = PrefetchDataZMQ(df, 3)
33 | ````
34 | You can find more complicated DataFlow in the [ResNet training script](../examples/ResNet/imagenet_utils.py)
35 | with all the data preprocessing.
36 |
37 | Unless you are working with standard data types (image folders, LMDB, etc),
38 | you would usually want to write the source DataFlow (`MyDataFlow` in the above example) for your data format.
39 | See [another tutorial](extend/dataflow.html)
40 | for simple instructions on writing a DataFlow.
41 | Once you have the source reader, all the [existing DataFlows](../modules/dataflow.html) are ready for you to complete
42 | the rest of the data pipeline.
43 |
44 | ### Why DataFlow
45 |
46 | 1. It's easy: write everything in pure Python, and reuse existing utilities.
47 | By contrast, writing data loaders in TF operators is usually painful, and performance is hard to tune.
48 | See more discussions in [Python Reader or TF Reader](input-source.html#python-reader-or-tf-reader).
49 | 2. It's fast: see [Efficient DataFlow](efficient-dataflow.html)
50 | on how to build a fast DataFlow with parallelism.
51 | If you're using DataFlow with tensorpack, also see [Input Pipeline tutorial](input-source.html)
52 | on how tensorpack further accelerates data loading in the graph.
53 |
54 | Nevertheless, tensorpack supports data loading with native TF operators / TF datasets as well.
55 |
56 | ### Use DataFlow (outside Tensorpack)
57 | Normally, the tensorpack `InputSource` interface links a DataFlow to the graph for training.
58 | If you use DataFlow in some custom code, call `reset_state()` first to initialize it,
59 | and then use the generator however you like:
60 | ```python
61 | df = SomeDataFlow()
62 |
63 | df.reset_state()
64 | generator = df.get_data()
65 | for dp in generator:
66 |     # dp is now a list. do whatever
67 | ```
68 |
69 | DataFlow is __independent__ of both tensorpack and TensorFlow.
70 | To `import tensorpack.dataflow`, you don't even have to install TensorFlow.
71 | You can simply use it as a data processing pipeline and plug it into any other frameworks.
72 |
73 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/tutorial/extend/augmentor.md:
--------------------------------------------------------------------------------
1 |
2 | ### Write an Image Augmentor
3 |
4 | The first thing to note: __you never have to write an augmentor__.
5 | An augmentor is a part of the DataFlow, so you can always
6 | [write a DataFlow](dataflow.html)
7 | to do whatever operations to your data, rather than writing an augmentor.
8 | Augmentors just sometimes make things easier.
9 |
10 | An image augmentor maps an image to an image.
11 | If you have such a mapping function `f` already, you can simply use `imgaug.MapImage(f)` as the
12 | augmentor, or use `MapDataComponent(dataflow, f, index)` as the DataFlow.
13 | In other words, for simple mapping you do not need to write an augmentor.
14 |
15 | An augmentor may do something more than just applying a mapping.
16 | The interface you will need to implement is:
17 |
18 | ```python
19 | class MyAug(imgaug.ImageAugmentor):
20 |     def _get_augment_params(self, img):
21 |         # generate random params with self.rng
22 |         return params
23 |
24 |     def _augment(self, img, params):
25 |         return augmented_img
26 |
27 |     # optional method
28 |     def _augment_coords(self, coords, params):
29 |         # coords is a Nx2 floating point array, each row is (x, y)
30 |         return augmented_coords
31 | ```
32 |
33 | It does the following extra things for you:
34 |
35 | 1. `self.rng` is a `np.random.RandomState` object,
36 | guaranteed to have different seeds when you use multiprocess prefetch.
37 | In multiprocess settings, you have to use this rng to generate random numbers.
38 |
39 | 2. The logic of random parameter generation and the actual augmentation is separated in different methods.
40 | This allows you to apply the
41 | same transformation to several images together (with `AugmentImageComponents`),
42 | which is essential to tasks such as segmentation.
43 | Or apply the same transformations to images plus coordinate labels (with `AugmentImageCoordinates`),
44 | which is essential to tasks such as detection and localization.
45 |
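To put this together, a usage sketch (assuming the `MyAug` class above and an
existing DataFlow `df` whose first component is the image):

```python
from tensorpack.dataflow import AugmentImageComponent, imgaug

augmentors = [
    MyAug(),                  # the custom augmentor defined above
    imgaug.Flip(horiz=True),  # compose freely with built-in augmentors
]
df = AugmentImageComponent(df, augmentors, index=0)  # component 0 is the image
```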
--------------------------------------------------------------------------------
/tensorpack-installed/docs/tutorial/extend/dataflow.md:
--------------------------------------------------------------------------------
1 |
2 | ### Write a DataFlow
3 |
4 | #### Write a Source DataFlow
5 |
6 | There are several existing DataFlow, e.g. [ImageFromFile](../../modules/dataflow.html#tensorpack.dataflow.ImageFromFile),
7 | [DataFromList](../../modules/dataflow.html#tensorpack.dataflow.DataFromList),
8 | which you can use if your data format is simple.
9 | In general, you probably need to write a source DataFlow to produce data for your task,
10 | and then compose it with existing modules (e.g. mapping, batching, prefetching, ...).
11 |
12 | The easiest way to create a DataFlow to load custom data is to wrap a custom generator, e.g.:
13 | ```python
14 | def my_data_loader():
15 |     while True:
16 |         # load data from somewhere
17 |         yield [my_array, my_label]
18 |
19 | dataflow = DataFromGenerator(my_data_loader)
20 | ```
21 |
22 | To write a more complicated DataFlow, you need to inherit the base `DataFlow` class.
23 | Usually, you just need to implement the `get_data()` method which yields a datapoint every time.
24 | ```python
25 | class MyDataFlow(DataFlow):
26 |     def get_data(self):
27 |         for k in range(100):
28 |             digit = np.random.rand(28, 28)
29 |             label = np.random.randint(10)
30 |             yield [digit, label]
31 | ```
32 |
33 | Optionally, you can implement the following two methods:
34 |
35 | + `size()`. Return the number of elements the generator can produce. Certain tensorpack features might use it.
36 |
37 | + `reset_state()`. It is guaranteed that the actual process which runs a DataFlow will invoke this method before using it.
38 | So if this DataFlow needs to do something after a `fork()`, you should put it here.
39 | `reset_state()` must be called once and only once for each DataFlow instance.
40 |
41 | A typical example is when your DataFlow uses a random number generator (RNG). Then you would need to reset the RNG here.
42 | Otherwise, child processes will have the same random seed. The `RNGDataFlow` base class does this for you.
43 | You can subclass `RNGDataFlow` to access `self.rng` whose seed has been taken care of.
44 |
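For instance, a minimal `RNGDataFlow` subclass might look like this sketch:

```python
from tensorpack.dataflow import RNGDataFlow

class MyRandomDataFlow(RNGDataFlow):
    def get_data(self):
        # self.rng is a per-instance np.random.RandomState; RNGDataFlow.reset_state()
        # re-seeds it in each process, so forked workers produce different data
        for _ in range(100):
            yield [self.rng.rand(28, 28), self.rng.randint(10)]
```
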
45 | DataFlow implementations for several well-known datasets are provided in the
46 | [dataflow.dataset](../../modules/dataflow.dataset.html)
47 | module, you can take them as a reference.
48 |
49 | #### More Data Processing
50 |
51 | You can put any data processing you need in the source DataFlow you write, or you can write a new DataFlow for
52 | data processing on top of the source DataFlow (remember to also forward `reset_state()` to the wrapped DataFlow), e.g.:
53 |
54 | ```python
55 | class ProcessingDataFlow(DataFlow):
56 |     def __init__(self, ds):
57 |         self.ds = ds
58 |
59 |     def get_data(self):
60 |         for datapoint in self.ds.get_data():
61 |             # do something
62 |             yield new_datapoint
63 | ```
64 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/tutorial/extend/model.md:
--------------------------------------------------------------------------------
1 |
2 | ## Write a Layer
3 |
4 | The first thing to note: __you never have to write a layer__.
5 | Tensorpack layers are nothing but wrappers of symbolic functions.
6 | You can use any symbolic functions you have written or seen elsewhere with or without tensorpack layers.
7 |
8 | If you would like, you can turn a symbolic function into a "layer" by following some simple rules, and then gain benefits from the framework.
9 |
10 | Take a look at the [Convolutional Layer](../../tensorpack/models/conv2d.py#L14) implementation for an example of how to define a layer:
11 |
12 | ```python
13 | @layer_register(log_shape=True)
14 | def Conv2D(x, out_channel, kernel_shape,
15 |            padding='SAME', stride=1,
16 |            W_init=None, b_init=None,
17 |            nl=tf.nn.relu, split=1, use_bias=True):
18 | ```
19 |
20 | Basically, a tensorpack layer is just a symbolic function, but with the following rules:
21 |
22 | + It is decorated by `@layer_register`.
23 | + The first argument is its "input". It must be a **tensor or a list of tensors**.
24 | + It returns either a tensor or a list of tensors as its "output".
25 |
26 |
27 | By making a symbolic function a "layer", the following things will happen:
28 | + You will need to call the function with a scope name as the first argument, e.g. `Conv2D('conv0', x, 32, 3)`.
29 | Everything happening in this function will be under the variable scope `conv0`.
30 | You can register the layer with `use_scope=False` to disable this feature.
31 | + Static shapes of input/output will be printed to screen (if you register with `log_shape=True`).
32 | + `argscope` will work for all its arguments except the input tensor(s).
33 | + It will work with `LinearWrap`: you can use it if the output of one layer matches the input of the next layer.
34 |
35 | There are also some (non-layer) symbolic functions in the `tfutils.symbolic_functions` module.
36 | There is no rule about what kind of symbolic functions should be made a layer -- they are quite
37 | similar anyway. However, in general, I define the following symbolic functions as layers:
38 | + Functions which contain variables. A variable scope is almost always helpful for such functions.
39 | + Functions which are commonly referred to as "layers", such as pooling. This makes a model
40 | definition more straightforward.
41 |
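Putting the rules together, a minimal registered layer might look like the
following sketch (`Swish` is an illustration, not a built-in tensorpack layer):

```python
import tensorflow as tf
from tensorpack import layer_register

@layer_register(log_shape=True)
def Swish(x):
    # no variables here, just a symbolic op; the layer still benefits
    # from scoping, shape logging and argscope
    return x * tf.sigmoid(x)

# called with a scope name as the first argument:
# out = Swish('swish0', some_tensor)
```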
42 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/tutorial/extend/trainer.md:
--------------------------------------------------------------------------------
1 | ## Understand Trainer
2 |
3 | ### Role of Trainer
4 |
5 | Tensorpack follows the "define-and-run" paradigm. Training has two steps:
6 |
7 | 1. __Define__: Build graph for the model.
8 | Users can call whatever TensorFlow functions they need to set up the graph.
9 | Users may or may not use tensorpack `InputSource`, `ModelDesc` or other utilities to build the graph.
10 | The goal of this step is to define "what to run" in later training steps,
11 | and it can happen __either inside or outside__ tensorpack trainer.
12 |
13 | 2. __Run__: Train the model (the [Trainer.train() method](../modules/train.html#tensorpack.train.Trainer.train)):
14 |
15 | 1. Setup callbacks/monitors.
16 | 2. Finalize graph, initialize session.
17 | 3. Run the training loop.
18 |
19 |
20 | ### Assumptions of Base Trainer
21 |
22 | * Q: What types of training can you do with tensorpack?
23 | * A: Anything that runs in a loop.
24 |
25 | In research we do training of various kinds.
26 | Tensorpack trainers avoid making assumptions on what type of training
27 | you want to do (e.g., it doesn't have to be batched, SGD-like, or have `X`(inputs) and `y`(outputs)).
28 | The only assumption is that your training follows this pattern:
29 | ```python
30 | for epoch_num in range(starting_epoch, max_epoch):
31 |     for local_step in range(steps_per_epoch):
32 |         run_step()
33 | ```
34 |
35 | 1. Training is **running some iterations**.
36 | Tensorpack base trainer implements the logic of __running the iteration__.
37 | Users or derived trainers should implement __what the iteration is__.
38 |
39 | 2. Trainer assumes the existence of __"epoch"__, i.e. that the iterations run in double for-loops.
40 | But `steps_per_epoch` can be any number you set
41 | and it only affects the [schedule of callbacks](extend/callback.html).
42 | In other words, an "epoch" in tensorpack is the __default period to run callbacks__ (validation, summary, checkpoint, etc.).
43 |
44 |
45 | ### How Existing (Single-Cost) Trainers Work
46 |
47 | Most neural network training tasks are single-cost optimization.
48 | Tensorpack provides some trainer implementations for such tasks.
49 | These trainers will take care of step 1 (define the graph), with the following arguments:
50 |
51 | 1. Some `InputDesc`, the metadata about the input.
52 | 2. An `InputSource`, where the input come from. See [Input Pipeline](input-source.html).
53 | 3. A function which takes input tensors and returns the cost.
54 | 4. A function which returns an optimizer.
55 |
56 | These are documented in [SingleCostTrainer.setup_graph](../modules/train.html#tensorpack.train.SingleCostTrainer.setup_graph).
57 | In practice you'll not use this method directly, but use [high-level interface](training-interface.html#with-modeldesc-and-trainconfig) instead.
58 |
59 |
60 | ### Write a Trainer
61 |
62 | The existing trainers should be enough for single-tower single-cost optimization tasks.
63 | If you just want to do some extra work during training, first consider writing it as a callback,
64 | or write an issue to see if there is a better solution than creating new trainers.
65 | If your task is fundamentally different from single-cost optimization, you will need to write a trainer.
66 |
67 | You can customize the training by either using or inheriting the base `Trainer` class.
68 | You will need to define two things for a new Trainer:
69 |
70 | 1. Define the graph.
71 | Add any tensors and ops you like, either before creating the trainer or inside `Trainer.__init__`.
72 |
73 | 2. What is the iteration. There are 2 ways to define the iteration:
74 | 1. Set `Trainer.train_op`. This op will be run by default.
75 | 2. Subclass `Trainer` and override the `run_step()` method, as sketched below. This way you can do something more than running an op.
76 |
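For example (a minimal sketch, assuming the graph, including a `train_op`, was already built):

```python
from tensorpack.train import Trainer

class MyTrainer(Trainer):
    def __init__(self, train_op):
        super(MyTrainer, self).__init__()
        self.train_op = train_op

    def run_step(self):
        # one "iteration": run the op, plus anything else you need
        self.hooked_sess.run(self.train_op)
```
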
77 | There are several different [GAN trainers](../../examples/GAN/GAN.py) for reference.
78 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/tutorial/faq.md:
--------------------------------------------------------------------------------
1 |
2 | # FAQs
3 |
4 | ## Does it support data format X / augmentation Y / layer Z?
5 |
6 | The library tries to __support__ everything, but it could not really __include__ everything.
7 |
8 | The interface attempts to be flexible enough so you can put any XYZ on it.
9 | You can either implement them under the interface or simply wrap some existing Python code.
10 | See [Extend Tensorpack](index.html#extend-tensorpack)
11 | for more details.
12 |
13 | If you think:
14 | 1. The framework has limitations in its interface so your XYZ cannot be supported, OR
15 | 2. Your XYZ is super common / very well-defined / very useful, so it would be nice to include it.
16 |
17 | Then it is a good time to open an issue.
18 |
19 | ## How to print/dump intermediate results in training
20 |
21 | 1. Learn `tf.Print`.
22 |
23 | 2. Know [DumpTensors](../modules/callbacks.html#tensorpack.callbacks.DumpTensors),
24 | [ProcessTensors](../modules/callbacks.html#tensorpack.callbacks.ProcessTensors) callbacks.
25 | And it's also easy to write your own version of them.
26 |
27 | 3. The [ProgressBar](../modules/callbacks.html#tensorpack.callbacks.ProgressBar)
28 | callback can print some scalar statistics, though not enabled by default.
29 |
30 | 4. Read [Summary and Logging](summary.html) for more options on logging.
31 |
32 | ## How to freeze some variables in training
33 |
34 | 1. Learn `tf.stop_gradient`. You can simply use `tf.stop_gradient` in your model code in many situations (e.g. to freeze first several layers).
35 |
36 | 2. [varreplace.freeze_variables](../modules/tfutils.html#tensorpack.tfutils.varreplace.freeze_variables) returns a context where variables are frozen (see the sketch below).
37 | It is implemented by the `custom_getter` argument of `tf.variable_scope` -- learn it to gain more control over what and how variables are frozen.
38 |
39 | 3. [ScaleGradient](../modules/tfutils.html#tensorpack.tfutils.gradproc.ScaleGradient) can be used to set the gradients of some variables to 0.
40 | But it may be slow, since variables still have gradients.
41 |
42 | Note that the above methods only prevent variables from being updated by SGD.
43 | Some variables may be updated by other means,
44 | e.g., BatchNorm statistics are updated through the `UPDATE_OPS` collection and the [RunUpdateOps](../modules/callbacks.html#tensorpack.callbacks.RunUpdateOps) callback.
45 |
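A sketch of option 2 (both keyword arguments exist on `freeze_variables`; check the linked docs for their exact defaults):

```python
from tensorpack.tfutils.varreplace import freeze_variables

with freeze_variables(stop_gradient=False, skip_collection=True):
    # variables created under this context will not be trained
    l = Conv2D('frozen_conv', l, 32, 3)
```
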
46 | ## My training is slow!
47 |
48 | Checkout the [Performance Tuning tutorial](performance-tuning.html)
49 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/tutorial/index.rst:
--------------------------------------------------------------------------------
1 |
2 | Tutorials
3 | ---------------------
4 |
5 | Introduction
6 | =============
7 |
8 | .. include:: intro.rst
9 |
10 | User Tutorials
11 | ========================
12 |
13 | .. toctree::
14 | :maxdepth: 1
15 |
16 | dataflow
17 | input-source
18 | symbolic
19 | trainer
20 | training-interface
21 | callback
22 | save-load
23 | summary
24 | inference
25 | faq
26 |
27 |
28 | Performance
29 | ============
30 |
31 | .. toctree::
32 | :maxdepth: 1
33 |
34 | efficient-dataflow
35 | performance-tuning
36 |
37 |
38 | Extend Tensorpack
39 | ==================
40 |
41 | .. toctree::
42 | :maxdepth: 1
43 |
44 | extend/dataflow
45 | extend/augmentor
46 | extend/model
47 | extend/callback
48 | extend/trainer
49 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/tutorial/inference.md:
--------------------------------------------------------------------------------
1 |
2 | # Inference
3 |
4 | ## Inference During Training
5 |
6 | There are two ways to do inference during training.
7 |
8 | 1. The easiest way is to write a callback, and use
9 | [self.trainer.get_predictor()](../modules/train.html#tensorpack.train.TowerTrainer.get_predictor)
10 | to get a callable under inference mode.
11 | See [Write a Callback](extend/callback.html).
12 |
13 | 2. If your inference follows the paradigm of
14 | "fetch some tensors for each input, and aggregate the results",
15 | you can use the `InferenceRunner` interface with some `Inferencer`.
16 | This will further support prefetch & data-parallel inference.
17 | More details to come.
18 |
19 | In both methods, your tower function will be called again, with `TowerContext.is_training==False`.
20 | You can build a different graph using this predicate.
21 |
22 | ## Inference After Training
23 |
24 | Tensorpack doesn't care what happened after training.
25 | It saves models to standard checkpoint format, plus a metagraph protobuf file.
26 | They are sufficient to use with whatever deployment methods TensorFlow supports.
27 | But you'll need to read TF docs and do it on your own.
28 |
29 | Please note that the metagraph saved during training is the training graph.
30 | But sometimes you need a different one for inference.
31 | For example, you may need a different data layout for CPU inference,
32 | or you may need placeholders in the inference graph, or the training graph contains multi-GPU replication
33 | which you want to remove. In fact, directly importing a huge training metagraph is usually not a good idea for deployment.
34 |
35 | In this case, you can always construct a new graph by simply:
36 | ```python
37 | a, b = tf.placeholder(...), tf.placeholder(...)
38 | # call symbolic functions on a, b
39 | ```
40 |
41 | The only tool tensorpack has for after-training inference is [OfflinePredictor](../modules/predict.html#tensorpack.predict.OfflinePredictor),
42 | a simple function to build the graph and return a callable for you.
43 | It is mainly for quick demo purposes.
44 | It only runs inference on numpy arrays, and therefore may not be the most efficient.
45 | Check out examples and docs for its usage.
46 |
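For reference, a typical `OfflinePredictor` setup looks roughly like this
(the model, tensor names and checkpoint path are placeholders):

```python
from tensorpack.predict import PredictConfig, OfflinePredictor
from tensorpack.tfutils.sessinit import get_model_loader

pred_config = PredictConfig(
    model=MyModel(),                                    # your ModelDesc
    session_init=get_model_loader('/path/to/checkpoint'),
    input_names=['input'],                              # placeholder names in the graph
    output_names=['prob'])                              # tensors to fetch
predictor = OfflinePredictor(pred_config)
prob, = predictor(image_batch)                          # numpy in, numpy out
```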
--------------------------------------------------------------------------------
/tensorpack-installed/docs/tutorial/intro.rst:
--------------------------------------------------------------------------------
1 |
2 | What is tensorpack?
3 | ~~~~~~~~~~~~~~~~~~~
4 |
5 | Tensorpack is a **training interface** based on TensorFlow, which means:
6 | you'll mostly use tensorpack's high-level APIs to do training, rather than TensorFlow's low-level APIs.
7 |
8 | Why tensorpack?
9 | ~~~~~~~~~~~~~~~~~~~
10 |
11 | TensorFlow is powerful, but at the same time too complicated for a lot of people.
12 | Users will have to worry a lot about things unrelated to the model, especially when **speed** is a concern.
13 | Code written with low-level APIs or other existing high-level wrappers is often suboptimal in speed.
14 | Even a lot of official TensorFlow examples are written for simplicity rather than efficiency,
15 | which as a result makes people think TensorFlow is slow.
16 |
17 | The `official TensorFlow benchmark <https://github.com/tensorflow/benchmarks>`_ said this in their README:
18 |
19 |     These models are designed for performance. For models that have clean and easy-to-read implementations, see the TensorFlow Official Models.
20 |
21 | which seems to suggest that you cannot have performance and ease-of-use together.
22 | However, you can have both in tensorpack.
23 | Tensorpack uses TensorFlow efficiently, and hides performance details under its APIs.
24 | You no longer need to write
25 | data prefetching, multi-GPU replication, device placement, variable synchronization -- anything that's unrelated to the model itself.
26 | You still need to understand the graph and learn to write models with TF, but performance is all taken care of by tensorpack.
27 |
28 | A High Level Glance
29 | ~~~~~~~~~~~~~~~~~~~
30 |
31 | .. image:: https://user-images.githubusercontent.com/1381301/29187907-2caaa740-7dc6-11e7-8220-e20ca52c3ca6.png
32 |
33 |
34 | * ``DataFlow`` is a library to load data efficiently in Python.
35 |   Apart from DataFlow, native TF operators can be used for data loading as well.
36 |   They will eventually be wrapped under the same ``InputSource`` interface and go through prefetching.
37 |
38 | * You can use any TF-based symbolic function library to define a model, including
39 |   a small set of functions within tensorpack. ``ModelDesc`` is an interface to connect the model with the
40 |   ``InputSource`` interface.
41 |
42 | * Tensorpack trainers manage the training loops for you.
43 |   They also include data parallel logic for multi-GPU or distributed training.
44 |   At the same time, you have the power of customization through callbacks.
45 |
46 | * Callbacks are like ``tf.train.SessionRunHook``, or plugins. During training,
47 |   everything you want to do other than the main iterations can be defined through callbacks and easily reused.
48 |
49 | * All the components, though they work perfectly together, are highly decoupled. You can:
50 |
51 |   * Use DataFlow alone as a data loading library, without TensorFlow at all.
52 |   * Use tensorpack to build the graph with multi-GPU or distributed support,
53 |     then train it with your own loops.
54 |   * Build the graph on your own, and train it with tensorpack callbacks.
55 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/tutorial/save-load.md:
--------------------------------------------------------------------------------
1 |
2 | # Save and Load models
3 |
4 | ## Work with TF Checkpoint
5 |
6 | The `ModelSaver` callback saves the model to `logger.get_logger_dir()`,
7 | in TensorFlow checkpoint format.
8 | One checkpoint typically includes a `.data-xxxxx` file and a `.index` file.
9 | Both are necessary.
10 |
11 | `tf.train.NewCheckpointReader` is the best tool to parse a TensorFlow checkpoint.
12 | We have two example scripts to demonstrate its usage, but read the [TF docs](https://www.tensorflow.org/api_docs/python/tf/train/NewCheckpointReader) for details.
13 |
14 | [scripts/ls-checkpoint.py](../scripts/ls-checkpoint.py)
15 | demos how to print all variables and their shapes in a checkpoint.
16 |
17 | [scripts/dump-model-params.py](../scripts/dump-model-params.py) can be used to remove unnecessary variables in a checkpoint.
18 | It takes a metagraph file (which is also saved by `ModelSaver`) and only saves variables that the model needs at inference time.
19 | It can dump the model to a `var-name: value` dict saved in npz format.
20 |
21 | ## Load a Model
22 |
23 | Model loading (in either training or testing) is through the `session_init` interface.
24 | Currently there are two ways a session can be restored:
25 | [session_init=SaverRestore(...)](../modules/tfutils.html#tensorpack.tfutils.sessinit.SaverRestore)
26 | which restores a TF checkpoint,
27 | or [session_init=DictRestore(...)](../modules/tfutils.html#tensorpack.tfutils.sessinit.DictRestore) which restores a dict
28 | ([get_model_loader](../modules/tfutils.html#tensorpack.tfutils.sessinit.get_model_loader)
29 | is a small helper to decide which one to use from a file name).
30 | To load multiple models, use [ChainInit](../modules/tfutils.html#tensorpack.tfutils.sessinit.ChainInit).
31 |
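For example, restoring from an npz dump (such as one produced by
`dump-model-params.py`) is a sketch like:

```python
import numpy as np
from tensorpack.tfutils.sessinit import DictRestore

param_dict = dict(np.load('model.npz'))  # path is a placeholder
session_init = DictRestore(param_dict)   # pass this to TrainConfig/PredictConfig
```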
32 |
33 | Variable restoring is completely based on __name match__ between
34 | variables in the current graph and variables in the `session_init` initializer.
35 | Variables that appear on only one side will be printed as a warning.
36 |
37 | ## Transfer Learning
38 | Because restoring is purely based on name matching, transfer learning is trivial.
39 | If you want to load some model, just use the same variable names.
40 | If you want to re-train some layer, just rename it.
41 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/tutorial/summary.md:
--------------------------------------------------------------------------------
1 | # Summary and Logging
2 |
3 | During training, everything other than the iterations is executed through callbacks.
4 | This tutorial will explain how summaries and logging are handled in callbacks and how you can customize them.
5 | The default logging behavior should be good enough for normal use cases, so you may skip this tutorial.
6 |
7 | ### TensorFlow Summaries
8 |
9 | This is how TensorFlow summaries eventually get logged/saved/printed:
10 |
11 | 1. __What to Log__: When you call `tf.summary.xxx` in your graph code, TensorFlow adds an op to
12 | `tf.GraphKeys.SUMMARIES` collection (by default).
13 | 2. __When to Log__: [MergeAllSummaries](../modules/callbacks.html#tensorpack.callbacks.MergeAllSummaries)
14 | callback is in the [default callbacks](../modules/train.html#tensorpack.train.DEFAULT_CALLBACKS).
15 | It runs ops in the `SUMMARIES` collection (by default) every epoch (by default),
16 | and writes results to the monitors.
17 | 3. __Where to Log__:
18 | Several monitors are [enabled by default](../modules/train.html#tensorpack.train.DEFAULT_MONITORS).
19 | * A [TFEventWriter](../modules/callbacks.html#tensorpack.callbacks.TFEventWriter)
20 | writes things to an event file used by tensorboard.
21 | * A [ScalarPrinter](../modules/callbacks.html#tensorpack.callbacks.ScalarPrinter)
22 | prints all scalars in your terminal.
23 | * A [JSONWriter](../modules/callbacks.html#tensorpack.callbacks.JSONWriter)
24 | saves scalars to a JSON file.
25 |
26 | All the "what, when, where" can be customized in either the graph or with the callbacks/monitors setting.
27 |
28 | Since TF summaries are evaluated infrequently (every epoch) by default, if the content is data-dependent, the values
29 | could have high variance. To address this issue, you can:
30 | 1. Change "When to Log": log more frequently, but note that certain summaries can be expensive to
31 | log. You may want to use a separate collection for frequent logging.
32 | 2. Change "What to Log": you can call
33 | [tfutils.summary.add_moving_summary](../modules/tfutils.html#tensorpack.tfutils.summary.add_moving_summary)
34 | on scalar tensors, which will summarize the moving average of those scalars, instead of their instant values.
35 | The moving averages are maintained by the
36 | [MovingAverageSummary](../modules/callbacks.html#tensorpack.callbacks.MovingAverageSummary)
37 | callback (enabled by default), as sketched below.
38 |
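For instance, assuming `cost` is a scalar tensor in your graph:

```python
from tensorpack.tfutils.summary import add_moving_summary

# summarize the moving average of `cost`, rather than its instant value
add_moving_summary(cost)
```
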
39 | ### Other Logging Data
40 |
41 | Besides TensorFlow summaries,
42 | a callback can also write other data to the monitor backend anytime after training has started,
43 | via `self.trainer.monitors.put_xxx`.
44 | As long as the type of data is supported, the data will be dispatched to and logged to the same place.
45 |
46 | As a result, tensorboard will show not only summaries in the graph, but also your custom data.
47 | For example, a precise validation error often needs to be computed manually, outside the TensorFlow graph.
48 | With a uniform monitor backend, this number will show up in tensorboard as well.
49 |
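For instance, inside a callback you might write (the metric name and value are
examples; see the monitors API for the available `put_xxx` methods):

```python
# e.g. in a Callback's _trigger_epoch():
self.trainer.monitors.put_scalar('val-error', error_rate)
```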
--------------------------------------------------------------------------------
/tensorpack-installed/docs/tutorial/symbolic.md:
--------------------------------------------------------------------------------
1 |
2 | # Symbolic Layers
3 |
4 | While you can use other symbolic libraries,
5 | tensorpack also contains a small collection of common model primitives,
6 | such as conv/deconv, fc, bn, pooling layers.
7 | Using the tensorpack implementations, you can also benefit from `argscope` and `LinearWrap` to
8 | simplify the code.
9 |
10 | Note that these layers were written because there were no other alternatives at the time.
11 | Now, these layers actually call `tf.layers` directly.
12 | You can just use `tf.layers` as long as it fits your need.
13 |
14 | ### argscope and LinearWrap
15 | `argscope` gives you a context with default arguments.
16 | `LinearWrap` is a syntax sugar to simplify building "linear structure" models.
17 |
18 | The following code:
19 | ```python
20 | with argscope(Conv2D, filters=32, kernel_size=3, activation=tf.nn.relu):
21 |     l = (LinearWrap(image)  # the starting brace is only for line-breaking
22 |          .Conv2D('conv0')
23 |          .MaxPooling('pool0', 2)
24 |          .Conv2D('conv1', padding='SAME')
25 |          .Conv2D('conv2', kernel_size=5)
26 |          .FullyConnected('fc0', 512, activation=tf.nn.relu)
27 |          .Dropout('dropout', rate=0.5)
28 |          .tf.multiply(0.5)
29 |          .apply(func, *args, **kwargs)
30 |          .FullyConnected('fc1', units=10, activation=tf.identity)())
31 | ```
32 | is equivalent to:
33 | ```python
34 | l = Conv2D('conv0', image, 32, 3, activation=tf.nn.relu)
35 | l = MaxPooling('pool0', l, 2)
36 | l = Conv2D('conv1', l, 32, 3, padding='SAME', activation=tf.nn.relu)
37 | l = Conv2D('conv2', l, 32, 5, activation=tf.nn.relu)
38 | l = FullyConnected('fc0', l, 512, activation=tf.nn.relu)
39 | l = Dropout('dropout', l, rate=0.5)
40 | l = tf.multiply(l, 0.5)
41 | l = func(l, *args, **kwargs)
42 | l = FullyConnected('fc1', l, 10, activation=tf.identity)
43 | ```
44 |
45 | ### Access Relevant Tensors
46 |
47 | The variables inside the layer will be named `name/W`, `name/b`, etc.
48 | See the API documentation of each layer for details.
49 | When building the graph, you can access the variables like this:
50 | ```python
51 | l = Conv2D('conv1', l, 32, 3)
52 | print(l.variables.W)
53 | print(l.variables.b)
54 | ```
55 | But note that this is a hacky way and may not work with future versions of TensorFlow.
56 | Also this method doesn't work with LinearWrap, and cannot access the variables created by an activation function.
57 |
58 | The output of a layer is usually named `name/output` unless documented differently in the API.
59 | You can always print a tensor to see its name.
60 |
61 | ### Use Models outside Tensorpack
62 |
63 | You can use tensorpack models alone as a simple symbolic function library.
64 | To do this, just enter a [TowerContext](../modules/tfutils.html#tensorpack.tfutils.TowerContext)
65 | when you define your model:
66 | ```python
67 | with TowerContext('', is_training=True):
68 |     # call any tensorpack layer
69 | ```
70 |
71 | Some layers (in particular ``BatchNorm``) have different train/test-time behavior, which is controlled
72 | by ``TowerContext``. If you need to use the tensorpack version of them at test time, you'll need to create the ops for them under another context.
73 | ```python
74 | # Open a `reuse=True` variable scope here if you're sharing variables, then:
75 | with TowerContext('some_name_or_empty_string', is_training=False):
76 |     # build the graph again
77 | ```
78 |
79 | ### Use Other Symbolic Libraries within Tensorpack
80 |
81 | When defining the model you can construct the graph using whatever library you feel comfortable with.
82 |
83 | Usually, slim/tflearn/tensorlayer are just symbolic function wrappers; calling them is no different
84 | from calling `tf.add`. You may need to be careful about how regularizations/BN updates are supposed
85 | to be handled in those libraries, though.
86 |
87 | Using sonnet/Keras is a bit different.
88 | sonnet/Keras manage variable scopes with their own model classes, and calling their symbolic functions
89 | always creates a new variable scope. See the [Keras example](../examples/keras) for how to use it within tensorpack.
90 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/tutorial/trainer.md:
--------------------------------------------------------------------------------
1 |
2 | # Trainers
3 |
4 | Tensorpack trainers contain logic of:
5 |
6 | 1. Building the graph.
7 | 2. Running the iterations (with callbacks).
8 |
9 | Usually you won't touch these methods directly, but use
10 | the [higher-level interface](training-interface.html) on trainers.
11 | You'll only need to __select__ what trainer to use.
12 | But some basic knowledge of how they work is useful:
13 |
14 | ### Tower Trainer
15 |
16 | Following the terminology in TensorFlow,
17 | a __tower function__ is a callable that takes input tensors and adds __one replicate__ of the model to the graph.
18 |
19 | Most types of neural-network training could fall into this category.
20 | All trainers in tensorpack are subclasses of [TowerTrainer](../modules/train.html#tensorpack.train.TowerTrainer).
21 | The concept of tower is used mainly to support:
22 |
23 | 1. Data-parallel multi-GPU training, where a replicate is built on each GPU.
24 | 2. Graph construction for inference, where a replicate is built under inference mode.
25 |
26 | You'll provide a tower function to use `TowerTrainer`.
27 | The function needs to follow some conventions:
28 |
29 | 1. It will always be called under a `TowerContext`,
30 | which will contain information about reuse, training/inference, scope name, etc.
31 | 2. __It might get called multiple times__ for data-parallel training or inference.
32 | 3. To respect variable reuse, use `tf.get_variable` instead of
33 | `tf.Variable` in the function, unless you want to force creation of new variables.
34 |
35 | In particular, when working with the `ModelDesc` interface, its `build_graph` method will be the tower function.
36 |
37 | ### MultiGPU Trainers
38 |
39 | For data-parallel multi-GPU training, different [multi-GPU trainers](../modules/train.html)
40 | implement different parallel logic.
41 | They take care of device placement, gradient averaging and synchronization
42 | in an efficient way, and all reach the same performance as the
43 | [official TF benchmarks](https://www.tensorflow.org/performance/benchmarks).
44 | It takes only one line of code change to use them.
45 |
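For example (a sketch; `config` is a `TrainConfig` as in the
[training interface tutorial](training-interface.html)):

```python
from tensorpack.train import (SimpleTrainer,
    SyncMultiGPUTrainerReplicated, launch_train_with_config)

# single GPU:
launch_train_with_config(config, SimpleTrainer())
# the one-line change for data-parallel training on 4 GPUs:
launch_train_with_config(config, SyncMultiGPUTrainerReplicated(4))
```
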
46 | Note some __common problems__ when using these trainers:
47 |
48 | 1. In each iteration, all GPUs (all replicates of the model) take tensors from the `InputSource`,
49 | instead of taking one tensor and splitting it among them.
50 | So the total batch size would be ``(batch size of InputSource/DataFlow) * #GPU``.
51 |
52 | Splitting a tensor for data-parallel training makes no sense at all, only to put unnecessary shape constraints on the data.
53 | By letting each GPU train on its own input tensors, they can train on inputs of different shapes simultaneously.
54 |
55 | 2. The tower function (your model code) will get called multiple times.
56 | You'll need to be very careful when modifying global states in those functions, e.g. adding ops to TF collections.
57 |
--------------------------------------------------------------------------------
/tensorpack-installed/docs/tutorial/training-interface.md:
--------------------------------------------------------------------------------
1 |
2 | # Training Interface
3 |
4 | Tensorpack trainers have a verbose interface for maximum flexibility.
5 | Then, there are interfaces built on top of trainers to simplify the use,
6 | when you don't want to customize too much.
7 |
8 | ### With ModelDesc and TrainConfig
9 |
10 | This is an interface that's most familiar to old tensorpack users,
11 | and is used for single-cost tasks only.
12 | A lot of examples are written in this interface.
13 |
14 | [SingleCost trainers](../modules/train.html#tensorpack.train.SingleCostTrainer)
15 | expect 4 arguments to set up the graph: `InputDesc`, `InputSource`, a get_cost function, and an optimizer.
16 | `ModelDesc` describes a model by packing 3 of them together into one object:
17 |
18 | ```python
19 | class MyModel(ModelDesc):
20 |     def inputs(self):
21 |         return [tf.placeholder(dtype, shape, name), tf.placeholder(dtype, shape, name), ... ]
22 |
23 |     def _build_graph(self, inputs):
24 |         tensorA, tensorB = inputs
25 |         # build the graph
26 |         self.cost = xxx  # define the cost tensor
27 |
28 |     def _get_optimizer(self):
29 |         return tf.train.GradientDescentOptimizer(0.1)
30 | ```
31 |
32 | `inputs` should define the metainfo of all the inputs your graph will take to build.
33 |
34 | `_build_graph` takes a list of input tensors that matches what `inputs` defines.
35 |
36 | You can use any symbolic functions in `_build_graph`, including TensorFlow core library
37 | functions and other symbolic libraries.
38 | `_build_graph` will be the tower function,
39 | so you need to follow [some rules](trainer.md#tower-trainer).
40 | You also need to set `self.cost` in this function.
41 |
42 | After defining such a model, use it with `TrainConfig` and `launch_train_with_config`:
43 |
44 | ```python
45 | config = TrainConfig(
46 |     model=MyModel(),
47 |     dataflow=my_dataflow,
48 |     # data=my_inputsource, # alternatively, use a customized InputSource
49 |     callbacks=[...],  # some default callbacks are automatically applied
50 |     # some default monitors are automatically applied
51 |     steps_per_epoch=300,  # default to the size of your InputSource/DataFlow
52 | )
53 |
54 | trainer = SomeTrainer()
55 | # trainer = SyncMultiGPUTrainerParameterServer(8)
56 | launch_train_with_config(config, trainer)
57 | ```
58 | See the docs of
59 | [TrainConfig](../modules/train.html#tensorpack.train.TrainConfig)
60 | and
61 | [launch_train_with_config](../modules/train.html#tensorpack.train.launch_train_with_config)
62 | for detailed functionalities.
63 |
64 | ### Raw Trainer Interface
65 |
66 | For lower-level control, you can also access methods of the trainer directly:
67 |
68 | __Build__ the graph: for a general trainer, build the graph by yourself.
69 | For a single-cost trainer, build the graph with
70 | [SingleCostTrainer.setup_graph](../modules/train.html#tensorpack.train.SingleCostTrainer.setup_graph).
71 |
72 | __Run__ the iterations: Call
73 | [Trainer.train()](../modules/train.html#tensorpack.train.Trainer.train),
74 | or
75 | [Trainer.train_with_defaults()](../modules/train.html#tensorpack.train.Trainer.train_with_defaults)
76 | which applies some default options for normal use cases.
77 |
78 | Read the API documentation for detailed usage; a rough sketch of the workflow follows.
79 |
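A rough sketch of this workflow for a single-cost model (`inputs_desc`, `my_inputsource`,
`get_cost_fn` and `get_opt_fn` are placeholders for your own objects; check the linked
API docs for the exact `setup_graph` signature):

```python
from tensorpack import SimpleTrainer

trainer = SimpleTrainer()
# Build the graph from input metainfo, an InputSource, a cost function,
# and a function returning the optimizer:
trainer.setup_graph(inputs_desc, my_inputsource, get_cost_fn, get_opt_fn)
# Run the iterations with default callbacks and monitors:
trainer.train_with_defaults(callbacks=[], steps_per_epoch=300, max_epoch=100)
```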
--------------------------------------------------------------------------------
/tensorpack-installed/readthedocs.yml:
--------------------------------------------------------------------------------
1 | formats:
2 | - none
3 | requirements_file: docs/requirements.txt
4 |
5 | python:
6 | version: 3.5
7 |
--------------------------------------------------------------------------------
/tensorpack-installed/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | six
3 | termcolor>=1.1
4 | tabulate>=0.7.7
5 | tqdm>4.11.1
6 | pyarrow>=0.9.0
7 | pyzmq>=16
8 | subprocess32; python_version < '3.0'
9 | functools32; python_version < '3.0'
10 |
--------------------------------------------------------------------------------
/tensorpack-installed/scripts/checkpoint-manipulate.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: checkpoint-manipulate.py
4 | # Author: Yuxin Wu
5 |
6 |
7 | import numpy as np
8 | from tensorpack.tfutils.varmanip import dump_chkpt_vars
9 | from tensorpack.utils import logger
10 | import argparse
11 |
12 |
13 | if __name__ == '__main__':
14 | parser = argparse.ArgumentParser()
15 | parser.add_argument('model')
16 | parser.add_argument('--dump', help='dump to an npz file')
17 | parser.add_argument('--shell', action='store_true', help='start a shell with the params')
18 | args = parser.parse_args()
19 |
20 | if args.model.endswith('.npy'):
21 | params = np.load(args.model, encoding='latin1').item()
22 | elif args.model.endswith('.npz'):
23 | params = dict(np.load(args.model))
24 | else:
25 | params = dump_chkpt_vars(args.model)
26 | logger.info("Variables in the model:")
27 | logger.info(str(params.keys()))
28 |
29 | if args.dump:
30 | assert args.dump.endswith('.npz'), args.dump
31 |         np.savez(args.dump, **params)  # np.save cannot write multiple named arrays to an .npz
32 |
33 | if args.shell:
34 | # params is a dict. play with it
35 | import IPython as IP
36 | IP.embed(config=IP.terminal.ipapp.load_default_config())
37 |
--------------------------------------------------------------------------------
/tensorpack-installed/scripts/checkpoint-prof.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: checkpoint-prof.py
4 |
5 | import tensorflow as tf
6 | import numpy as np
7 | from tensorpack import get_default_sess_config, get_op_tensor_name
8 | from tensorpack.utils import logger
9 | from tensorpack.tfutils.sessinit import get_model_loader
10 | import argparse
11 |
12 | if __name__ == '__main__':
13 | parser = argparse.ArgumentParser()
14 | parser.add_argument('--model', help='model file')
15 | parser.add_argument('--meta', help='metagraph proto file. Will be used to load the graph', required=True)
16 | parser.add_argument('-i', '--input', nargs='+', help='list of input tensors with their shapes.')
17 | parser.add_argument('-o', '--output', nargs='+', help='list of output tensors')
18 | parser.add_argument('--warmup', help='warmup iterations', type=int, default=5)
19 | parser.add_argument('--print-flops', action='store_true')
20 | parser.add_argument('--print-params', action='store_true')
21 | parser.add_argument('--print-timing', action='store_true')
22 | args = parser.parse_args()
23 |
24 | tf.train.import_meta_graph(args.meta, clear_devices=True)
25 | G = tf.get_default_graph()
26 | with tf.Session(config=get_default_sess_config()) as sess:
27 | init = get_model_loader(args.model)
28 | init.init(sess)
29 |
30 | feed = {}
31 | for inp in args.input:
32 | inp = inp.split('=')
33 | name = get_op_tensor_name(inp[0].strip())[1]
34 | shape = list(map(int, inp[1].strip().split(',')))
35 | tensor = G.get_tensor_by_name(name)
36 | logger.info("Feeding shape ({}) to tensor {}".format(','.join(map(str, shape)), name))
37 | feed[tensor] = np.random.rand(*shape)
38 |
39 | fetches = []
40 | for name in args.output:
41 | name = get_op_tensor_name(name)[1]
42 | fetches.append(G.get_tensor_by_name(name))
43 | logger.info("Fetching tensors: {}".format(', '.join([k.name for k in fetches])))
44 |
45 | for _ in range(args.warmup):
46 | sess.run(fetches, feed_dict=feed)
47 |
48 | opt = tf.RunOptions()
49 | opt.trace_level = tf.RunOptions.FULL_TRACE
50 | meta = tf.RunMetadata()
51 | sess.run(fetches, feed_dict=feed, options=opt, run_metadata=meta)
52 |
53 | if args.print_flops:
54 | tf.contrib.tfprof.model_analyzer.print_model_analysis(
55 | G, run_meta=meta,
56 | tfprof_options=tf.contrib.tfprof.model_analyzer.FLOAT_OPS_OPTIONS)
57 |
58 | if args.print_params:
59 | tf.contrib.tfprof.model_analyzer.print_model_analysis(
60 | G, run_meta=meta,
61 | tfprof_options=tf.contrib.tfprof.model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
62 |
63 | if args.print_timing:
64 | tf.contrib.tfprof.model_analyzer.print_model_analysis(
65 | G, run_meta=meta,
66 | tfprof_options=tf.contrib.tfprof.model_analyzer.PRINT_ALL_TIMING_MEMORY)
67 |
--------------------------------------------------------------------------------
/tensorpack-installed/scripts/dump-model-params.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: UTF-8 -*-
3 | # File: dump-model-params.py
4 | # Author: Yuxin Wu
5 |
6 | import numpy as np
7 | import six
8 | import argparse
9 | import os
10 | import tensorflow as tf
11 |
12 | from tensorpack.tfutils import varmanip
13 | from tensorpack.tfutils.common import get_op_tensor_name
14 |
15 | if __name__ == '__main__':
16 | parser = argparse.ArgumentParser(
17 | description='Keep only TRAINABLE and MODEL variables in a checkpoint.')
18 | parser.add_argument('--meta', help='metagraph file', required=True)
19 | parser.add_argument(dest='input', help='input model file, has to be a TF checkpoint')
20 | parser.add_argument(dest='output', help='output model file, can be npz or TF checkpoint')
21 | args = parser.parse_args()
22 |
23 | # this script does not need GPU
24 | os.environ['CUDA_VISIBLE_DEVICES'] = ''
25 |
26 | tf.train.import_meta_graph(args.meta, clear_devices=True)
27 |
28 | # loading...
29 | if args.input.endswith('.npz'):
30 | dic = np.load(args.input)
31 | else:
32 | dic = varmanip.load_chkpt_vars(args.input)
33 | dic = {get_op_tensor_name(k)[1]: v for k, v in six.iteritems(dic)}
34 |
35 | # save variables that are GLOBAL, and either TRAINABLE or MODEL
36 | var_to_dump = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
37 | var_to_dump.extend(tf.get_collection(tf.GraphKeys.MODEL_VARIABLES))
38 |     assert len(set(var_to_dump)) == len(var_to_dump), "TRAINABLE and MODEL variables contain duplicates!"
39 | globvarname = [k.name for k in tf.global_variables()]
40 | var_to_dump = set([k.name for k in var_to_dump if k.name in globvarname])
41 |
42 | for name in var_to_dump:
43 | assert name in dic, "Variable {} not found in the model!".format(name)
44 |
45 | dic_to_dump = {k: v for k, v in six.iteritems(dic) if k in var_to_dump}
46 | varmanip.save_chkpt_vars(dic_to_dump, args.output)
47 |
--------------------------------------------------------------------------------
/tensorpack-installed/scripts/ls-checkpoint.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: ls-checkpoint.py
4 | # Author: Yuxin Wu
5 |
6 | import tensorflow as tf
7 | import numpy as np
8 | import six
9 | import sys
10 | import pprint
11 |
12 | from tensorpack.tfutils.varmanip import get_checkpoint_path
13 |
14 | if __name__ == '__main__':
15 | fpath = sys.argv[1]
16 |
17 | if fpath.endswith('.npy'):
18 | params = np.load(fpath, encoding='latin1').item()
19 | dic = {k: v.shape for k, v in six.iteritems(params)}
20 | elif fpath.endswith('.npz'):
21 | params = dict(np.load(fpath))
22 | dic = {k: v.shape for k, v in six.iteritems(params)}
23 | else:
24 | path = get_checkpoint_path(sys.argv[1])
25 | reader = tf.train.NewCheckpointReader(path)
26 | dic = reader.get_variable_to_shape_map()
27 | pprint.pprint(dic)
28 |
--------------------------------------------------------------------------------
/tensorpack-installed/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | author = TensorPack contributors
3 | author-email = ppwwyyxxc@gmail.com
4 | url = https://github.com/ppwwyyxx/tensorpack
5 | keywords = tensorflow, deep learning, neural network
6 | license = Apache
7 |
8 | [options]
9 | zip_safe = False # dataset and __init__ use file
10 | packages = find: # will call find_packages()
11 |
12 | [wheel]
13 | universal = 1
14 |
--------------------------------------------------------------------------------
/tensorpack-installed/setup.py:
--------------------------------------------------------------------------------
1 | import setuptools
2 | version = int(setuptools.__version__.split('.')[0])
3 | assert version > 30, "tensorpack installation requires setuptools > 30"
4 | from setuptools import setup
5 | import os
6 | import shutil
7 | import sys
8 |
9 | # setup metainfo
10 | CURRENT_DIR = os.path.dirname(__file__)
11 | libinfo_py = os.path.join(CURRENT_DIR, 'tensorpack/libinfo.py')
12 | exec(open(libinfo_py, "rb").read())
13 |
14 | # produce rst readme for pypi
15 | try:
16 | import pypandoc
17 | long_description = pypandoc.convert_file('README.md', 'rst')
18 | except ImportError:
19 | long_description = open('README.md').read()
20 |
21 | # configure requirements
22 | reqfile = os.path.join(CURRENT_DIR, 'requirements.txt')
23 | req = [x.strip() for x in open(reqfile).readlines()]
24 |
25 | setup(
26 | name='tensorpack',
27 | version=__version__,
28 | description='Neural Network Toolbox on TensorFlow',
29 | long_description=long_description,
30 |
31 | install_requires=req,
32 | tests_require=['flake8', 'scikit-image'],
33 | extras_require={
34 | 'all': ['pillow', 'scipy', 'h5py', 'lmdb>=0.92', 'matplotlib', 'scikit-learn'],
35 | 'all: python_version < "3.0"': ['tornado']
36 | },
37 |
38 | #include_package_data=True,
39 | #package_data={'tensorpack': []},
40 | )
41 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # File: __init__.py
3 |
4 |
5 | import os as _os
6 |
7 | from tensorpack.libinfo import __version__, _HAS_TF
8 |
9 | from tensorpack.utils import *
10 | from tensorpack.dataflow import *
11 |
12 | # dataflow can be used alone without installing tensorflow
13 | # TODO maybe separate dataflow to a new project if it's good enough
14 |
15 | # https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
16 | STATICA_HACK = True
17 | globals()['kcah_acitats'[::-1].upper()] = _HAS_TF
18 | if STATICA_HACK:
19 | from tensorpack.models import *
20 |
21 | from tensorpack.callbacks import *
22 | from tensorpack.tfutils import *
23 |
24 | # Default to v2
25 | if _os.environ.get('TENSORPACK_TRAIN_API', 'v2') == 'v2':
26 | from tensorpack.train import *
27 | else:
28 | from tensorpack.trainv1 import *
29 | from tensorpack.graph_builder import InputDesc, ModelDesc, ModelDescBase
30 | from tensorpack.input_source import *
31 | from tensorpack.predict import *
32 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/callbacks/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: __init__.py
3 |
4 |
5 | # https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
6 | STATICA_HACK = True
7 | globals()['kcah_acitats'[::-1].upper()] = False
8 | if STATICA_HACK:
9 | from .base import *
10 | from .concurrency import *
11 | from .graph import *
12 | from .group import *
13 | from .hooks import *
14 | from .inference import *
15 | from .inference_runner import *
16 | from .monitor import *
17 | from .param import *
18 | from .prof import *
19 | from .saver import *
20 | from .misc import *
21 | from .steps import *
22 | from .summary import *
23 | from .trigger import *
24 |
25 |
26 | from pkgutil import iter_modules
27 | import os
28 |
29 |
30 | __all__ = []
31 |
32 |
33 | def _global_import(name):
34 | p = __import__(name, globals(), locals(), level=1)
35 | lst = p.__all__ if '__all__' in dir(p) else dir(p)
36 | if lst:
37 | del globals()[name]
38 | for k in lst:
39 | if not k.startswith('__'):
40 | globals()[k] = p.__dict__[k]
41 | __all__.append(k)
42 |
43 |
44 | _CURR_DIR = os.path.dirname(__file__)
45 | for _, module_name, _ in iter_modules(
46 | [_CURR_DIR]):
47 | srcpath = os.path.join(_CURR_DIR, module_name + '.py')
48 | if not os.path.isfile(srcpath):
49 | continue
50 | if not module_name.startswith('_'):
51 | _global_import(module_name)
52 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/callbacks/concurrency.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: concurrency.py
4 |
5 |
6 | import multiprocessing as mp
7 | from .base import Callback
8 | from ..utils.concurrency import start_proc_mask_signal, StoppableThread
9 | from ..utils import logger
10 |
11 | __all__ = ['StartProcOrThread']
12 |
13 |
14 | class StartProcOrThread(Callback):
15 | """
16 | Start some threads or processes before training.
17 | """
18 |
19 | _chief_only = False
20 |
21 | def __init__(self, startable, stop_at_last=True):
22 | """
23 | Args:
24 | startable (list): list of processes or threads which have ``start()`` method.
25 |                 Can also be a single instance of a process or thread.
26 | stop_at_last (bool): whether to stop the processes or threads
27 | after training. It will use :meth:`Process.terminate()` or
28 | :meth:`StoppableThread.stop()`, but will do nothing on normal
29 | `threading.Thread` or other startable objects.
30 | """
31 | if not isinstance(startable, list):
32 | startable = [startable]
33 | self._procs_threads = startable
34 | self._stop_at_last = stop_at_last
35 |
36 | def _before_train(self):
37 | logger.info("Starting " +
38 | ', '.join([k.name for k in self._procs_threads]) + ' ...')
39 |         # mask SIGINT so it is not delivered to the started processes
40 | start_proc_mask_signal(self._procs_threads)
41 |
42 | def _after_train(self):
43 | if not self._stop_at_last:
44 | return
45 | for k in self._procs_threads:
46 | if not k.is_alive():
47 | continue
48 | if isinstance(k, mp.Process):
49 | logger.info("Stopping {} ...".format(k.name))
50 | k.terminate()
51 | k.join(5.0)
52 | if k.is_alive():
53 | logger.error("Cannot join process {}.".format(k.name))
54 | elif isinstance(k, StoppableThread):
55 | logger.info("Stopping {} ...".format(k.name))
56 | k.stop()
57 | k.join(5.0)
58 | if k.is_alive():
59 | logger.error("Cannot join thread {}.".format(k.name))
60 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/callbacks/group.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: group.py
3 |
4 |
5 | import tensorflow as tf
6 | from contextlib import contextmanager
7 | import time
8 | import traceback
9 |
10 | from .base import Callback
11 | from .hooks import CallbackToHook
12 | from ..utils import logger
13 | from ..utils.utils import humanize_time_delta
14 |
15 | __all__ = ['Callbacks']
16 |
17 |
18 | class CallbackTimeLogger(object):
19 | def __init__(self):
20 | self.times = []
21 | self.tot = 0
22 |
23 | def add(self, name, time):
24 | self.tot += time
25 | self.times.append((name, time))
26 |
27 | @contextmanager
28 | def timed_callback(self, name):
29 | s = time.time()
30 | yield
31 | self.add(name, time.time() - s)
32 |
33 |     def log(self):
34 |         """ log the time of some heavy callbacks """
35 | 
36 | if self.tot < 3:
37 | return
38 | msgs = []
39 | for name, t in self.times:
40 | if t / self.tot > 0.3 and t > 1:
41 | msgs.append(name + ": " + humanize_time_delta(t))
42 | logger.info(
43 | "Callbacks took {:.3f} sec in total. {}".format(
44 | self.tot, '; '.join(msgs)))
45 |
46 |
47 | class Callbacks(Callback):
48 | """
49 | A container to hold all callbacks, and trigger them iteratively.
50 | Note that it does nothing to before_run/after_run.
51 | """
52 |
53 | def __init__(self, cbs):
54 | """
55 | Args:
56 | cbs(list): a list of :class:`Callback` instances.
57 | """
58 | # check type
59 | for cb in cbs:
60 | assert isinstance(cb, Callback), cb.__class__
61 | self.cbs = cbs
62 |
63 | def _setup_graph(self):
64 | with tf.name_scope(None): # clear the name scope
65 | for cb in self.cbs:
66 | cb.setup_graph(self.trainer)
67 |
68 | def _before_train(self):
69 | for cb in self.cbs:
70 | cb.before_train()
71 |
72 | def _after_train(self):
73 | for cb in self.cbs:
74 | # make sure callbacks are properly finalized
75 | try:
76 | cb.after_train()
77 | except Exception:
78 | traceback.print_exc()
79 |
80 | def get_hooks(self):
81 | return [CallbackToHook(cb) for cb in self.cbs]
82 |
83 | def trigger_step(self):
84 | for cb in self.cbs:
85 | cb.trigger_step()
86 |
87 | def _trigger_epoch(self):
88 | tm = CallbackTimeLogger()
89 |
90 | for cb in self.cbs:
91 | display_name = str(cb)
92 | with tm.timed_callback(display_name):
93 | cb.trigger_epoch()
94 | tm.log()
95 |
96 | def _before_epoch(self):
97 | for cb in self.cbs:
98 | cb.before_epoch()
99 |
100 | def _after_epoch(self):
101 | for cb in self.cbs:
102 | cb.after_epoch()
103 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/callbacks/hooks.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: hooks.py
4 |
5 |
6 | """ Compatible layers between tf.train.SessionRunHook and Callback"""
7 |
8 | import tensorflow as tf
9 | from .base import Callback
10 |
11 |
12 | __all__ = ['CallbackToHook', 'HookToCallback']
13 |
14 |
15 | class CallbackToHook(tf.train.SessionRunHook):
16 | """ This is only for internal implementation of
17 | before_run/after_run callbacks.
18 | You shouldn't need to use this.
19 | """
20 |
21 | _chief_only = False
22 |
23 | def __init__(self, cb):
24 | self._cb = cb
25 |
26 | def before_run(self, ctx):
27 | return self._cb.before_run(ctx)
28 |
29 | def after_run(self, ctx, vals):
30 | self._cb.after_run(ctx, vals)
31 |
32 |
33 | class HookToCallback(Callback):
34 | """
35 | Make a ``tf.train.SessionRunHook`` into a callback.
36 | Note that the `coord` argument in `after_create_session` will be None.
37 | """
38 |
39 | _chief_only = False
40 |
41 | def __init__(self, hook):
42 | """
43 | Args:
44 | hook (tf.train.SessionRunHook):
45 | """
46 | self._hook = hook
47 |
48 | def _setup_graph(self):
49 | with tf.name_scope(None): # jump out of the name scope
50 | self._hook.begin()
51 |
52 | def _before_train(self):
53 | sess = tf.get_default_session()
54 | # coord is set to None when converting
55 | self._hook.after_create_session(sess, None)
56 |
57 | def _before_run(self, ctx):
58 | return self._hook.before_run(ctx)
59 |
60 | def _after_run(self, ctx, run_values):
61 | self._hook.after_run(ctx, run_values)
62 |
63 | def _after_train(self):
64 | self._hook.end(self.trainer.sess)
65 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/callbacks/misc.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # File: misc.py
3 |
4 |
5 | import os
6 | import time
7 | from collections import deque
8 | import numpy as np
9 |
10 | from .base import Callback
11 | from ..utils.utils import humanize_time_delta
12 | from ..utils import logger
13 |
14 | __all__ = ['SendStat', 'InjectShell', 'EstimatedTimeLeft']
15 |
16 |
17 | class SendStat(Callback):
18 | """ An equivalent of :class:`SendMonitorData`, but as a normal callback. """
19 | def __init__(self, command, names):
20 | self.command = command
21 | if not isinstance(names, list):
22 | names = [names]
23 | self.names = names
24 |
25 | def _trigger(self):
26 | M = self.trainer.monitors
27 | v = {k: M.get_latest(k) for k in self.names}
28 | cmd = self.command.format(**v)
29 | ret = os.system(cmd)
30 | if ret != 0:
31 | logger.error("Command {} failed with ret={}!".format(cmd, ret))
32 |
33 |
34 | class InjectShell(Callback):
35 | """
36 | Allow users to create a specific file as a signal to pause
37 | and iteratively debug the training.
38 |     At each trigger, it checks whether the file exists, and opens an
39 |     IPython/pdb shell if it does.
40 | In the shell, `self` is this callback, `self.trainer` is the trainer, and
41 | from that you can access everything else.
42 | """
43 |
44 | def __init__(self, file='INJECT_SHELL.tmp', shell='ipython'):
45 | """
46 | Args:
47 | file (str): if this file exists, will open a shell.
48 | shell (str): one of 'ipython', 'pdb'
49 | """
50 | self._file = file
51 | assert shell in ['ipython', 'pdb']
52 | self._shell = shell
53 | logger.info("Create a file '{}' to open {} shell.".format(file, shell))
54 |
55 | def _trigger(self):
56 | if os.path.isfile(self._file):
57 | logger.info("File {} exists, entering shell.".format(self._file))
58 | self._inject()
59 |
60 | def _inject(self):
61 | trainer = self.trainer # noqa
62 | if self._shell == 'ipython':
63 | import IPython as IP # noqa
64 | IP.embed()
65 | elif self._shell == 'pdb':
66 | import pdb # noqa
67 | pdb.set_trace()
68 |
69 | def _after_train(self):
70 | if os.path.isfile(self._file):
71 | os.unlink(self._file)
72 |
73 |
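# A hypothetical usage sketch (comments only, since this is library code):
#   callbacks=[..., InjectShell(file='INJECT_SHELL.tmp', shell='ipython'), ...]
# During training, create INJECT_SHELL.tmp in the working directory to drop into
# an IPython shell at the next trigger; delete the file to stop re-entering it.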
74 | class EstimatedTimeLeft(Callback):
75 | """
76 | Estimate the time left until completion of training.
77 | """
78 | def __init__(self, last_k_epochs=5):
79 | """
80 | Args:
81 | last_k_epochs (int): Use the time spent on last k epochs to
82 | estimate total time left.
83 | """
84 | self._times = deque(maxlen=last_k_epochs)
85 |
86 | def _before_train(self):
87 | self._max_epoch = self.trainer.max_epoch
88 | self._last_time = time.time()
89 |
90 | def _trigger_epoch(self):
91 | duration = time.time() - self._last_time
92 | self._last_time = time.time()
93 | self._times.append(duration)
94 |
95 | average_epoch_time = np.mean(self._times)
96 | time_left = (self._max_epoch - self.epoch_num) * average_epoch_time
97 | if time_left > 0:
98 | logger.info("Estimated Time Left: " + humanize_time_delta(time_left))
99 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/callbacks/stats.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: stats.py
4 |
5 | # for compatibility only
6 | from .misc import InjectShell, SendStat # noqa
7 | from .graph import DumpParamAsImage # noqa
8 |
9 | __all__ = []
10 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/contrib/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wanggrun/Kalman-Normalization/77fb6503beca34b25f1b4798c4a103b7fdcf9c47/tensorpack-installed/tensorpack/contrib/__init__.py
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/dataflow/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: __init__.py
3 |
4 | # https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
5 | STATICA_HACK = True
6 | globals()['kcah_acitats'[::-1].upper()] = False
7 | if STATICA_HACK:
8 | from .base import *
9 | from .common import *
10 | from .format import *
11 | from .image import *
12 | from .parallel_map import *
13 | from .parallel import *
14 | from .raw import *
15 | from .remote import *
16 | from . import imgaug
17 | from . import dataset
18 | from . import dftools
19 |
20 |
21 | from pkgutil import iter_modules
22 | import os
23 | import os.path
24 | from ..utils.develop import LazyLoader
25 |
26 | __all__ = []
27 |
28 |
29 | def _global_import(name):
30 | p = __import__(name, globals(), locals(), level=1)
31 | lst = p.__all__ if '__all__' in dir(p) else dir(p)
32 | if lst:
33 | del globals()[name]
34 | for k in lst:
35 | if not k.startswith('__'):
36 | globals()[k] = p.__dict__[k]
37 | __all__.append(k)
38 |
39 |
40 | __SKIP = set(['dftools', 'dataset', 'imgaug'])
41 | _CURR_DIR = os.path.dirname(__file__)
42 | for _, module_name, __ in iter_modules(
43 | [os.path.dirname(__file__)]):
44 | srcpath = os.path.join(_CURR_DIR, module_name + '.py')
45 | if not os.path.isfile(srcpath):
46 | continue
47 | if not module_name.startswith('_') and \
48 | module_name not in __SKIP:
49 | _global_import(module_name)
50 |
51 |
52 | globals()['dataset'] = LazyLoader('dataset', globals(), 'tensorpack.dataflow.dataset')
53 | globals()['imgaug'] = LazyLoader('imgaug', globals(), 'tensorpack.dataflow.imgaug')
54 |
55 | del LazyLoader
56 |
57 | __all__.extend(['imgaug', 'dftools', 'dataset'])
58 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/dataflow/base.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: UTF-8 -*-
3 | # File: base.py
4 |
5 |
6 | import threading
7 | from abc import abstractmethod, ABCMeta
8 | import six
9 | from ..utils.utils import get_rng
10 |
11 | __all__ = ['DataFlow', 'ProxyDataFlow', 'RNGDataFlow', 'DataFlowTerminated']
12 |
13 |
14 | class DataFlowTerminated(BaseException):
15 | """
16 | An exception indicating that the DataFlow is unable to produce any more
17 | data, i.e. something wrong happened so that calling :meth:`get_data`
18 | cannot give a valid iterator any more.
19 | In most DataFlow this will never be raised.
20 | """
21 | pass
22 |
23 |
24 | class DataFlowReentrantGuard(object):
25 | """
26 | A tool to enforce non-reentrancy.
27 | Mostly used on DataFlow whose :meth:`get_data` is stateful,
28 | so that multiple instances of the iterator cannot co-exist.
29 | """
30 | def __init__(self):
31 | self._lock = threading.Lock()
32 |
33 | def __enter__(self):
34 | self._succ = self._lock.acquire(False)
35 | if not self._succ:
36 | raise threading.ThreadError("This DataFlow is not reentrant!")
37 |
38 | def __exit__(self, exc_type, exc_val, exc_tb):
39 | self._lock.release()
40 | return False
41 |
42 |
43 | @six.add_metaclass(ABCMeta)
44 | class DataFlow(object):
45 | """ Base class for all DataFlow """
46 |
47 | @abstractmethod
48 | def get_data(self):
49 | """
50 | The method to generate datapoints.
51 |
52 | Yields:
53 | list: The datapoint, i.e. list of components.
54 | """
55 |
56 | def size(self):
57 | """
58 | Returns:
59 | int: size of this data flow.
60 |
61 | Raises:
62 | :class:`NotImplementedError` if this DataFlow doesn't have a size.
63 | """
64 | raise NotImplementedError()
65 |
66 | def reset_state(self):
67 | """
68 | Reset state of the dataflow. It has to be called before producing datapoints.
69 |
70 | For example, RNG **has to** be reset if used in the DataFlow,
71 | otherwise it won't work well with prefetching, because different
72 | processes will have the same RNG state.
73 | """
74 | pass
75 |
76 |
77 | class RNGDataFlow(DataFlow):
78 | """ A DataFlow with RNG"""
79 |
80 | def reset_state(self):
81 | """ Reset the RNG """
82 | self.rng = get_rng(self)
83 |
84 |
85 | class ProxyDataFlow(DataFlow):
86 | """ Base class for DataFlow that proxies another.
87 | Every method is proxied to ``self.ds`` unless override by subclass.
88 | """
89 |
90 | def __init__(self, ds):
91 | """
92 | Args:
93 | ds (DataFlow): DataFlow to proxy.
94 | """
95 | self.ds = ds
96 |
97 | def reset_state(self):
98 | self.ds.reset_state()
99 |
100 | def size(self):
101 | return self.ds.size()
102 |
103 | def get_data(self):
104 | return self.ds.get_data()
105 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/dataflow/dataset/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: __init__.py
3 |
4 | # https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
5 | STATICA_HACK = True
6 | globals()['kcah_acitats'[::-1].upper()] = False
7 | if STATICA_HACK:
8 | from .bsds500 import *
9 | from .cifar import *
10 | from .ilsvrc import *
11 | from .mnist import *
12 | from .svhn import *
13 |
14 | from pkgutil import iter_modules
15 | import os
16 | import os.path
17 |
18 | __all__ = []
19 |
20 |
21 | def global_import(name):
22 | p = __import__(name, globals(), locals(), level=1)
23 | lst = p.__all__ if '__all__' in dir(p) else dir(p)
24 | if lst:
25 | del globals()[name]
26 | for k in lst:
27 | if not k.startswith('__'):
28 | globals()[k] = p.__dict__[k]
29 | __all__.append(k)
30 |
31 |
32 | _CURR_DIR = os.path.dirname(__file__)
33 | for _, module_name, _ in iter_modules(
34 | [_CURR_DIR]):
35 | srcpath = os.path.join(_CURR_DIR, module_name + '.py')
36 | if not os.path.isfile(srcpath):
37 | continue
38 | if not module_name.startswith('_'):
39 | global_import(module_name)
40 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/dataflow/dataset/bsds500.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: bsds500.py
4 |
5 |
6 | import os
7 | import glob
8 | import numpy as np
9 |
10 | from ...utils.fs import download, get_dataset_path
11 | from ..base import RNGDataFlow
12 |
13 | __all__ = ['BSDS500']
14 | DATA_URL = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz"
15 | IMG_W, IMG_H = 481, 321
16 |
17 |
18 | class BSDS500(RNGDataFlow):
19 | """
20 | `Berkeley Segmentation Data Set and Benchmarks 500 dataset
21 | `_.
22 |
23 | Produce ``(image, label)`` pair, where ``image`` has shape (321, 481, 3(BGR)) and
24 | ranges in [0,255].
25 | ``Label`` is a floating point image of shape (321, 481) in range [0, 1].
26 | The value of each pixel is ``number of times it is annotated as edge / total number of annotators for this image``.
27 | """
28 |
29 | def __init__(self, name, data_dir=None, shuffle=True):
30 | """
31 | Args:
32 | name (str): 'train', 'test', 'val'
33 | data_dir (str): a directory containing the original 'BSR' directory.
34 | """
35 | # check and download data
36 | if data_dir is None:
37 | data_dir = get_dataset_path('bsds500_data')
38 | if not os.path.isdir(os.path.join(data_dir, 'BSR')):
39 | download(DATA_URL, data_dir)
40 | filename = DATA_URL.split('/')[-1]
41 | filepath = os.path.join(data_dir, filename)
42 | import tarfile
43 | tarfile.open(filepath, 'r:gz').extractall(data_dir)
44 | self.data_root = os.path.join(data_dir, 'BSR', 'BSDS500', 'data')
45 | assert os.path.isdir(self.data_root)
46 |
47 | self.shuffle = shuffle
48 | assert name in ['train', 'test', 'val']
49 | self._load(name)
50 |
51 | def _load(self, name):
52 | image_glob = os.path.join(self.data_root, 'images', name, '*.jpg')
53 | image_files = glob.glob(image_glob)
54 | gt_dir = os.path.join(self.data_root, 'groundTruth', name)
55 | self.data = np.zeros((len(image_files), IMG_H, IMG_W, 3), dtype='uint8')
56 | self.label = np.zeros((len(image_files), IMG_H, IMG_W), dtype='float32')
57 |
58 | for idx, f in enumerate(image_files):
59 | im = cv2.imread(f, cv2.IMREAD_COLOR)
60 | assert im is not None
61 | if im.shape[0] > im.shape[1]:
62 | im = np.transpose(im, (1, 0, 2))
63 | assert im.shape[:2] == (IMG_H, IMG_W), "{} != {}".format(im.shape[:2], (IMG_H, IMG_W))
64 |
65 | imgid = os.path.basename(f).split('.')[0]
66 | gt_file = os.path.join(gt_dir, imgid)
67 | gt = loadmat(gt_file)['groundTruth'][0]
68 | n_annot = gt.shape[0]
69 | gt = sum(gt[k]['Boundaries'][0][0] for k in range(n_annot))
70 | gt = gt.astype('float32')
71 |             gt *= 1.0 / n_annot  # fraction of annotators that marked each pixel as boundary
72 | if gt.shape[0] > gt.shape[1]:
73 | gt = gt.transpose()
74 | assert gt.shape == (IMG_H, IMG_W)
75 |
76 | self.data[idx] = im
77 | self.label[idx] = gt
78 |
79 | def size(self):
80 | return self.data.shape[0]
81 |
82 | def get_data(self):
83 | idxs = np.arange(self.data.shape[0])
84 | if self.shuffle:
85 | self.rng.shuffle(idxs)
86 | for k in idxs:
87 | yield [self.data[k], self.label[k]]
88 |
89 |
90 | try:
91 | from scipy.io import loadmat
92 | import cv2
93 | except ImportError:
94 | from ...utils.develop import create_dummy_class
95 | BSDS500 = create_dummy_class('BSDS500', ['scipy.io', 'cv2']) # noqa
96 |
97 | if __name__ == '__main__':
98 | a = BSDS500('val')
99 | for k in a.get_data():
100 | cv2.imshow("haha", k[1].astype('uint8') * 255)
101 | cv2.waitKey(1000)
102 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/dataflow/dataset/svhn.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: UTF-8 -*-
3 | # File: svhn.py
4 |
5 |
6 | import os
7 | import numpy as np
8 |
9 | from ...utils import logger
10 | from ...utils.fs import get_dataset_path, download
11 | from ..base import RNGDataFlow
12 |
13 | __all__ = ['SVHNDigit']
14 |
15 | SVHN_URL = "http://ufldl.stanford.edu/housenumbers/"
16 |
17 |
18 | class SVHNDigit(RNGDataFlow):
19 | """
20 | `SVHN `_ Cropped Digit Dataset.
21 | Produces [img, label], img of 32x32x3 in range [0,255], label of 0-9
22 | """
23 | _Cache = {}
24 |
25 | def __init__(self, name, data_dir=None, shuffle=True):
26 | """
27 | Args:
28 | name (str): 'train', 'test', or 'extra'.
29 | data_dir (str): a directory containing the original {train,test,extra}_32x32.mat.
30 | shuffle (bool): shuffle the dataset.
31 | """
32 | self.shuffle = shuffle
33 |
34 | if name in SVHNDigit._Cache:
35 | self.X, self.Y = SVHNDigit._Cache[name]
36 | return
37 | if data_dir is None:
38 | data_dir = get_dataset_path('svhn_data')
39 | assert name in ['train', 'test', 'extra'], name
40 | filename = os.path.join(data_dir, name + '_32x32.mat')
41 | if not os.path.isfile(filename):
42 | url = SVHN_URL + os.path.basename(filename)
43 | logger.info("File {} not found!".format(filename))
44 | logger.info("Downloading from {} ...".format(url))
45 | download(url, os.path.dirname(filename))
46 | logger.info("Loading {} ...".format(filename))
47 | data = scipy.io.loadmat(filename)
48 | self.X = data['X'].transpose(3, 0, 1, 2)
49 | self.Y = data['y'].reshape((-1))
50 | self.Y[self.Y == 10] = 0
51 | SVHNDigit._Cache[name] = (self.X, self.Y)
52 |
53 | def size(self):
54 | return self.X.shape[0]
55 |
56 | def get_data(self):
57 | n = self.X.shape[0]
58 | idxs = np.arange(n)
59 | if self.shuffle:
60 | self.rng.shuffle(idxs)
61 | for k in idxs:
62 | # since svhn is quite small, just do it for safety
63 | yield [self.X[k], self.Y[k]]
64 |
65 | @staticmethod
66 | def get_per_pixel_mean():
67 | """
68 |         Returns a 32x32x3 image, the per-pixel mean of the train, test and extra sets.
69 | """
70 | a = SVHNDigit('train')
71 | b = SVHNDigit('test')
72 | c = SVHNDigit('extra')
73 | return np.concatenate((a.X, b.X, c.X)).mean(axis=0)
74 |
75 |
76 | try:
77 | import scipy.io
78 | except ImportError:
79 | from ...utils.develop import create_dummy_class
80 | SVHNDigit = create_dummy_class('SVHNDigit', 'scipy.io') # noqa
81 |
82 | if __name__ == '__main__':
83 | a = SVHNDigit('train')
84 | b = SVHNDigit.get_per_pixel_mean()
85 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/dataflow/imgaug/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: __init__.py
3 |
4 | # https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
5 | STATICA_HACK = True
6 | globals()['kcah_acitats'[::-1].upper()] = False
7 | if STATICA_HACK:
8 | from .base import *
9 | from .convert import *
10 | from .crop import *
11 | from .deform import *
12 | from .geometry import *
13 | from .imgproc import *
14 | from .meta import *
15 | from .misc import *
16 | from .noise import *
17 | from .paste import *
18 | from .transform import *
19 |
20 |
21 | import os
22 | from pkgutil import iter_modules
23 |
24 | __all__ = []
25 |
26 |
27 | def global_import(name):
28 | p = __import__(name, globals(), locals(), level=1)
29 | lst = p.__all__ if '__all__' in dir(p) else dir(p)
30 | if lst:
31 | del globals()[name]
32 | for k in lst:
33 | if not k.startswith('__'):
34 | globals()[k] = p.__dict__[k]
35 | __all__.append(k)
36 |
37 |
38 | try:
39 | import cv2 # noqa
40 | except ImportError:
41 | from ...utils import logger
42 | logger.warn("Cannot import 'cv2', therefore image augmentation is not available.")
43 | else:
44 | _CURR_DIR = os.path.dirname(__file__)
45 | for _, module_name, _ in iter_modules(
46 | [os.path.dirname(__file__)]):
47 | srcpath = os.path.join(_CURR_DIR, module_name + '.py')
48 | if not os.path.isfile(srcpath):
49 | continue
50 | if not module_name.startswith('_'):
51 | global_import(module_name)
52 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/dataflow/imgaug/_test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: UTF-8 -*-
3 | # File: _test.py
4 |
5 |
6 | import sys
7 | import cv2
8 | from . import AugmentorList
9 | from .crop import *
10 | from .imgproc import *
11 | from .noname import *
12 | from .deform import *
13 | from .noise import SaltPepperNoise
14 |
15 |
16 | anchors = [(0.2, 0.2), (0.7, 0.2), (0.8, 0.8), (0.5, 0.5), (0.2, 0.5)]
17 | augmentors = AugmentorList([
18 | Contrast((0.8, 1.2)),
19 | Flip(horiz=True),
20 | GaussianDeform(anchors, (360, 480), 0.2, randrange=20),
21 | # RandomCropRandomShape(0.3),
22 | SaltPepperNoise()
23 | ])
24 |
25 | img = cv2.imread(sys.argv[1])
26 | newimg, prms = augmentors._augment_return_params(img)
27 | cv2.imshow(" ", newimg.astype('uint8'))
28 | cv2.waitKey()
29 |
30 | newimg = augmentors._augment(img, prms)
31 | cv2.imshow(" ", newimg.astype('uint8'))
32 | cv2.waitKey()
33 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/dataflow/imgaug/convert.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: convert.py
4 |
5 | from .base import ImageAugmentor
6 | from .meta import MapImage
7 | import numpy as np
8 | import cv2
9 |
10 | __all__ = ['ColorSpace', 'Grayscale', 'ToUint8', 'ToFloat32']
11 |
12 |
13 | class ColorSpace(ImageAugmentor):
14 | """ Convert into another colorspace. """
15 |
16 | def __init__(self, mode, keepdims=True):
17 | """
18 | Args:
19 | mode: opencv colorspace conversion code (e.g., `cv2.COLOR_BGR2HSV`)
20 | keepdims (bool): keep the dimension of image unchanged if opencv
21 | changes it.
22 | """
23 | self._init(locals())
24 |
25 | def _augment(self, img, _):
26 | transf = cv2.cvtColor(img, self.mode)
27 | if self.keepdims:
28 |             if len(transf.shape) != len(img.shape):
29 | transf = transf[..., None]
30 | return transf
31 |
32 |
33 | class Grayscale(ColorSpace):
34 | """ Convert image to grayscale. """
35 |
36 | def __init__(self, keepdims=True, rgb=False):
37 | """
38 | Args:
39 | keepdims (bool): return image of shape [H, W, 1] instead of [H, W]
40 | rgb (bool): interpret input as RGB instead of the default BGR
41 | """
42 | mode = cv2.COLOR_RGB2GRAY if rgb else cv2.COLOR_BGR2GRAY
43 | super(Grayscale, self).__init__(mode, keepdims)
44 |
45 |
46 | class ToUint8(MapImage):
47 | """ Convert image to uint8. Useful to reduce communication overhead. """
48 | def __init__(self):
49 | super(ToUint8, self).__init__(lambda x: np.clip(x, 0, 255).astype(np.uint8), lambda x: x)
50 |
51 |
52 | class ToFloat32(MapImage):
53 | """ Convert image to float32, may increase quality of the augmentor. """
54 | def __init__(self):
55 | super(ToFloat32, self).__init__(lambda x: x.astype(np.float32), lambda x: x)
56 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/dataflow/imgaug/crop.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: crop.py
3 |
4 |
5 | from ...utils.argtools import shape2d
6 | from .transform import TransformAugmentorBase, CropTransform
7 |
8 |
9 | __all__ = ['RandomCrop', 'CenterCrop', 'RandomCropRandomShape']
10 |
11 |
12 | class RandomCrop(TransformAugmentorBase):
13 | """ Randomly crop the image into a smaller one """
14 |
15 | def __init__(self, crop_shape):
16 | """
17 | Args:
18 |             crop_shape: (h, w) tuple or an int
19 | """
20 | crop_shape = shape2d(crop_shape)
21 | super(RandomCrop, self).__init__()
22 | self._init(locals())
23 |
24 | def _get_augment_params(self, img):
25 | orig_shape = img.shape
26 | assert orig_shape[0] >= self.crop_shape[0] \
27 | and orig_shape[1] >= self.crop_shape[1], orig_shape
28 | diffh = orig_shape[0] - self.crop_shape[0]
29 | h0 = 0 if diffh == 0 else self.rng.randint(diffh)
30 | diffw = orig_shape[1] - self.crop_shape[1]
31 | w0 = 0 if diffw == 0 else self.rng.randint(diffw)
32 | return CropTransform(h0, w0, self.crop_shape[0], self.crop_shape[1])
33 |
34 |
35 | class CenterCrop(TransformAugmentorBase):
36 | """ Crop the image at the center"""
37 |
38 | def __init__(self, crop_shape):
39 | """
40 | Args:
41 |             crop_shape: (h, w) tuple or an int
42 | """
43 | crop_shape = shape2d(crop_shape)
44 | self._init(locals())
45 |
46 | def _get_augment_params(self, img):
47 | orig_shape = img.shape
48 | assert orig_shape[0] >= self.crop_shape[0] \
49 | and orig_shape[1] >= self.crop_shape[1], orig_shape
50 | h0 = int((orig_shape[0] - self.crop_shape[0]) * 0.5)
51 | w0 = int((orig_shape[1] - self.crop_shape[1]) * 0.5)
52 | return CropTransform(h0, w0, self.crop_shape[0], self.crop_shape[1])
53 |
54 |
55 | class RandomCropRandomShape(TransformAugmentorBase):
56 | """ Random crop with a random shape"""
57 |
58 | def __init__(self, wmin, hmin,
59 | wmax=None, hmax=None,
60 | max_aspect_ratio=None):
61 | """
62 | Randomly crop a box of shape (h, w), sampled from [min, max] (both inclusive).
63 | If max is None, will use the input image shape.
64 |
65 | Args:
66 | wmin, hmin, wmax, hmax: range to sample shape.
67 | max_aspect_ratio (float): the upper bound of ``max(w,h)/min(w,h)``.
68 | """
69 | if max_aspect_ratio is None:
70 | max_aspect_ratio = 9999999
71 | self._init(locals())
72 |
73 | def _get_augment_params(self, img):
74 | hmax = self.hmax or img.shape[0]
75 | wmax = self.wmax or img.shape[1]
76 | h = self.rng.randint(self.hmin, hmax + 1)
77 | w = self.rng.randint(self.wmin, wmax + 1)
78 | diffh = img.shape[0] - h
79 | diffw = img.shape[1] - w
80 | assert diffh >= 0 and diffw >= 0
81 | y0 = 0 if diffh == 0 else self.rng.randint(diffh)
82 | x0 = 0 if diffw == 0 else self.rng.randint(diffw)
83 | return CropTransform(y0, x0, h, w)
84 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/dataflow/imgaug/deform.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: deform.py
3 |
4 |
5 | from .base import ImageAugmentor
6 | from ...utils import logger
7 | import numpy as np
8 |
9 | __all__ = ['GaussianDeform']
10 |
11 |
12 | class GaussianMap(object):
13 | """ Generate gaussian weighted deformation map"""
14 | # TODO really needs speedup
15 |
16 | def __init__(self, image_shape, sigma=0.5):
17 | assert len(image_shape) == 2
18 | self.shape = image_shape
19 | self.sigma = sigma
20 |
21 | def get_gaussian_weight(self, anchor):
22 | """
23 | Args:
24 | anchor: coordinate of the center
25 | """
26 | ret = np.zeros(self.shape, dtype='float32')
27 |
28 | y, x = np.mgrid[:self.shape[0], :self.shape[1]]
29 | y = y.astype('float32') / ret.shape[0] - anchor[0]
30 | x = x.astype('float32') / ret.shape[1] - anchor[1]
31 | g = np.exp(-(x**2 + y ** 2) / self.sigma)
32 | # cv2.imshow(" ", g)
33 | # cv2.waitKey()
34 | return g
35 |
36 |
37 | def np_sample(img, coords):
38 |     # a numpy implementation of the ImageSample layer: bilinear sampling of img at fractional coords
39 | coords = np.maximum(coords, 0)
40 | coords = np.minimum(coords, np.array([img.shape[0] - 1, img.shape[1] - 1]))
41 |
42 | lcoor = np.floor(coords).astype('int32')
43 | ucoor = lcoor + 1
44 | ucoor = np.minimum(ucoor, np.array([img.shape[0] - 1, img.shape[1] - 1]))
45 | diff = coords - lcoor
46 | neg_diff = 1.0 - diff
47 |
48 | lcoory, lcoorx = np.split(lcoor, 2, axis=2)
49 | ucoory, ucoorx = np.split(ucoor, 2, axis=2)
50 | diff = np.repeat(diff, 3, 2).reshape((diff.shape[0], diff.shape[1], 2, 3))
51 | neg_diff = np.repeat(neg_diff, 3, 2).reshape((diff.shape[0], diff.shape[1], 2, 3))
52 | diffy, diffx = np.split(diff, 2, axis=2)
53 | ndiffy, ndiffx = np.split(neg_diff, 2, axis=2)
54 |
55 | ret = img[lcoory, lcoorx, :] * ndiffx * ndiffy + \
56 | img[ucoory, ucoorx, :] * diffx * diffy + \
57 | img[lcoory, ucoorx, :] * ndiffy * diffx + \
58 | img[ucoory, lcoorx, :] * diffy * ndiffx
59 | return ret[:, :, 0, :]
60 |
61 |
62 | class GaussianDeform(ImageAugmentor):
63 | """
64 | Some kind of slow deformation I made up. Don't count on it.
65 | """
66 |
67 | # TODO input/output with different shape
68 |
69 | def __init__(self, anchors, shape, sigma=0.5, randrange=None):
70 | """
71 | Args:
72 | anchors (list): list of center coordinates in range [0,1].
73 | shape(list or tuple): image shape in [h, w].
74 | sigma (float): sigma for Gaussian weight
75 | randrange (int): offset range. Defaults to shape[0] / 8
76 | """
77 | logger.warn("GaussianDeform is slow. Consider using it with 4 or more prefetching processes.")
78 | super(GaussianDeform, self).__init__()
79 | self.anchors = anchors
80 | self.K = len(self.anchors)
81 | self.shape = shape
82 | self.grid = np.mgrid[0:self.shape[0], 0:self.shape[1]].transpose(1, 2, 0)
83 | self.grid = self.grid.astype('float32') # HxWx2
84 |
85 | gm = GaussianMap(self.shape, sigma=sigma)
86 | self.gws = np.array([gm.get_gaussian_weight(ank)
87 | for ank in self.anchors], dtype='float32') # KxHxW
88 | self.gws = self.gws.transpose(1, 2, 0) # HxWxK
89 | if randrange is None:
90 | self.randrange = self.shape[0] / 8
91 | else:
92 | self.randrange = randrange
93 | self.sigma = sigma
94 |
95 | def _get_augment_params(self, img):
96 | v = self.rng.rand(self.K, 2).astype('float32') - 0.5
97 | v = v * 2 * self.randrange
98 | return v
99 |
100 | def _augment(self, img, v):
101 | grid = self.grid + np.dot(self.gws, v)
102 | return np_sample(img, grid)
103 |
104 | def _augment_coords(self, coords, param):
105 | raise NotImplementedError()
106 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/dataflow/imgaug/gr_rotate.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: misc.py
3 |
4 | import numpy as np
5 | import cv2
6 |
7 | # from tensorpack.dataflow.imgaug.base import ImageAugmentor
8 | # from tensorpack.utils import logger
9 | # from tensorpack.utils.argtools import shape2d
10 | # from tensorpack.dataflow.imgaug.transform import ResizeTransform, TransformAugmentorBase
11 |
12 | from .base import ImageAugmentor
13 | from ...utils import logger
14 | from ...utils.argtools import shape2d
15 | from .transform import ResizeTransform, TransformAugmentorBase
16 | import math
17 |
18 |
19 | __all__ = ['GrRotate']
20 |
21 |
22 | class GrRotate(ImageAugmentor):
23 | """
24 | """
25 | def __init__(self, angle=0):
26 | """
27 | """
28 | super(GrRotate, self).__init__()
29 | if angle < 0 or angle > 180:
30 | raise ValueError("Angle should be between [0, 180]!")
31 | self._init(locals())
32 |
33 | def _get_augment_params(self, img):
34 | h, w = img.shape[:2]
35 | do = int(self._rand_range() * float(self.angle) * 2 - float(self.angle))
36 | return (do, h, w)
37 |
38 | def _augment(self, img, param):
39 | do, h, w = param
40 |         h_new = int(w * math.fabs(math.sin(math.radians(do))) + h * math.fabs(math.cos(math.radians(do))))
41 |         w_new = int(h * math.fabs(math.sin(math.radians(do))) + w * math.fabs(math.cos(math.radians(do))))
42 | 
43 |         mat_rotation = cv2.getRotationMatrix2D((w / 2, h / 2), do, 1)  # rotate around the image center
44 |         mat_rotation[0, 2] += (w_new - w) / 2  # shift so the rotated image fits the enlarged canvas
45 |         mat_rotation[1, 2] += (h_new - h) / 2
46 |         ret = cv2.warpAffine(img, mat_rotation, (w_new, h_new), borderValue=(128, 128, 128))
47 |         ret = cv2.resize(ret, (w, h))
48 |
49 | if img.ndim == 3 and ret.ndim == 2:
50 | ret = ret[:, :, np.newaxis]
51 | return ret
52 |
53 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/dataflow/imgaug/noise.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: noise.py
4 |
5 |
6 | from .base import ImageAugmentor
7 | import numpy as np
8 | import cv2
9 |
10 | __all__ = ['JpegNoise', 'GaussianNoise', 'SaltPepperNoise']
11 |
12 |
13 | class JpegNoise(ImageAugmentor):
14 | """ Random Jpeg noise. """
15 |
16 | def __init__(self, quality_range=(40, 100)):
17 | """
18 | Args:
19 | quality_range (tuple): range to sample Jpeg quality
20 | """
21 | super(JpegNoise, self).__init__()
22 | self._init(locals())
23 |
24 | def _get_augment_params(self, img):
25 | return self.rng.randint(*self.quality_range)
26 |
27 | def _augment(self, img, q):
28 | enc = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, q])[1]
29 | return cv2.imdecode(enc, 1).astype(img.dtype)
30 |
31 |
32 | class GaussianNoise(ImageAugmentor):
33 | """
34 | Add random Gaussian noise N(0, sigma^2) of the same shape to img.
35 | """
36 | def __init__(self, sigma=1, clip=True):
37 | """
38 | Args:
39 | sigma (float): stddev of the Gaussian distribution.
40 | clip (bool): clip the result to [0,255] in the end.
41 | """
42 | super(GaussianNoise, self).__init__()
43 | self._init(locals())
44 |
45 | def _get_augment_params(self, img):
46 | return self.rng.randn(*img.shape)
47 |
48 | def _augment(self, img, noise):
49 | old_dtype = img.dtype
50 | ret = img + noise * self.sigma
51 | if self.clip or old_dtype == np.uint8:
52 | ret = np.clip(ret, 0, 255)
53 | return ret.astype(old_dtype)
54 |
55 |
56 | class SaltPepperNoise(ImageAugmentor):
57 | """ Salt and pepper noise.
58 | Randomly set some elements in img to 0 or 255, regardless of its channels.
59 | """
60 |
61 | def __init__(self, white_prob=0.05, black_prob=0.05):
62 | """
63 | Args:
64 |             white_prob (float), black_prob (float): probabilities of setting an element to 255 or 0, respectively.
65 | """
66 | assert white_prob + black_prob <= 1, "Sum of probabilities cannot be greater than 1"
67 | super(SaltPepperNoise, self).__init__()
68 | self._init(locals())
69 |
70 | def _get_augment_params(self, img):
71 | return self.rng.uniform(low=0, high=1, size=img.shape)
72 |
73 | def _augment(self, img, param):
74 | img[param > (1 - self.white_prob)] = 255
75 | img[param < self.black_prob] = 0
76 | return img
77 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/dataflow/imgaug/paste.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: paste.py
4 |
5 |
6 | from .base import ImageAugmentor
7 |
8 | from abc import abstractmethod
9 | import numpy as np
10 |
11 | __all__ = ['CenterPaste', 'BackgroundFiller', 'ConstantBackgroundFiller',
12 | 'RandomPaste']
13 |
14 |
15 | class BackgroundFiller(object):
16 | """ Base class for all BackgroundFiller"""
17 |
18 | def fill(self, background_shape, img):
19 | """
20 | Return a proper background image of background_shape, given img.
21 |
22 | Args:
23 | background_shape (tuple): a shape (h, w)
24 | img: an image
25 | Returns:
26 | a background image
27 | """
28 | background_shape = tuple(background_shape)
29 | return self._fill(background_shape, img)
30 |
31 | @abstractmethod
32 | def _fill(self, background_shape, img):
33 | pass
34 |
35 |
36 | class ConstantBackgroundFiller(BackgroundFiller):
37 | """ Fill the background by a constant """
38 |
39 | def __init__(self, value):
40 | """
41 | Args:
42 | value (float): the value to fill the background.
43 | """
44 | self.value = value
45 |
46 | def _fill(self, background_shape, img):
47 | assert img.ndim in [3, 2]
48 | if img.ndim == 3:
49 | return_shape = background_shape + (img.shape[2],)
50 | else:
51 | return_shape = background_shape
52 | return np.zeros(return_shape, dtype=img.dtype) + self.value
53 |
54 |
55 | class CenterPaste(ImageAugmentor):
56 | """
57 | Paste the image onto the center of a background canvas.
58 | """
59 |
60 | def __init__(self, background_shape, background_filler=None):
61 | """
62 | Args:
63 | background_shape (tuple): shape of the background canvas.
64 | background_filler (BackgroundFiller): How to fill the background. Defaults to zero-filler.
65 | """
66 | if background_filler is None:
67 | background_filler = ConstantBackgroundFiller(0)
68 |
69 | self._init(locals())
70 |
71 | def _augment(self, img, _):
72 | img_shape = img.shape[:2]
73 | assert self.background_shape[0] >= img_shape[0] and self.background_shape[1] >= img_shape[1]
74 |
75 | background = self.background_filler.fill(
76 | self.background_shape, img)
77 | y0 = int((self.background_shape[0] - img_shape[0]) * 0.5)
78 | x0 = int((self.background_shape[1] - img_shape[1]) * 0.5)
79 | background[y0:y0 + img_shape[0], x0:x0 + img_shape[1]] = img
80 | return background
81 |
82 | def _augment_coords(self, coords, param):
83 | raise NotImplementedError()
84 |
85 |
86 | class RandomPaste(CenterPaste):
87 | """
88 |     Randomly paste the image onto a background canvas.
89 | """
90 |
91 | def _get_augment_params(self, img):
92 | img_shape = img.shape[:2]
93 | assert self.background_shape[0] > img_shape[0] and self.background_shape[1] > img_shape[1]
94 |
95 | y0 = self._rand_range(self.background_shape[0] - img_shape[0])
96 | x0 = self._rand_range(self.background_shape[1] - img_shape[1])
97 | return int(x0), int(y0)
98 |
99 | def _augment(self, img, loc):
100 | x0, y0 = loc
101 | img_shape = img.shape[:2]
102 | background = self.background_filler.fill(
103 | self.background_shape, img)
104 | background[y0:y0 + img_shape[0], x0:x0 + img_shape[1]] = img
105 | return background
106 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/graph_builder/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: __init__.py
3 |
4 | # https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
5 | STATICA_HACK = True
6 | globals()['kcah_acitats'[::-1].upper()] = False
7 | if STATICA_HACK:
8 | from .model_desc import *
9 | from .training import *
10 | from .distributed import *
11 | from .predict import *
12 | from .utils import *
13 |
14 | from pkgutil import iter_modules
15 | import os
16 | import os.path
17 |
18 | __all__ = []
19 |
20 | def global_import(name):
21 | p = __import__(name, globals(), locals(), level=1)
22 | lst = p.__all__ if '__all__' in dir(p) else []
23 | del globals()[name]
24 | for k in lst:
25 | if not k.startswith('__'):
26 | globals()[k] = p.__dict__[k]
27 | __all__.append(k)
28 |
29 |
30 | _CURR_DIR = os.path.dirname(__file__)
31 | _SKIP = []
32 | for _, module_name, _ in iter_modules(
33 | [_CURR_DIR]):
34 | srcpath = os.path.join(_CURR_DIR, module_name + '.py')
35 | if not os.path.isfile(srcpath):
36 | continue
37 | if module_name.startswith('_'):
38 | continue
39 | if module_name not in _SKIP:
40 | global_import(module_name)
41 |
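
# Why the STATICA_HACK idiom above works: the reversed-string assignment
# flips STATICA_HACK to False at runtime, so the wildcard imports are
# skipped and global_import() populates the namespace dynamically instead;
# static analyzers, which do not evaluate 'kcah_acitats'[::-1].upper(),
# still see the `from .x import *` lines and can resolve names. A tiny
# demonstration of the trick itself:
assert 'kcah_acitats'[::-1].upper() == 'STATICA_HACK'
ns = {'STATICA_HACK': True}
ns['kcah_acitats'[::-1].upper()] = False    # same trick, in a plain dict
assert ns['STATICA_HACK'] is False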
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/graph_builder/predict.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: predict.py
4 |
5 | import tensorflow as tf
6 |
7 | from ..utils import logger
8 | from ..tfutils.tower import TowerContext
9 | from .training import GraphBuilder
10 |
11 | __all__ = ['SimplePredictBuilder']
12 |
13 |
14 | class SimplePredictBuilder(GraphBuilder):
15 | """
16 | Single-tower predictor.
17 | """
18 | def __init__(self, ns_name='', vs_name='', device=0):
19 | """
20 | Args:
21 | ns_name (str): name scope to build the tower under.
22 | vs_name (str): variable scope to build the tower under.
23 | device (int): GPU id to run on; a negative value means CPU.
24 | """
25 | self._ns_name = ns_name
26 | self._vs_name = vs_name
27 |
28 | device = '/gpu:{}'.format(device) if device >= 0 else '/cpu:0'
29 | self._device = device
30 |
31 | def build(self, input, tower_fn):
32 | """
33 | Args:
34 | input (InputSource): must have been setup
35 | tower_fn (callable): takes a list of input tensors and builds the tower.
36 |
37 | Returns:
38 | The return value of tower_fn called under the proper context.
39 | """
40 | assert input.setup_done()
41 | logger.info("Building predictor tower '{}' on device {} ...".format(
42 | self._ns_name, self._device))
43 |
44 | with tf.device(self._device), \
45 | TowerContext(
46 | self._ns_name, is_training=False, vs_name=self._vs_name):
47 | inputs = input.get_input_tensors()
48 | assert isinstance(inputs, (list, tuple)), inputs
49 | return tower_fn(*inputs)
50 |
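
# A sketch of driving SimplePredictBuilder, modeled on how offline
# predictors use it; my_model and inputs_desc are stand-ins for your own
# definitions, and PlaceholderInput is assumed from tensorpack.input_source:
import tensorflow as tf
from tensorpack.input_source import PlaceholderInput

def predict_tower(image):                  # tower_fn: input tensors -> outputs
    logits = my_model(image)               # hypothetical model-building function
    return tf.nn.softmax(logits, name='prob')

inp = PlaceholderInput()
inp.setup(inputs_desc)                     # inputs_desc: your list of InputDesc
builder = SimplePredictBuilder(ns_name='pred', device=0)
prob = builder.build(inp, predict_tower)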
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/input_source/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: __init__.py
3 |
4 | # https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
5 | STATICA_HACK = True
6 | globals()['kcah_acitats'[::-1].upper()] = False
7 | if STATICA_HACK:
8 | from .input_source_base import *
9 | from .input_source import *
10 |
11 | from pkgutil import iter_modules
12 | import os
13 | import os.path
14 |
15 | __all__ = []
16 |
17 |
18 | def global_import(name):
19 | p = __import__(name, globals(), locals(), level=1)
20 | lst = p.__all__ if '__all__' in dir(p) else []
21 | del globals()[name]
22 | for k in lst:
23 | if not k.startswith('__'):
24 | globals()[k] = p.__dict__[k]
25 | __all__.append(k)
26 |
27 |
28 | _CURR_DIR = os.path.dirname(__file__)
29 | _SKIP = []
30 | for _, module_name, _ in iter_modules(
31 | [_CURR_DIR]):
32 | srcpath = os.path.join(_CURR_DIR, module_name + '.py')
33 | if not os.path.isfile(srcpath):
34 | continue
35 | if module_name.startswith('_'):
36 | continue
37 | if module_name not in _SKIP:
38 | global_import(module_name)
39 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/libinfo.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 |
4 | # issue#7378 may happen with custom opencv. It doesn't hurt to disable opencl
5 | os.environ['OPENCV_OPENCL_RUNTIME'] = 'disabled' # https://github.com/opencv/opencv/pull/10155
6 | try:
7 | # issue#1924 may happen on old systems
8 | import cv2 # noqa
9 | if int(cv2.__version__.split('.')[0]) == 3:
10 | cv2.ocl.setUseOpenCL(False)
11 | # check if cv is built with cuda
12 | info = cv2.getBuildInformation().split('\n')
13 | for line in info:
14 | if 'use cuda' in line.lower():
15 | answer = line.split()[-1].lower()
16 | if answer == 'yes':
17 | # issue#1197
18 | print("OpenCV is built with CUDA support. "
19 | "This may cause slow initialization or sometimes segfault with TensorFlow.")
20 | break
21 | except (ImportError, TypeError):
22 | pass
23 |
24 | os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1' # issue#9339
25 | os.environ['TF_AUTOTUNE_THRESHOLD'] = '2' # use more warm-up
26 |
27 | # Since 1.3, this is not needed
28 | os.environ['TF_AVGPOOL_USE_CUDNN'] = '1' # issue#8566
29 |
30 | # TF1.5 features
31 | os.environ['TF_SYNC_ON_FINISH'] = '0' # will become default
32 | os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
33 | os.environ['TF_GPU_THREAD_COUNT'] = '2'
34 |
35 | # Available in TF1.6+. Haven't seen different performance on R50.
36 | # NOTE TF set it to 0 by default, because:
37 | # this mode may use scaled atomic integer reduction that may cause a numerical
38 | # overflow for certain input data range.
39 | # os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
40 |
41 | try:
42 | import tensorflow as tf # noqa
43 | _version = tf.__version__.split('.')
44 | assert int(_version[0]) >= 1, "TF>=1.0 is required!"
45 | if int(_version[1]) < 3:
46 | print("TF<1.3 support will be removed after 2018-03-15! Actually many examples already require TF>=1.3.")
47 | _HAS_TF = True
48 | except ImportError:
49 | _HAS_TF = False
50 |
51 |
52 | __version__ = '0.8.5'
53 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/models/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: __init__.py
3 |
4 | # https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
5 | STATICA_HACK = True
6 | globals()['kcah_acitats'[::-1].upper()] = False
7 | if STATICA_HACK:
8 | from .batch_norm import *
9 | from .common import *
10 | from .conv2d import *
11 | from .fc import *
12 | from .image_sample import *
13 | from .layer_norm import *
14 | from .linearwrap import *
15 | from .nonlin import *
16 | from .pool import *
17 | from .regularize import *
18 |
19 |
20 | from pkgutil import iter_modules
21 | import os
22 | import os.path
23 | # this line is necessary for _TFModuleFunc to work
24 | import tensorflow as tf # noqa: F401
25 |
26 | __all__ = []
27 |
28 |
29 | def _global_import(name):
30 | p = __import__(name, globals(), locals(), level=1)
31 | lst = p.__all__ if '__all__' in dir(p) else dir(p)
32 | del globals()[name]
33 | for k in lst:
34 | if not k.startswith('__'):
35 | globals()[k] = p.__dict__[k]
36 | __all__.append(k)
37 |
38 |
39 | _CURR_DIR = os.path.dirname(__file__)
40 | _SKIP = ['utils', 'registry', 'tflayer']
41 | for _, module_name, _ in iter_modules(
42 | [_CURR_DIR]):
43 | srcpath = os.path.join(_CURR_DIR, module_name + '.py')
44 | if not os.path.isfile(srcpath):
45 | continue
46 | if module_name.startswith('_'):
47 | continue
48 | if module_name not in _SKIP:
49 | _global_import(module_name)
50 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/models/_test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: UTF-8 -*-
3 | # File: _test.py
4 |
5 |
6 | import logging
7 | import tensorflow as tf
8 | import unittest
9 |
10 |
11 | class TestModel(unittest.TestCase):
12 |
13 | def run_variable(self, var):
14 | sess = tf.Session()
15 | sess.run(tf.global_variables_initializer())
16 | if isinstance(var, list):
17 | return sess.run(var)
18 | else:
19 | return sess.run([var])[0]
20 |
21 | def make_variable(self, *args):
22 | if len(args) > 1:
23 | return [tf.Variable(k) for k in args]
24 | else:
25 | return tf.Variable(args[0])
26 |
27 |
28 | def run_test_case(case):
29 | suite = unittest.TestLoader().loadTestsFromTestCase(case)
30 | unittest.TextTestRunner(verbosity=2).run(suite)
31 |
32 |
33 | if __name__ == '__main__':
34 | import tensorpack
35 | from tensorpack.utils import logger
36 | from . import *
37 | logger.setLevel(logging.CRITICAL)
38 | subs = tensorpack.models._test.TestModel.__subclasses__()
39 | for cls in subs:
40 | run_test_case(cls)
41 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/models/common.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: common.py
4 |
5 | from .registry import layer_register # noqa
6 | from .utils import VariableHolder # noqa
7 |
8 | __all__ = ['layer_register', 'VariableHolder']
9 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/models/fc.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: UTF-8 -*-
3 | # File: fc.py
4 |
5 |
6 | import tensorflow as tf
7 |
8 | from .common import layer_register, VariableHolder
9 | from .tflayer import convert_to_tflayer_args, rename_get_variable
10 | from ..tfutils import symbolic_functions as symbf
11 |
12 | __all__ = ['FullyConnected']
13 |
14 |
15 | @layer_register(log_shape=True)
16 | @convert_to_tflayer_args(
17 | args_names=['units'],
18 | name_mapping={'out_dim': 'units'})
19 | def FullyConnected(
20 | inputs,
21 | units,
22 | activation=None,
23 | use_bias=True,
24 | kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0),
25 | bias_initializer=tf.zeros_initializer(),
26 | kernel_regularizer=None,
27 | bias_regularizer=None,
28 | activity_regularizer=None):
29 | """
30 | A wrapper around `tf.layers.Dense`.
31 | One difference to maintain backward-compatibility:
32 | Default weight initializer is variance_scaling_initializer(2.0).
33 |
34 | Variable Names:
35 |
36 | * ``W``: weights of shape [in_dim, out_dim]
37 | * ``b``: bias
38 | """
39 |
40 | inputs = symbf.batch_flatten(inputs)
41 | with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
42 | layer = tf.layers.Dense(
43 | units=units,
44 | activation=activation,
45 | use_bias=use_bias,
46 | kernel_initializer=kernel_initializer,
47 | bias_initializer=bias_initializer,
48 | kernel_regularizer=kernel_regularizer,
49 | bias_regularizer=bias_regularizer,
50 | activity_regularizer=activity_regularizer)
51 | ret = layer.apply(inputs, scope=tf.get_variable_scope())
52 | ret = tf.identity(ret, name='output')
53 |
54 | ret.variables = VariableHolder(W=layer.kernel)
55 | if use_bias:
56 | ret.variables.b = layer.bias
57 | return ret
58 |
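
# A short usage sketch (TF 1.x). The layer flattens non-batch dimensions
# itself via batch_flatten, so 4D input is fine:
x = tf.placeholder(tf.float32, [None, 28, 28, 3])
h = FullyConnected('fc0', x, 512, activation=tf.nn.relu)
logits = FullyConnected('fc1', h, units=10)   # out_dim=10 also works, via name_mapping
print(logits.variables.W)                     # the kernel, renamed to fc1/W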
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/models/layer_norm.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: layer_norm.py
4 |
5 |
6 | import tensorflow as tf
7 | from .common import layer_register, VariableHolder
8 | from ..utils.argtools import get_data_format
9 |
10 | __all__ = ['LayerNorm', 'InstanceNorm']
11 |
12 |
13 | @layer_register()
14 | def LayerNorm(
15 | x, epsilon=1e-5,
16 | use_bias=True, use_scale=True,
17 | gamma_init=None, data_format='channels_last'):
18 | """
19 | Layer Normalization layer, as described in the paper:
20 | `Layer Normalization `_.
21 |
22 | Args:
23 | x (tf.Tensor): a 4D or 2D tensor. When 4D, the layout should match data_format.
24 | epsilon (float): epsilon to avoid divide-by-zero.
25 | use_scale, use_bias (bool): whether to use the extra affine transformation or not.
26 | """
27 | data_format = get_data_format(data_format, tfmode=False)
28 | shape = x.get_shape().as_list()
29 | ndims = len(shape)
30 | assert ndims in [2, 4]
31 |
32 | mean, var = tf.nn.moments(x, list(range(1, len(shape))), keep_dims=True)
33 |
34 | if data_format == 'NCHW':
35 | chan = shape[1]
36 | new_shape = [1, chan, 1, 1]
37 | else:
38 | chan = shape[-1]
39 | new_shape = [1, 1, 1, chan]
40 | if ndims == 2:
41 | new_shape = [1, chan]
42 |
43 | if use_bias:
44 | beta = tf.get_variable('beta', [chan], initializer=tf.constant_initializer())
45 | beta = tf.reshape(beta, new_shape)
46 | else:
47 | beta = tf.zeros([1] * ndims, name='beta')
48 | if use_scale:
49 | if gamma_init is None:
50 | gamma_init = tf.constant_initializer(1.0)
51 | gamma = tf.get_variable('gamma', [chan], initializer=gamma_init)
52 | gamma = tf.reshape(gamma, new_shape)
53 | else:
54 | gamma = tf.ones([1] * ndims, name='gamma')
55 |
56 | ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output')
57 |
58 | vh = ret.variables = VariableHolder()
59 | if use_scale:
60 | vh.gamma = gamma
61 | if use_bias:
62 | vh.beta = beta
63 | return ret
64 |
65 |
66 | @layer_register()
67 | def InstanceNorm(x, epsilon=1e-5, use_affine=True, gamma_init=None, data_format='channels_last'):
68 | """
69 | Instance Normalization, as in the paper:
70 | `Instance Normalization: The Missing Ingredient for Fast Stylization
71 | `_.
72 |
73 | Args:
74 | x (tf.Tensor): a 4D tensor.
75 | epsilon (float): avoid divide-by-zero
76 | use_affine (bool): whether to apply learnable affine transformation
77 | """
78 | data_format = get_data_format(data_format, tfmode=False)
79 | shape = x.get_shape().as_list()
80 | assert len(shape) == 4, "Input of InstanceNorm has to be 4D!"
81 |
82 | if data_format == 'NHWC':
83 | axis = [1, 2]
84 | ch = shape[3]
85 | new_shape = [1, 1, 1, ch]
86 | else:
87 | axis = [2, 3]
88 | ch = shape[1]
89 | new_shape = [1, ch, 1, 1]
90 | assert ch is not None, "Input of InstanceNorm requires a known channel!"
91 |
92 | mean, var = tf.nn.moments(x, axis, keep_dims=True)
93 |
94 | if not use_affine:
95 | return tf.divide(x - mean, tf.sqrt(var + epsilon), name='output')
96 |
97 | beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer())
98 | beta = tf.reshape(beta, new_shape)
99 | if gamma_init is None:
100 | gamma_init = tf.constant_initializer(1.0)
101 | gamma = tf.get_variable('gamma', [ch], initializer=gamma_init)
102 | gamma = tf.reshape(gamma, new_shape)
103 | ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output')
104 |
105 | vh = ret.variables = VariableHolder()
106 | if use_affine:
107 | vh.gamma = gamma
108 | vh.beta = beta
109 | return ret
110 |
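
# Usage sketch (TF 1.x). Both layers are registered, so the first
# positional argument is the variable scope name:
x = tf.placeholder(tf.float32, [None, 32, 32, 64])   # NHWC
y = LayerNorm('ln', x)          # moments over H, W, C for each sample
z = InstanceNorm('inorm', x)    # moments over H, W for each sample & channel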
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/models/linearwrap.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: linearwrap.py
4 |
5 |
6 | import six
7 | from types import ModuleType
8 | from .registry import get_registered_layer
9 |
10 | __all__ = ['LinearWrap']
11 |
12 |
13 | class LinearWrap(object):
14 | """ A simple wrapper to easily create "linear" graph,
15 | consisting of layers / symbolic functions with only one input & output.
16 | """
17 |
18 | class _TFModuleFunc(object):
19 | def __init__(self, mod, tensor):
20 | self._mod = mod
21 | self._t = tensor
22 |
23 | def __getattr__(self, name):
24 | ret = getattr(self._mod, name)
25 | if isinstance(ret, ModuleType):
26 | return LinearWrap._TFModuleFunc(ret, self._t)
27 | else:
28 | # assume to be a tf function
29 | def f(*args, **kwargs):
30 | o = ret(self._t, *args, **kwargs)
31 | return LinearWrap(o)
32 | return f
33 |
34 | def __init__(self, tensor):
35 | """
36 | Args:
37 | tensor (tf.Tensor): the tensor to wrap
38 | """
39 | self._t = tensor
40 |
41 | def __getattr__(self, layer_name):
42 | layer = get_registered_layer(layer_name)
43 | if layer is not None:
44 | # this is a registered tensorpack layer
45 | # parse arguments by tensorpack model convention
46 | if layer.use_scope:
47 | def layer_func(name, *args, **kwargs):
48 | ret = layer(name, self._t, *args, **kwargs)
49 | return LinearWrap(ret)
50 | else:
51 | def layer_func(*args, **kwargs):
52 | if len(args) and isinstance(args[0], six.string_types):
53 | name, args = args[0], args[1:]
54 | ret = layer(name, self._t, *args, **kwargs)
55 | else:
56 | ret = layer(self._t, *args, **kwargs)
57 | return LinearWrap(ret)
58 | return layer_func
59 | else:
60 | assert layer_name == 'tf', \
61 | "Calling LinearWrap.{}:" \
62 | " neither a layer nor 'tf'! " \
63 | "Did you forget to extract tensor from LinearWrap?".format(layer_name)
64 | import tensorflow as layer # noqa
65 | assert isinstance(layer, ModuleType), layer
66 | return LinearWrap._TFModuleFunc(layer, self._t)
67 |
68 | def apply(self, func, *args, **kwargs):
69 | """
70 | Apply a function on the wrapped tensor.
71 |
72 | Returns:
73 | LinearWrap: ``LinearWrap(func(self.tensor(), *args, **kwargs))``.
74 | """
75 | ret = func(self._t, *args, **kwargs)
76 | return LinearWrap(ret)
77 |
78 | def apply2(self, func, *args, **kwargs):
79 | """
80 | Apply a function on the wrapped tensor. The tensor
81 | will be the second argument of func.
82 |
83 | Returns:
84 | LinearWrap: ``LinearWrap(func(args[0], self.tensor(), *args[1:], **kwargs))``.
85 | """
86 | ret = func(args[0], self._t, *(args[1:]), **kwargs)
87 | return LinearWrap(ret)
88 |
89 | def __call__(self):
90 | """
91 | Returns:
92 | tf.Tensor: the underlying wrapped tensor.
93 | """
94 | return self._t
95 |
96 | def tensor(self):
97 | """
98 | Equivalent to ``self.__call__()``.
99 |
100 | Returns:
101 | tf.Tensor: the underlying wrapped tensor.
102 | """
103 | return self._t
104 |
105 | def print_tensor(self):
106 | """
107 | Print the underlying tensor and return self. Can be useful to get the
108 | name of tensors inside :class:`LinearWrap`.
109 |
110 | :return: self
111 | """
112 | print(self._t)
113 | return self
114 |
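
# A chaining sketch, roughly following the tensorpack examples (assumes
# Conv2D/MaxPooling/FullyConnected are registered layers in scope, and
# image is an NHWC tensor defined elsewhere; argument names may differ
# slightly across versions):
logits = (LinearWrap(image)
          .Conv2D('conv0', 32, 3, activation=tf.nn.relu)
          .MaxPooling('pool0', 2)
          .FullyConnected('fc0', 10)())    # trailing () unwraps the tf.Tensor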
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/models/nonlin.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: UTF-8 -*-
3 | # File: nonlin.py
4 |
5 |
6 | import tensorflow as tf
7 |
8 | from .common import layer_register, VariableHolder
9 | from .batch_norm import BatchNorm
10 | from ..utils.develop import log_deprecated
11 |
12 | __all__ = ['Maxout', 'PReLU', 'LeakyReLU', 'BNReLU']
13 |
14 |
15 | @layer_register(use_scope=None)
16 | def Maxout(x, num_unit):
17 | """
18 | Maxout as in the paper `Maxout Networks `_.
19 |
20 | Args:
21 | x (tf.Tensor): a NHWC or NC tensor. Channel has to be known.
22 | num_unit (int): an int. C must be divisible by num_unit.
23 |
24 | Returns:
25 | tf.Tensor: of shape NHW(C/num_unit) named ``output``.
26 | """
27 | input_shape = x.get_shape().as_list()
28 | ndim = len(input_shape)
29 | assert ndim == 4 or ndim == 2
30 | ch = input_shape[-1]
31 | assert ch is not None and ch % num_unit == 0
32 | if ndim == 4:
33 | x = tf.reshape(x, [-1, input_shape[1], input_shape[2], ch // num_unit, num_unit])  # integer division: a float dim breaks reshape on Python 3
34 | else:
35 | x = tf.reshape(x, [-1, ch // num_unit, num_unit])
36 | return tf.reduce_max(x, ndim, name='output')
37 |
38 |
39 | @layer_register()
40 | def PReLU(x, init=0.001, name='output'):
41 | """
42 | Parameterized ReLU as in the paper `Delving Deep into Rectifiers: Surpassing
43 | Human-Level Performance on ImageNet Classification
44 | `_.
45 |
46 | Args:
47 | x (tf.Tensor): input
48 | init (float): initial value for the learnable slope.
49 | name (str): name of the output.
50 |
51 | Variable Names:
52 |
53 | * ``alpha``: learnable slope.
54 | """
55 | init = tf.constant_initializer(init)
56 | alpha = tf.get_variable('alpha', [], initializer=init)
57 | x = ((1 + alpha) * x + (1 - alpha) * tf.abs(x))
58 | ret = tf.multiply(x, 0.5, name=name)
59 |
60 | ret.variables = VariableHolder(alpha=alpha)
61 | return ret
62 |
63 |
64 | @layer_register(use_scope=None)
65 | def LeakyReLU(x, alpha, name='output'):
66 | """
67 | Leaky ReLU as in paper `Rectifier Nonlinearities Improve Neural Network Acoustic
68 | Models
69 | `_.
70 |
71 | Args:
72 | x (tf.Tensor): input
73 | alpha (float): the slope.
74 | """
75 | log_deprecated("LeakyReLU", "Use tf.nn.leaky_relu in TF 1.4 instead!", "2018-03-30")
76 | return tf.maximum(x, alpha * x, name=name)
77 |
78 |
79 | @layer_register(use_scope=None)
80 | def BNReLU(x, name=None):
81 | """
82 | A shorthand of BatchNormalization + ReLU.
83 | """
84 | x = BatchNorm('bn', x)
85 | x = tf.nn.relu(x, name=name)
86 | return x
87 |
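
# Usage sketch (TF 1.x). Maxout/LeakyReLU/BNReLU are registered with
# use_scope=None, so a name is optional; PReLU creates a variable and
# therefore takes a scope name. (BNReLU builds a BatchNorm layer, which
# additionally expects a TowerContext when run for real.)
x = tf.placeholder(tf.float32, [None, 16, 16, 64])
y = Maxout(x, 4)          # -> shape [None, 16, 16, 16]
z = PReLU('prelu', x)     # learnable slope stored as 'prelu/alpha'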
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/models/shape_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: shape_utils.py
4 |
5 | import tensorflow as tf
6 |
7 | __all__ = []
8 |
9 |
10 | class StaticDynamicAxis(object):
11 | def __init__(self, static, dynamic):
12 | self.static = static
13 | self.dynamic = dynamic
14 |
15 | def apply(self, f):
16 | try:
17 | st = f(self.static)
18 | return StaticDynamicAxis(st, st)
19 | except TypeError:
20 | return StaticDynamicAxis(None, f(self.dynamic))
21 |
22 | def __str__(self):
23 | return "S={}, D={}".format(str(self.static), str(self.dynamic))
24 |
25 |
26 | def DynamicLazyAxis(shape, idx):
27 | return lambda: shape[idx]
28 |
29 |
30 | def StaticLazyAxis(dim):
31 | return lambda: dim
32 |
33 |
34 | class StaticDynamicShape(object):
35 | def __init__(self, tensor):
36 | assert isinstance(tensor, tf.Tensor), tensor
37 | ndims = tensor.shape.ndims
38 | self.static = tensor.shape.as_list()
39 | if tensor.shape.is_fully_defined():
40 | self.dynamic = self.static[:]
41 | else:
42 | dynamic = tf.shape(tensor)
43 | self.dynamic = [DynamicLazyAxis(dynamic, k) for k in range(ndims)]
44 |
45 | for k in range(ndims):
46 | if self.static[k] is not None:
47 | self.dynamic[k] = StaticLazyAxis(self.static[k])
48 |
49 | def apply(self, axis, f):
50 | if self.static[axis] is not None:
51 | try:
52 | st = f(self.static[axis])
53 | self.static[axis] = st
54 | self.dynamic[axis] = StaticLazyAxis(st)
55 | return
56 | except TypeError:
57 | pass
58 | self.static[axis] = None
59 | dyn = self.dynamic[axis]
60 | self.dynamic[axis] = lambda: f(dyn())
61 |
62 | def get_static(self):
63 | return self.static
64 |
65 | @property
66 | def ndims(self):
67 | return len(self.static)
68 |
69 | def get_dynamic(self, axis=None):
70 | if axis is None:
71 | return [self.dynamic[k]() for k in range(self.ndims)]
72 | return self.dynamic[axis]()
73 |
74 |
75 | if __name__ == '__main__':
76 | x = tf.placeholder(tf.float32, shape=[None, 3, None, 10])
77 | shape = StaticDynamicShape(x)
78 | shape.apply(1, lambda x: x * 3)
79 | shape.apply(2, lambda x: x + 5)
80 | print(shape.get_static())
81 | print(shape.get_dynamic())
82 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/models/shapes.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: shapes.py
4 |
5 |
6 | import tensorflow as tf
7 | from .common import layer_register
8 |
9 | __all__ = ['ConcatWith']
10 |
11 |
12 | @layer_register(use_scope=None)
13 | def ConcatWith(x, tensor, dim):
14 | """
15 | A wrapper around ``tf.concat`` to cooperate with :class:`LinearWrap`.
16 |
17 | Args:
18 | x (tf.Tensor): input
19 | tensor (list[tf.Tensor]): a tensor or list of tensors to concatenate with x.
20 | x will be at the beginning
21 | dim (int): the dimension along which to concatenate
22 |
23 | Returns:
24 | tf.Tensor: ``tf.concat([x] + tensor, dim)``
25 | """
26 | if type(tensor) != list:
27 | tensor = [tensor]
28 | return tf.concat([x] + tensor, dim)
29 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/models/softmax.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-  # File: softmax.py
3 |
4 |
5 | import tensorflow as tf
6 | from .common import layer_register
7 | from ..utils.develop import log_deprecated
8 |
9 | __all__ = ['SoftMax']
10 |
11 |
12 | @layer_register(use_scope=None)
13 | def SoftMax(x, use_temperature=False, temperature_init=1.0):
14 | """
15 | A SoftMax layer (w/o linear projection) with optional temperature, as
16 | defined in the paper `Distilling the Knowledge in a Neural Network
17 | `_.
18 |
19 | Args:
20 | x (tf.Tensor): input of any dimension. Softmax will be performed on
21 | the last dimension.
22 | use_temperature (bool): use a learnable temperature or not.
23 | temperature_init (float): initial value of the temperature.
24 |
25 | Returns:
26 | tf.Tensor: a tensor of the same shape named ``output``.
27 |
28 | Variable Names:
29 |
30 | * ``invtemp``: 1.0/temperature.
31 | """
32 | log_deprecated("models.SoftMax", "Please implement it by yourself!", "2018-05-01")
33 | if use_temperature:
34 | t = tf.get_variable('invtemp', [],
35 | initializer=tf.constant_initializer(1.0 / float(temperature_init)))
36 | x = x * t
37 | return tf.nn.softmax(x, name='output')
38 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/models/tflayer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: tflayer.py
4 |
5 | import tensorflow as tf
6 | import six
7 | import functools
8 |
9 | from ..utils.argtools import get_data_format
10 | from ..tfutils.common import get_tf_version_number
11 | from ..tfutils.varreplace import custom_getter_scope
12 |
13 |
14 | def map_common_tfargs(kwargs):
15 | df = kwargs.pop('data_format', None)
16 | if df is not None:
17 | df = get_data_format(df, tfmode=True)
18 | kwargs['data_format'] = df
19 |
20 | old_nl = kwargs.pop('nl', None)
21 | if old_nl is not None:
22 | kwargs['activation'] = lambda x, name=None: old_nl(x, name=name)
23 |
24 | if 'W_init' in kwargs:
25 | kwargs['kernel_initializer'] = kwargs.pop('W_init')
26 |
27 | if 'b_init' in kwargs:
28 | kwargs['bias_initializer'] = kwargs.pop('b_init')
29 | return kwargs
30 |
31 |
32 | def convert_to_tflayer_args(args_names, name_mapping):
33 | """
34 | After applying this decorator:
35 | 1. data_format becomes tf.layers style
36 | 2. nl becomes activation
37 | 3. initializers are renamed
38 | 4. positional args are transformed to corresponding kwargs, according to args_names
39 | 5. kwargs are mapped to tf.layers names if needed, by name_mapping
40 | """
41 |
42 | def decorator(func):
43 | @functools.wraps(func)
44 | def decorated_func(inputs, *args, **kwargs):
45 | kwargs = map_common_tfargs(kwargs)
46 |
47 | posarg_dic = {}
48 | assert len(args) <= len(args_names), \
49 | "Please use kwargs instead of positional args to call this model, " \
50 | "except for the following arguments: {}".format(', '.join(args_names))
51 | for pos_arg, name in zip(args, args_names):
52 | posarg_dic[name] = pos_arg
53 |
54 | ret = {}
55 | for name, arg in six.iteritems(kwargs):
56 | newname = name_mapping.get(name, None)
57 | if newname is not None:
58 | assert newname not in kwargs, \
59 | "Argument {} and {} conflicts!".format(name, newname)
60 | else:
61 | newname = name
62 | ret[newname] = arg
63 | ret.update(posarg_dic) # Let pos arg overwrite kw arg, for argscope to work
64 |
65 | return func(inputs, **ret)
66 |
67 | return decorated_func
68 |
69 | return decorator
70 |
71 |
72 | def rename_get_variable(mapping):
73 | """
74 | Args:
75 | mapping(dict): an old -> new mapping for variable basename. e.g. {'kernel': 'W'}
76 | """
77 | def custom_getter(getter, name, *args, **kwargs):
78 | splits = name.split('/')
79 | basename = splits[-1]
80 | if basename in mapping:
81 | basename = mapping[basename]
82 | splits[-1] = basename
83 | name = '/'.join(splits)
84 | return getter(name, *args, **kwargs)
85 | return custom_getter_scope(custom_getter)
86 |
87 |
88 | def monkeypatch_tf_layers():
89 | if get_tf_version_number() < 1.4:
90 | if not hasattr(tf.layers, 'Dense'):
91 | from tensorflow.python.layers.core import Dense
92 | tf.layers.Dense = Dense
93 |
94 | from tensorflow.python.layers.normalization import BatchNormalization
95 | tf.layers.BatchNormalization = BatchNormalization
96 |
97 | from tensorflow.python.layers.convolutional import Conv2DTranspose, Conv2D
98 | tf.layers.Conv2DTranspose = Conv2DTranspose
99 | tf.layers.Conv2D = Conv2D
100 |
101 | from tensorflow.python.layers.pooling import MaxPooling2D, AveragePooling2D
102 | tf.layers.MaxPooling2D = MaxPooling2D
103 | tf.layers.AveragePooling2D = AveragePooling2D
104 |
105 |
106 | monkeypatch_tf_layers()
107 |
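
# A minimal sketch of rename_get_variable: inside the scope, tf.layers
# creates variables with basenames 'kernel'/'bias', but the stored
# variables get the mapped names W/b (mirroring how fc.py uses it):
x = tf.placeholder(tf.float32, [None, 4])
with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
    layer = tf.layers.Dense(units=8)
    y = layer.apply(x, scope=tf.get_variable_scope())
print(layer.kernel.name)    # e.g. 'W:0' rather than 'dense/kernel:0'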
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/models/utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: utils.py
4 |
5 | import six
6 |
7 |
8 | class VariableHolder(object):
9 | """ A proxy to access variables defined in a layer. """
10 | def __init__(self, **kwargs):
11 | """
12 | Args:
13 | kwargs: {name:variable}
14 | """
15 | self._vars = {}
16 | for k, v in six.iteritems(kwargs):
17 | self._add_variable(k, v)
18 |
19 | def _add_variable(self, name, var):
20 | assert name not in self._vars
21 | self._vars[name] = var
22 |
23 | def __setattr__(self, name, var):
24 | if not name.startswith('_'):
25 | self._add_variable(name, var)
26 | else:
27 | # private attributes
28 | super(VariableHolder, self).__setattr__(name, var)
29 |
30 | def __getattr__(self, name):
31 | return self._vars[name]
32 |
33 | def all(self):
34 | """
35 | Returns:
36 | list of all variables
37 | """
38 | return list(six.itervalues(self._vars))
39 |
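
# A small demonstration of the proxy behavior: any non-underscore attribute
# assignment registers a variable, and all() lists them:
import tensorflow as tf

w_var = tf.Variable(tf.zeros([4, 8]), name='W')
b_var = tf.Variable(tf.zeros([8]), name='b')
vh = VariableHolder(W=w_var)
vh.b = b_var                            # __setattr__ routes to _add_variable
assert vh.W is w_var and vh.b is b_var  # __getattr__ reads from _vars
print(vh.all())                         # [w_var, b_var] (order not guaranteed)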
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/predict/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: __init__.py
3 |
4 | # https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
5 | STATICA_HACK = True
6 | globals()['kcah_acitats'[::-1].upper()] = False
7 | if STATICA_HACK:
8 | from .base import *
9 | from .concurrency import *
10 | from .config import *
11 | from .dataset import *
12 | from .multigpu import *
13 |
14 |
15 | from pkgutil import iter_modules
16 | import os
17 | import os.path
18 |
19 | __all__ = []
20 |
21 |
22 | def global_import(name):
23 | p = __import__(name, globals(), locals(), level=1)
24 | lst = [k for k in (p.__all__ if '__all__' in dir(p) else dir(p)) if not k.startswith('__')]
25 | if lst:
26 | del globals()[name]
27 | for k in lst:
28 | globals()[k] = p.__dict__[k]
29 | __all__.append(k)
30 |
31 |
32 | _CURR_DIR = os.path.dirname(__file__)
33 | for _, module_name, _ in iter_modules(
34 | [_CURR_DIR]):
35 | srcpath = os.path.join(_CURR_DIR, module_name + '.py')
36 | if not os.path.isfile(srcpath):
37 | continue
38 | if module_name.startswith('_'):
39 | continue
40 | global_import(module_name)
41 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/predict/config.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: config.py
3 |
4 |
5 | import tensorflow as tf
6 | import six
7 |
8 | from ..graph_builder import ModelDescBase
9 | from ..tfutils import get_default_sess_config
10 | from ..tfutils.tower import TowerFuncWrapper
11 | from ..tfutils.sessinit import SessionInit, JustCurrentSession
12 |
13 | __all__ = ['PredictConfig']
14 |
15 |
16 | class PredictConfig(object):
17 | def __init__(self,
18 | model=None,
19 | inputs_desc=None,
20 | tower_func=None,
21 |
22 | input_names=None,
23 | output_names=None,
24 |
25 | session_creator=None,
26 | session_init=None,
27 | return_input=False,
28 | create_graph=True,
29 | ):
30 | """
31 | You need to set either `model`, or `inputs_desc` plus `tower_func`.
32 | They are needed to construct the graph.
33 | You'll also have to set `output_names` as it does not have a default.
34 |
35 | Args:
36 | model (ModelDescBase): to be used to obtain inputs_desc and tower_func.
37 | inputs_desc ([InputDesc]):
38 | tower_func: a callable which takes input tensors and construct a tower.
39 |
40 | input_names (list): a list of input tensor names. Defaults to match inputs_desc.
41 | output_names (list): a list of names of the output tensors to predict, the
42 | tensors can be any computable tensor in the graph.
43 |
44 | session_creator (tf.train.SessionCreator): how to create the
45 | session. Defaults to :class:`tf.train.ChiefSessionCreator()`.
46 | session_init (SessionInit): how to initialize variables of the session.
47 | Defaults to do nothing.
48 |
49 | return_input (bool): same as in :attr:`PredictorBase.return_input`.
50 | create_graph (bool): create a new graph, or use the default graph
51 | when predictor is first initialized.
52 | """
53 | def assert_type(v, tp):
54 | assert isinstance(v, tp), v.__class__
55 | if model is not None:
56 | assert_type(model, ModelDescBase)
57 | assert inputs_desc is None and tower_func is None
58 | self.inputs_desc = model.get_inputs_desc()
59 | self.tower_func = TowerFuncWrapper(model.build_graph, self.inputs_desc)
60 | else:
61 | assert inputs_desc is not None and tower_func is not None
62 | self.inputs_desc = inputs_desc
63 | self.tower_func = TowerFuncWrapper(tower_func, inputs_desc)
64 |
65 | if session_init is None:
66 | session_init = JustCurrentSession()
67 | self.session_init = session_init
68 | assert_type(self.session_init, SessionInit)
69 |
70 | if session_creator is None:
71 | self.session_creator = tf.train.ChiefSessionCreator(config=get_default_sess_config())
72 | else:
73 | self.session_creator = session_creator
74 |
75 | # inputs & outputs
76 | self.input_names = input_names
77 | if self.input_names is None:
78 | self.input_names = [k.name for k in self.inputs_desc]
79 | self.output_names = output_names
80 | assert_type(self.output_names, list)
81 | assert_type(self.input_names, list)
82 | assert len(self.input_names), self.input_names
83 | for v in self.input_names:
84 | assert_type(v, six.string_types)
85 | assert len(self.output_names), self.output_names
86 |
87 | self.return_input = bool(return_input)
88 | self.create_graph = bool(create_graph)
89 |
90 | def _maybe_create_graph(self):
91 | if self.create_graph:
92 | return tf.Graph()
93 | return tf.get_default_graph()
94 |
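
# A typical construction, following the docstring above; my_model and the
# checkpoint path are stand-ins for your own:
from tensorpack.tfutils.sessinit import SaverRestore

pred_config = PredictConfig(
    model=my_model,                       # a ModelDescBase instance
    session_init=SaverRestore('/path/to/checkpoint'),
    input_names=['input'],                # must match your InputDesc names
    output_names=['prob'])                # any computable tensors in the graph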
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/tfutils/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: __init__.py
3 |
4 |
5 | from .tower import get_current_tower_context, TowerContext
6 |
7 | # https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
8 | STATICA_HACK = True
9 | globals()['kcah_acitats'[::-1].upper()] = False
10 | if STATICA_HACK:
11 | from .common import *
12 | from .sessinit import *
13 | from .argscope import *
14 |
15 |
16 | # don't want to include everything from .tower
17 | __all__ = ['get_current_tower_context', 'TowerContext']
18 |
19 |
20 | def _global_import(name):
21 | p = __import__(name, globals(), None, level=1)
22 | lst = p.__all__ if '__all__' in dir(p) else dir(p)
23 | for k in lst:
24 | if not k.startswith('__'):
25 | globals()[k] = p.__dict__[k]
26 | __all__.append(k)
27 |
28 |
29 | _TO_IMPORT = frozenset([
30 | 'common',
31 | 'sessinit',
32 | 'argscope',
33 | ])
34 |
35 | for module_name in _TO_IMPORT:
36 | _global_import(module_name)
37 |
38 | """
39 | TODO remove this line in the future.
40 | Better to keep submodule names (sesscreate, varmanip, etc) out of __all__,
41 | so that these names will be invisible under `tensorpack.` namespace.
42 |
43 | To use these utilities, users are expected to import them explicitly, e.g.:
44 |
45 | import tensorpack.tfutils.symbolic_functions as symbf
46 | """
47 | __all__.extend(['sessinit', 'summary', 'optimizer',
48 | 'sesscreate', 'gradproc', 'varreplace', 'symbolic_functions',
49 | 'distributed', 'tower'])
50 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/tfutils/argscope.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: argscope.py
4 |
5 | from contextlib import contextmanager
6 | from collections import defaultdict
7 | import copy
8 |
9 | __all__ = ['argscope', 'get_arg_scope']
10 |
11 | _ArgScopeStack = []
12 |
13 |
14 | @contextmanager
15 | def argscope(layers, **kwargs):
16 | """
17 | Args:
18 | layers (list or layer): layer or list of layers to apply the arguments to.
19 |
20 | Returns:
21 | a context where all appearances of these layers will by default have the
22 | arguments specified by kwargs.
23 |
24 | Example:
25 | .. code-block:: python
26 |
27 | with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu, out_channel=32):
28 | x = Conv2D('conv0', x)
29 | x = Conv2D('conv1', x)
30 | x = Conv2D('conv2', x, out_channel=64) # override argscope
31 |
32 | """
33 | if not isinstance(layers, list):
34 | layers = [layers]
35 |
36 | # def _check_args_exist(l):
37 | # args = inspect.getargspec(l).args
38 | # for k, v in six.iteritems(kwargs):
39 | # assert k in args, "No argument {} in {}".format(k, l.__name__)
40 |
41 | for l in layers:
42 | assert hasattr(l, 'symbolic_function'), "{} is not a registered layer".format(l.__name__)
43 | # _check_args_exist(l.symbolic_function)
44 |
45 | new_scope = copy.copy(get_arg_scope())
46 | for l in layers:
47 | new_scope[l.__name__].update(kwargs)
48 | _ArgScopeStack.append(new_scope)
49 | yield
50 | del _ArgScopeStack[-1]
51 |
52 |
53 | def get_arg_scope():
54 | """
55 | Returns:
56 | dict: the current argscope.
57 |
58 | An argscope is a dict of dict: ``dict[layername] = {arg: val}``
59 | """
60 | if len(_ArgScopeStack) > 0:
61 | return _ArgScopeStack[-1]
62 | else:
63 | return defaultdict(dict)
64 |
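
# The scope is just a dict of dicts, so nested argscopes merge kwargs per
# layer. A sketch, assuming Conv2D is a registered tensorpack layer:
import tensorflow as tf
from tensorpack.models import Conv2D

with argscope(Conv2D, activation=tf.nn.relu):
    with argscope(Conv2D, filters=64):      # inner scope adds/overrides kwargs
        print(get_arg_scope()['Conv2D'])    # {'activation': ..., 'filters': 64}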
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/tfutils/distributed.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: distributed.py
4 |
5 |
6 | import tensorflow as tf
7 |
8 |
9 | def get_distributed_session_creator(server):
10 | """
11 | Args:
12 | server (tf.train.Server):
13 |
14 | Returns:
15 | tf.train.SessionCreator
16 | """
17 |
18 | server_def = server.server_def
19 | is_chief = (server_def.job_name == 'worker') and (server_def.task_index == 0)
20 |
21 | init_op = tf.global_variables_initializer()
22 | local_init_op = tf.local_variables_initializer()
23 | ready_op = tf.report_uninitialized_variables()
24 | ready_for_local_init_op = tf.report_uninitialized_variables(tf.global_variables())
25 | sm = tf.train.SessionManager(
26 | local_init_op=local_init_op,
27 | ready_op=ready_op,
28 | ready_for_local_init_op=ready_for_local_init_op,
29 | graph=tf.get_default_graph())
30 |
31 | # to debug wrong variable collection
32 | # from pprint import pprint
33 | # print("GLOBAL:")
34 | # pprint([(k.name, k.device) for k in tf.global_variables()])
35 | # print("LOCAL:")
36 | # pprint([(k.name, k.device) for k in tf.local_variables()])
37 |
38 | class _Creator(tf.train.SessionCreator):
39 | def create_session(self):
40 | if is_chief:
41 | return sm.prepare_session(master=server.target, init_op=init_op)
42 | else:
43 | tf.logging.set_verbosity(tf.logging.INFO) # print message about uninitialized vars
44 | ret = sm.wait_for_session(master=server.target)
45 | tf.logging.set_verbosity(tf.logging.WARN)
46 | return ret
47 |
48 | return _Creator()
49 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/tfutils/model_utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: model_utils.py
3 | # Author: tensorpack contributors
4 |
5 | import tensorflow as tf
6 | from termcolor import colored
7 | from tabulate import tabulate
8 |
9 | from ..utils import logger
10 |
11 | __all__ = []
12 |
13 |
14 | def describe_trainable_vars():
15 | """
16 | Print a description of the current model parameters.
17 | Skip variables starting with "tower", as they are just duplicates built by data-parallel logic.
18 | """
19 | train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
20 | if len(train_vars) == 0:
21 | logger.warn("No trainable variables in the graph!")
22 | return
23 | total = 0
24 | total_bytes = 0
25 | data = []
26 | for v in train_vars:
27 | if v.name.startswith('tower'):
28 | continue
29 | shape = v.get_shape()
30 | ele = shape.num_elements()
31 | total += ele
32 | total_bytes += ele * v.dtype.size
33 | data.append([v.name, shape.as_list(), ele, v.device, v.dtype.base_dtype.name])
34 | headers = ['name', 'shape', 'dim', 'device', 'dtype']
35 |
36 | dtypes = set([x[4] for x in data])
37 | if len(dtypes) == 1:
38 | for x in data:
39 | del x[4]
40 | del headers[4]
41 |
42 | devices = set([x[3] for x in data])
43 | if len(devices) == 1:
44 | # don't log the device if all vars on the same device
45 | for x in data:
46 | del x[3]
47 | del headers[3]
48 |
49 | table = tabulate(data, headers=headers)
50 |
51 | size_mb = total_bytes / 1024.0**2
52 | summary_msg = colored(
53 | "\nTotal #vars={}, #params={}, size={:.02f}MB".format(
54 | len(data), total, size_mb), 'cyan')
55 | logger.info(colored("Trainable Variables: \n", 'cyan') + table + summary_msg)
56 |
57 |
58 | def get_shape_str(tensors):
59 | """
60 | Internally used by layer registry, to print shapes of inputs/outputs of layers.
61 |
62 | Args:
63 | tensors (list or tf.Tensor): a tensor or a list of tensors
64 | Returns:
65 | str: a string to describe the shape
66 | """
67 | if isinstance(tensors, (list, tuple)):
68 | for v in tensors:
69 | assert isinstance(v, (tf.Tensor, tf.Variable)), "Not a tensor: {}".format(type(v))
70 | shape_str = ",".join(
71 | map(lambda x: str(x.get_shape().as_list()), tensors))
72 | else:
73 | assert isinstance(tensors, (tf.Tensor, tf.Variable)), "Not a tensor: {}".format(type(tensors))
74 | shape_str = str(tensors.get_shape().as_list())
75 | return shape_str
76 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/tfutils/sesscreate.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: sesscreate.py
4 |
5 |
6 | import tensorflow as tf
7 | from .common import get_default_sess_config
8 | from ..utils import logger
9 |
10 | __all__ = ['NewSessionCreator', 'ReuseSessionCreator', 'SessionCreatorAdapter']
11 |
12 | """
13 | A SessionCreator should:
14 | (optionally) finalize the graph
15 | create the session
16 | initialize all variables
17 | return a session that is ready to use
18 | """
19 |
20 |
21 | class NewSessionCreator(tf.train.ChiefSessionCreator):
22 | def __init__(self, target='', graph=None, config=None):
23 | """
24 | Args:
25 | target, graph, config: same as :meth:`Session.__init__()`.
26 | config: defaults to :func:`tfutils.get_default_sess_config()`
27 | """
28 | assert graph is None
29 |
30 | if config is None:
31 | # distributed trainer doesn't support user-provided config
32 | # we set this attribute so that they can check
33 | self.user_provided_config = False
34 | config = get_default_sess_config()
35 | else:
36 | self.user_provided_config = True
37 | logger.warn(
38 | "Some options in custom session config may not work due to TF \
39 | bugs. See https://github.com/ppwwyyxx/tensorpack/issues/497 for workarounds.")
40 |
41 | self.config = config
42 | super(NewSessionCreator, self).__init__(master=target, config=config)
43 |
44 |
45 | class ReuseSessionCreator(tf.train.SessionCreator):
46 | def __init__(self, sess):
47 | """
48 | Args:
49 | sess (tf.Session): the session to reuse
50 | """
51 | self.sess = sess
52 |
53 | def create_session(self):
54 | return self.sess
55 |
56 |
57 | class SessionCreatorAdapter(tf.train.SessionCreator):
58 | def __init__(self, session_creator, func):
59 | """
60 | Args:
61 | session_creator (tf.train.SessionCreator): a session creator
62 | func (tf.Session -> tf.Session): takes a session created by
63 | ``session_creator``, and return a new session to be returned by ``self.create_session``
64 | """
65 | self._creator = session_creator
66 | self._func = func
67 |
68 | def create_session(self):
69 | sess = self._creator.create_session()
70 | return self._func(sess)
71 |
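
# A sketch of SessionCreatorAdapter: wrap another creator to post-process
# every created session, e.g. with the tfdbg CLI wrapper (one possibility):
from tensorflow.python import debug as tf_debug

creator = SessionCreatorAdapter(
    NewSessionCreator(),
    lambda sess: tf_debug.LocalCLIDebugWrapperSession(sess))
sess = creator.create_session()    # an initialized, debugger-wrapped session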
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/tfutils/varreplace.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: varreplace.py
4 | # Credit: Qinyao He
5 |
6 | import tensorflow as tf
7 | from contextlib import contextmanager
8 |
9 | from .common import get_tf_version_number
10 |
11 | __all__ = ['freeze_variables', 'remap_variables']
12 |
13 |
14 | @contextmanager
15 | def custom_getter_scope(custom_getter):
16 | scope = tf.get_variable_scope()
17 | if get_tf_version_number() >= 1.5:
18 | with tf.variable_scope(
19 | scope, custom_getter=custom_getter,
20 | auxiliary_name_scope=False):
21 | yield
22 | else:
23 | ns = tf.get_default_graph().get_name_scope()
24 | with tf.variable_scope(
25 | scope, custom_getter=custom_getter):
26 | with tf.name_scope(ns + '/' if ns else ''):
27 | yield
28 |
29 |
30 | def remap_variables(fn):
31 | """
32 | Use fn to map the output of any variable getter.
33 |
34 | Args:
35 | fn (tf.Variable -> tf.Tensor)
36 |
37 | Returns:
38 | a context where all the variables will be mapped by fn.
39 |
40 | Example:
41 | .. code-block:: python
42 |
43 | with varreplace.remap_variables(lambda var: quantize(var)):
44 | x = FullyConnected('fc', x, 1000) # fc/{W,b} will be quantized
45 | """
46 | def custom_getter(getter, *args, **kwargs):
47 | v = getter(*args, **kwargs)
48 | return fn(v)
49 | return custom_getter_scope(custom_getter)
50 |
51 |
52 | def freeze_variables(stop_gradient=True, skip_collection=False):
53 | """
54 | Return a context to freeze variables,
55 | by wrapping ``tf.get_variable`` with a custom getter.
56 | It works by either applying ``tf.stop_gradient`` on the variables,
57 | or by keeping them out of the ``TRAINABLE_VARIABLES`` collection, or
58 | both.
59 |
60 | Example:
61 | .. code-block:: python
62 |
63 | with varreplace.freeze_variables(stop_gradient=False, skip_collection=True):
64 | x = FullyConnected('fc', x, 1000) # fc/* will not be trained
65 |
66 | Args:
67 | stop_gradient (bool): if True, variables returned from `get_variable`
68 | will be wrapped with `tf.stop_gradient` and therefore have no
69 | gradient when used later. Note that the created variables may
70 | still have gradient when accessed by other approaches (e.g.
71 | by name, or by collection).
72 | skip_collection (bool): if True, do not add the variable to
73 | ``TRAINABLE_VARIABLES`` collection. As a result they will not be
74 | trained by default.
75 | """
76 | def custom_getter(getter, *args, **kwargs):
77 | trainable = kwargs.get('trainable', True)
78 | if skip_collection:
79 | kwargs['trainable'] = False
80 | v = getter(*args, **kwargs)
81 | if trainable and stop_gradient:
82 | v = tf.stop_gradient(v)
83 | return v
84 | return custom_getter_scope(custom_getter)
85 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/train/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: __init__.py
3 |
4 | # https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
5 | STATICA_HACK = True
6 | globals()['kcah_acitats'[::-1].upper()] = False
7 | if STATICA_HACK:
8 | from .base import *
9 | from .config import *
10 | from .interface import *
11 | from .tower import *
12 | from .trainers import *
13 |
14 |
15 | from pkgutil import iter_modules
16 | import os
17 | import os.path
18 |
19 | __all__ = []
20 |
21 |
22 | def global_import(name):
23 | p = __import__(name, globals(), locals(), level=1)
24 | lst = p.__all__ if '__all__' in dir(p) else []
25 | if lst:
26 | del globals()[name]
27 | for k in lst:
28 | globals()[k] = p.__dict__[k]
29 | __all__.append(k)
30 |
31 |
32 | _CURR_DIR = os.path.dirname(__file__)
33 | _SKIP = ['utility']
34 | for _, module_name, _ in iter_modules(
35 | [_CURR_DIR]):
36 | srcpath = os.path.join(_CURR_DIR, module_name + '.py')
37 | if not os.path.isfile(srcpath):
38 | continue
39 | if module_name.startswith('_'):
40 | continue
41 | if module_name not in _SKIP:
42 | global_import(module_name)
43 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/train/interface.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: interface.py
4 |
5 | from ..input_source import (
6 | InputSource, FeedInput, QueueInput, StagingInput, DummyConstantInput)
7 | from ..utils import logger
8 |
9 | from .config import TrainConfig
10 | from .tower import SingleCostTrainer
11 | from .trainers import SimpleTrainer
12 |
13 | __all__ = ['launch_train_with_config', 'apply_default_prefetch']
14 |
15 |
16 | def apply_default_prefetch(input_source_or_dataflow, trainer):
17 | """
18 | Apply a set of default rules to make a fast :class:`InputSource`.
19 |
20 | Args:
21 | input_source_or_dataflow(InputSource | DataFlow):
22 | trainer (Trainer):
23 |
24 | Returns:
25 | InputSource
26 | """
27 | if not isinstance(input_source_or_dataflow, InputSource):
28 | # to mimic same behavior of the old trainer interface
29 | if type(trainer) == SimpleTrainer:
30 | input = FeedInput(input_source_or_dataflow)
31 | else:
32 | logger.info("Automatically applying QueueInput on the DataFlow.")
33 | input = QueueInput(input_source_or_dataflow)
34 | else:
35 | input = input_source_or_dataflow
36 | if hasattr(trainer, 'devices'):
37 | towers = trainer.devices
38 | if len(towers) > 1:
39 | # seems to improve speed only with >1 GPUs
40 | assert not isinstance(trainer, SimpleTrainer)
41 |
42 | if not isinstance(input, (StagingInput, DummyConstantInput)):
43 | logger.info("Automatically applying StagingInput on the DataFlow.")
44 | input = StagingInput(input)
45 | return input
46 |
47 |
48 | def launch_train_with_config(config, trainer):
49 | """
50 | Train with a :class:`TrainConfig` and a :class:`Trainer`, to
51 | present a simple training interface. It basically does the following
52 | 3 things (and you can easily do them by yourself if you need more control):
53 |
54 | 1. Set up the input with automatic prefetching,
55 | from `config.data` or `config.dataflow`.
56 | 2. Call `trainer.setup_graph` with the input as well as `config.model`.
57 | 3. Call `trainer.train` with rest of the attributes of config.
58 |
59 | Args:
60 | config (TrainConfig):
61 | trainer (Trainer): an instance of :class:`SingleCostTrainer`.
62 |
63 | Examples:
64 |
65 | .. code-block:: python
66 |
67 | launch_train_with_config(
68 | config, SyncMultiGPUTrainerParameterServer(8, ps_device='gpu'))
69 | """
70 | assert isinstance(trainer, SingleCostTrainer), trainer
71 | assert isinstance(config, TrainConfig), config
72 | assert config.model is not None
73 | assert config.dataflow is not None or config.data is not None
74 |
75 | model = config.model
76 | inputs_desc = model.get_inputs_desc()
77 | input = config.data or config.dataflow
78 | input = apply_default_prefetch(input, trainer)
79 |
80 | trainer.setup_graph(
81 | inputs_desc, input,
82 | model._build_graph_get_cost, model.get_optimizer)
83 | trainer.train_with_defaults(
84 | callbacks=config.callbacks,
85 | monitors=config.monitors,
86 | session_creator=config.session_creator,
87 | session_init=config.session_init,
88 | steps_per_epoch=config.steps_per_epoch,
89 | starting_epoch=config.starting_epoch,
90 | max_epoch=config.max_epoch,
91 | extra_callbacks=config.extra_callbacks)
92 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/train/utility.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: utility.py
4 |
5 | # for backwards-compatibility
6 | from ..graph_builder.utils import ( # noqa
7 | OverrideToLocalVariable,
8 | override_to_local_variable, LeastLoadedDeviceSetter)
9 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/trainv1/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: __init__.py
3 |
4 |
5 | from pkgutil import iter_modules
6 | import os
7 | import os.path
8 |
9 | __all__ = []
10 |
11 |
12 | def global_import(name):
13 | p = __import__(name, globals(), locals(), level=1)
14 | lst = p.__all__ if '__all__' in dir(p) else []
15 | del globals()[name]
16 | for k in lst:
17 | globals()[k] = p.__dict__[k]
18 | __all__.append(k)
19 |
20 |
21 | _CURR_DIR = os.path.dirname(__file__)
22 | _SKIP = ['utility']
23 | for _, module_name, _ in iter_modules(
24 | [_CURR_DIR]):
25 | srcpath = os.path.join(_CURR_DIR, module_name + '.py')
26 | if not os.path.isfile(srcpath):
27 | continue
28 | if module_name.startswith('_'):
29 | continue
30 | if module_name not in _SKIP:
31 | global_import(module_name)
32 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/trainv1/config.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # File: config.py
3 |
4 |
5 | __all__ = ['TrainConfig']
6 |
7 | from ..train.config import TrainConfig
8 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/trainv1/distributed.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: distributed.py
4 |
5 | import os
6 |
7 | from ..utils import logger
8 | from ..callbacks import RunOp
9 | from ..tfutils.sesscreate import NewSessionCreator
10 | from ..tfutils import get_global_step_var
11 | from ..tfutils.distributed import get_distributed_session_creator
12 |
13 | from ..graph_builder.distributed import DistributedReplicatedBuilder
14 | from ..graph_builder.utils import override_to_local_variable
15 | from .base import Trainer
16 |
17 |
18 | __all__ = ['DistributedTrainerReplicated']
19 |
20 |
21 | class DistributedTrainerReplicated(Trainer):
22 |
23 | __doc__ = DistributedReplicatedBuilder.__doc__
24 |
25 | def __init__(self, config, server):
26 | """
27 | Args:
28 | config(TrainConfig): Must contain 'model' and 'data'.
29 | server(tf.train.Server): the server object with ps and workers
30 | """
31 | assert config.data is not None and config.model is not None
32 |
33 | self.server = server
34 | self.job_name = server.server_def.job_name
35 | assert self.job_name in ['ps', 'worker'], self.job_name
36 |
37 | if self.job_name == 'worker':
38 | # ps doesn't build any graph
39 | self._builder = DistributedReplicatedBuilder(config.tower, server)
40 | self.is_chief = self._builder.is_chief
41 | else:
42 | self.is_chief = False
43 | logger.info("Distributed training on cluster:\n" + str(server.server_def.cluster))
44 |
45 | self._input_source = config.data
46 |
47 | super(DistributedTrainerReplicated, self).__init__(config)
48 |
49 | def _setup(self):
50 | if self.job_name == 'ps':
51 | logger.info("Running ps {}".format(self.server.server_def.task_index))
52 | logger.info("Kill me with 'kill {}'".format(os.getpid()))
53 | self.server.join() # this will never return tensorflow#4713
54 | return
55 |
56 | with override_to_local_variable():
57 | get_global_step_var() # gs should be local
58 |
59 | # input source may create variable (queue size summary)
60 | # TODO This is not good because we don't know from here
61 | # whether something should be global or local. We now assume
62 | # they should be local.
63 | cbs = self._input_source.setup(self.model.get_inputs_desc())
64 | self._config.callbacks.extend(cbs)
65 |
66 | self.train_op, initial_sync_op, model_sync_op = self._builder.build(
67 | lambda: self.model._build_graph_get_grads(
68 | *self._input_source.get_input_tensors()),
69 | self.model.get_optimizer)
70 |
71 | # initial local_vars syncing
72 | cb = RunOp(lambda: initial_sync_op,
73 | run_before=True, run_as_trigger=False, verbose=True)
74 | cb.chief_only = False
75 | self.register_callback(cb)
76 |
77 | # model_variables syncing
78 | if model_sync_op:
79 | cb = RunOp(lambda: model_sync_op,
80 | run_before=False, run_as_trigger=True, verbose=True)
81 | logger.warn("For efficiency, local MODEL_VARIABLES are only synced to PS once "
82 | "every epoch. Be careful if you save the model more frequently than this.")
83 | self.register_callback(cb)
84 |
85 | self._set_session_creator()
86 |
87 | def _set_session_creator(self):
88 | old_sess_creator = self._config.session_creator
89 | if not isinstance(old_sess_creator, NewSessionCreator) \
90 | or old_sess_creator.user_provided_config:
91 | raise ValueError(
92 | "Cannot set session_creator or session_config for distributed training! "
93 | "To use a custom session config, pass it to tf.train.Server.")
94 |
95 | self._config.session_creator = get_distributed_session_creator(self.server)
96 |
97 | @property
98 | def _main_tower_vs_name(self):
99 | return "tower0"
100 |
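
A minimal launch sketch for this trainer. The hosts, ports, and the
`config` object (a TrainConfig with 'model' and 'data' set) are assumptions
made for illustration, not part of this file; each process would pass its
own job_name and task_index:

    import tensorflow as tf
    from tensorpack.trainv1.distributed import DistributedTrainerReplicated

    cluster = tf.train.ClusterSpec({
        'ps': ['host0:2222'],
        'worker': ['host1:2222', 'host2:2222'],
    })
    server = tf.train.Server(cluster, job_name='worker', task_index=0)
    DistributedTrainerReplicated(config, server).train()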
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/trainv1/interface.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: interface.py
4 |
5 |
6 | __all__ = ['launch_train_with_config']
7 |
8 | from ..train.interface import launch_train_with_config
9 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/trainv1/simple.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: simple.py
3 |
4 |
5 | from .base import Trainer
6 |
7 | from ..tfutils.tower import TowerContext
8 | from ..utils import logger
9 | from ..input_source import FeedInput, QueueInput
10 |
11 | __all__ = ['SimpleTrainer', 'QueueInputTrainer']
12 |
13 |
14 | class SimpleTrainer(Trainer):
15 | """ A naive single-tower single-cost demo trainer.
16 |     It simply builds one tower and minimizes `model.cost`.
17 | It supports both InputSource and DataFlow.
18 |
19 | When DataFlow is given instead of InputSource, the InputSource to be
20 | used will be ``FeedInput(df)`` (no prefetch).
21 | """
22 |
23 | def __init__(self, config):
24 | """
25 | Args:
26 | config (TrainConfig): Must contain 'model' and either one of 'data' or 'dataflow'.
27 | """
28 |         assert len(config.tower) == 1, \
29 |             "Got nr_tower={}, but SimpleTrainer doesn't support multi-GPU!" \
30 |             " Use Sync/AsyncMultiGPUTrainer instead.".format(len(config.tower))
31 |
32 | assert (config.data is not None or config.dataflow is not None) and config.model is not None
33 | if config.dataflow is None:
34 | self._input_source = config.data
35 | else:
36 | self._input_source = FeedInput(config.dataflow)
37 | logger.warn("FeedInput is slow (and this is the default of SimpleTrainer). "
38 | "Consider QueueInput or other InputSource instead.")
39 | super(SimpleTrainer, self).__init__(config)
40 |
41 | def _setup(self):
42 | cbs = self._input_source.setup(self.model.get_inputs_desc())
43 |
44 | with TowerContext('', is_training=True):
45 | grads = self.model._build_graph_get_grads(
46 | *self._input_source.get_input_tensors())
47 | opt = self.model.get_optimizer()
48 | self.train_op = opt.apply_gradients(grads, name='min_op')
49 |
50 | self._config.callbacks.extend(cbs)
51 |
52 |
53 | def QueueInputTrainer(config, input_queue=None):
54 | """
55 |     A convenience function which automatically wraps ``config.dataflow`` with a :class:`QueueInput`.
56 |     It is equivalent to ``SimpleTrainer(config)`` with ``config.data = QueueInput(dataflow)``.
57 |
58 | Args:
59 | config (TrainConfig): Must contain 'model' and 'dataflow'.
60 | input_queue (tf.QueueBase): an input queue. Defaults to the :class:`QueueInput` default.
61 | """
62 | assert (config.data is not None or config.dataflow is not None) and config.model is not None
63 | if config.data is not None:
64 | assert isinstance(config.data, QueueInput), config.data
65 | else:
66 | config.data = QueueInput(config.dataflow, input_queue)
67 | config.dataflow = None
68 | return SimpleTrainer(config)
69 |
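
A hedged usage sketch: `MyModel` and `df` stand in for a user-defined
ModelDesc and DataFlow, and the TrainConfig arguments are illustrative:

    from tensorpack import TrainConfig
    from tensorpack.trainv1.simple import QueueInputTrainer

    config = TrainConfig(model=MyModel(), dataflow=df, steps_per_epoch=100)
    QueueInputTrainer(config).train()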
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/trainv1/utility.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: utility.py
4 |
5 | # for backwards-compatibility
6 | from ..graph_builder.utils import ( # noqa
7 | override_to_local_variable, LeastLoadedDeviceSetter)
8 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: __init__.py
3 |
4 |
5 | """
6 | Common utils.
7 | These utils should be irrelevant to tensorflow.
8 | """
9 |
10 | __all__ = []
11 |
12 |
13 | # These functions are kept for backwards compatibility only;
14 | # they import `logger` locally so it is resolved lazily.
15 | def get_nr_gpu():
16 |     from . import logger
17 |     from .gpu import get_nr_gpu as gg
18 |     logger.warn(
19 |         "get_nr_gpu will not be automatically imported any more! "
20 |         "Please do `from tensorpack.utils.gpu import get_nr_gpu`")
21 |     return gg()
22 |
23 |
24 | def change_gpu(val):
25 |     from . import logger
26 |     from .gpu import change_gpu as cg
27 |     logger.warn(
28 |         "change_gpu will not be automatically imported any more! "
29 |         "Please do `from tensorpack.utils.gpu import change_gpu`")
30 |     return cg(val)
31 |
32 |
33 | def get_rng(obj=None):
34 |     from . import logger
35 |     from .utils import get_rng as gr
36 |     logger.warn(
37 |         "get_rng will not be automatically imported any more! "
38 |         "Please do `from tensorpack.utils.utils import get_rng`")
39 |     return gr(obj)
40 |
41 | # Import no submodules. They are supposed to be explicitly imported by users.
42 | __all__.extend(['logger', 'get_nr_gpu', 'change_gpu', 'get_rng'])
43 |
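
Since these shims only emit a warning and forward the call, the explicit
imports they recommend are the intended replacement:

    from tensorpack.utils.gpu import get_nr_gpu, change_gpu
    from tensorpack.utils.utils import get_rng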
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/utils/debug.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: debug.py
4 |
5 |
6 | import sys
7 |
8 |
9 | def enable_call_trace():
10 | """ Enable trace for calls to any function. """
11 | def tracer(frame, event, arg):
12 | if event == 'call':
13 | co = frame.f_code
14 | func_name = co.co_name
15 | if func_name == 'write' or func_name == 'print':
16 | # ignore write() calls from print statements
17 | return
18 | func_line_no = frame.f_lineno
19 | func_filename = co.co_filename
20 | caller = frame.f_back
21 | if caller:
22 | caller_line_no = caller.f_lineno
23 | caller_filename = caller.f_code.co_filename
24 | print('Call to `%s` on line %s:%s from %s:%s' %
25 | (func_name, func_filename, func_line_no,
26 | caller_filename, caller_line_no))
27 | return
28 | sys.settrace(tracer)
29 |
30 |
31 | if __name__ == '__main__':
32 | enable_call_trace()
33 |
34 | def b(a):
35 | print(2)
36 |
37 | def a():
38 | print(1)
39 | b(1)
40 |
41 | a()
42 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/utils/fs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: fs.py
4 |
5 |
6 | import os
7 | from six.moves import urllib
8 | import errno
9 | import tqdm
10 | from . import logger
11 | from .utils import execute_only_once
12 |
13 | __all__ = ['mkdir_p', 'download', 'recursive_walk', 'get_dataset_path']
14 |
15 |
16 | def mkdir_p(dirname):
17 | """ Make a dir recursively, but do nothing if the dir exists
18 |
19 | Args:
20 | dirname(str):
21 | """
22 | assert dirname is not None
23 | if dirname == '' or os.path.isdir(dirname):
24 | return
25 | try:
26 | os.makedirs(dirname)
27 | except OSError as e:
28 | if e.errno != errno.EEXIST:
29 | raise e
30 |
31 |
32 | def download(url, dir, filename=None):
33 | """
34 | Download URL to a directory.
35 | Will figure out the filename automatically from URL, if not given.
36 | """
37 | mkdir_p(dir)
38 | if filename is None:
39 | filename = url.split('/')[-1]
40 | fpath = os.path.join(dir, filename)
41 |
42 | def hook(t):
43 | last_b = [0]
44 |
45 | def inner(b, bsize, tsize=None):
46 | if tsize is not None:
47 | t.total = tsize
48 | t.update((b - last_b[0]) * bsize)
49 | last_b[0] = b
50 | return inner
51 | try:
52 | with tqdm.tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
53 | fpath, _ = urllib.request.urlretrieve(url, fpath, reporthook=hook(t))
54 | statinfo = os.stat(fpath)
55 | size = statinfo.st_size
56 | except IOError:
57 | logger.error("Failed to download {}".format(url))
58 | raise
59 |     assert size > 0, "Downloaded an empty file from {}!".format(url)
60 |     # TODO human-readable size
61 |     print('Successfully downloaded ' + filename + ". " + str(size) + ' bytes.')
62 | return fpath
63 |
64 |
65 | def recursive_walk(rootdir):
66 | """
67 | Yields:
68 | str: All files in rootdir, recursively.
69 | """
70 | for r, dirs, files in os.walk(rootdir):
71 | for f in files:
72 | yield os.path.join(r, f)
73 |
74 |
75 | def get_dataset_path(*args):
76 | """
77 | Get the path to some dataset under ``$TENSORPACK_DATASET``.
78 |
79 | Args:
80 | args: strings to be joined to form path.
81 |
82 | Returns:
83 | str: path to the dataset.
84 | """
85 | d = os.environ.get('TENSORPACK_DATASET', None)
86 | if d is None:
87 | d = os.path.join(os.path.expanduser('~'), 'tensorpack_data')
88 | if execute_only_once():
89 | logger.warn("Env var $TENSORPACK_DATASET not set, using {} for datasets.".format(d))
90 | if not os.path.isdir(d):
91 | mkdir_p(d)
92 | logger.info("Created the directory {}.".format(d))
93 | assert os.path.isdir(d), d
94 | return os.path.join(d, *args)
95 |
96 |
97 | if __name__ == '__main__':
98 | download('http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz', '.')
99 |
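
A short sketch of `get_dataset_path` (the subdirectory name is illustrative):

    from tensorpack.utils.fs import get_dataset_path

    # resolves under $TENSORPACK_DATASET if set; otherwise warns once and
    # falls back to ~/tensorpack_data, creating it on first use
    path = get_dataset_path('cifar10_data')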
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/utils/globvars.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: globvars.py
4 |
5 |
6 | import six
7 | import argparse
8 | from . import logger
9 |
10 | __all__ = ['globalns', 'GlobalNS']
11 |
12 | if six.PY2:
13 | class NS:
14 | pass
15 | else:
16 | import types
17 | NS = types.SimpleNamespace
18 |
19 |
20 | # TODO make it singleton
21 |
22 | class GlobalNS(NS):
23 | """
24 | The class of the globalns instance.
25 | """
26 | def use_argument(self, args):
27 | """
28 | Add the content of :class:`argparse.Namespace` to this ns.
29 |
30 | Args:
31 | args (argparse.Namespace): arguments
32 | """
33 | assert isinstance(args, argparse.Namespace), type(args)
34 | for k, v in six.iteritems(vars(args)):
35 | if hasattr(self, k):
36 |                 logger.warn("Attribute {} in globalns will be overwritten!".format(k))
37 | setattr(self, k, v)
38 |
39 |
40 | globalns = GlobalNS()
41 | """
42 | A namespace to store global variables.
43 |
44 | Examples:
45 |
46 |     .. code-block:: none
47 |
48 |         from tensorpack.utils.globvars import globalns as G
49 |
50 |         G.depth = 18
51 |         G.batch_size = 1
52 |         G.use_argument(parser.parse_args())
53 | """
54 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/utils/gpu.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: gpu.py
4 |
5 |
6 | import os
7 | from .utils import change_env
8 | from . import logger
9 | from .nvml import NVMLContext
10 | from .concurrency import subproc_call
11 |
12 | __all__ = ['change_gpu', 'get_nr_gpu']
13 |
14 |
15 | def change_gpu(val):
16 | """
17 | Returns:
18 | a context where ``CUDA_VISIBLE_DEVICES=val``.
19 | """
20 | val = str(val)
21 | if val == '-1':
22 | val = ''
23 | return change_env('CUDA_VISIBLE_DEVICES', val)
24 |
25 |
26 | def get_nr_gpu():
27 | """
28 | Returns:
29 | int: #available GPUs in CUDA_VISIBLE_DEVICES, or in the system.
30 | """
31 |     env = os.environ.get('CUDA_VISIBLE_DEVICES', None)
32 |     if env is not None:
33 |         return len(env.split(',')) if len(env) else 0
34 | output, code = subproc_call("nvidia-smi -L", timeout=5)
35 | if code == 0:
36 | output = output.decode('utf-8')
37 | return len(output.strip().split('\n'))
38 | else:
39 | try:
40 | # Use NVML to query device properties
41 | with NVMLContext() as ctx:
42 | return ctx.num_devices()
43 | except Exception:
44 | # Fallback
45 | # Note this will initialize all GPUs and therefore has side effect
46 | # https://github.com/tensorflow/tensorflow/issues/8136
47 | logger.info("Loading local devices by TensorFlow ...")
48 | from tensorflow.python.client import device_lib
49 | local_device_protos = device_lib.list_local_devices()
50 | return len([x.name for x in local_device_protos if x.device_type == 'GPU'])
51 |
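
A sketch combining the two helpers. No physical GPU is needed: when
CUDA_VISIBLE_DEVICES is set, `get_nr_gpu` only parses the variable:

    from tensorpack.utils.gpu import change_gpu, get_nr_gpu

    with change_gpu(0):
        # inside the context CUDA_VISIBLE_DEVICES == '0'
        print(get_nr_gpu())   # prints 1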
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/utils/naming.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | # File: naming.py
3 |
4 |
5 | GLOBAL_STEP_INCR_OP_NAME = 'global_step_incr'
6 |
7 | # extra variables to summarize during training in a moving-average way
8 | MOVING_SUMMARY_OPS_KEY = 'MOVING_SUMMARY_OPS'
9 |
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/utils/palette.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: palette.py
4 |
5 | import numpy as np
6 |
7 | __all__ = ['PALETTE_RGB']
8 |
9 | # copied from https://stackoverflow.com/questions/2328339/how-to-generate-n-different-colors-for-any-natural-number-n
10 | PALETTE_HEX = [
11 | "#000000", "#FFFF00", "#1CE6FF", "#FF34FF", "#FF4A46", "#008941", "#006FA6", "#A30059",
12 | "#FFDBE5", "#7A4900", "#0000A6", "#63FFAC", "#B79762", "#004D43", "#8FB0FF", "#997D87",
13 | "#5A0007", "#809693", "#FEFFE6", "#1B4400", "#4FC601", "#3B5DFF", "#4A3B53", "#FF2F80",
14 | "#61615A", "#BA0900", "#6B7900", "#00C2A0", "#FFAA92", "#FF90C9", "#B903AA", "#D16100",
15 | "#DDEFFF", "#000035", "#7B4F4B", "#A1C299", "#300018", "#0AA6D8", "#013349", "#00846F",
16 | "#372101", "#FFB500", "#C2FFED", "#A079BF", "#CC0744", "#C0B9B2", "#C2FF99", "#001E09",
17 | "#00489C", "#6F0062", "#0CBD66", "#EEC3FF", "#456D75", "#B77B68", "#7A87A1", "#788D66",
18 | "#885578", "#FAD09F", "#FF8A9A", "#D157A0", "#BEC459", "#456648", "#0086ED", "#886F4C",
19 | "#34362D", "#B4A8BD", "#00A6AA", "#452C2C", "#636375", "#A3C8C9", "#FF913F", "#938A81",
20 | "#575329", "#00FECF", "#B05B6F", "#8CD0FF", "#3B9700", "#04F757", "#C8A1A1", "#1E6E00",
21 | "#7900D7", "#A77500", "#6367A9", "#A05837", "#6B002C", "#772600", "#D790FF", "#9B9700",
22 | "#549E79", "#FFF69F", "#201625", "#72418F", "#BC23FF", "#99ADC0", "#3A2465", "#922329",
23 | "#5B4534", "#FDE8DC", "#404E55", "#0089A3", "#CB7E98", "#A4E804", "#324E72", "#6A3A4C",
24 | "#83AB58", "#001C1E", "#D1F7CE", "#004B28", "#C8D0F6", "#A3A489", "#806C66", "#222800",
25 | "#BF5650", "#E83000", "#66796D", "#DA007C", "#FF1A59", "#8ADBB4", "#1E0200", "#5B4E51",
26 | "#C895C5", "#320033", "#FF6832", "#66E1D3", "#CFCDAC", "#D0AC94",
27 | "#7ED379", "#012C58"]
28 |
29 |
30 | def _parse_hex_color(s):
31 | r = int(s[1:3], 16)
32 | g = int(s[3:5], 16)
33 | b = int(s[5:7], 16)
34 | return (r, g, b)
35 |
36 |
37 | PALETTE_RGB = np.asarray(
38 | list(map(_parse_hex_color, PALETTE_HEX)),
39 | dtype='int32')
40 |
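
A sketch of the intended use: index the palette with integer labels to
colorize a segmentation map (the label array is made up):

    import numpy as np
    from tensorpack.utils.palette import PALETTE_RGB

    labels = np.array([[0, 1], [2, 3]])                # tiny fake label map
    colored = PALETTE_RGB[labels % len(PALETTE_RGB)]   # (2, 2, 3) int32 image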
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/utils/rect.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: rect.py
4 |
5 |
6 | import numpy as np
7 |
8 | __all__ = ['IntBox', 'FloatBox']
9 |
10 |
11 | class BoxBase(object):
12 | __slots__ = ['x1', 'y1', 'x2', 'y2']
13 |
14 | def __init__(self, x1, y1, x2, y2):
15 | self.x1 = x1
16 | self.y1 = y1
17 | self.x2 = x2
18 | self.y2 = y2
19 |
20 |     def copy(self):
21 |         # The subclasses require all four coordinates in __init__,
22 |         # so rebuild the box from its own slots.
23 |         new = type(self)(*[getattr(self, i) for i in self.__slots__])
24 |         return new
25 |
26 | def __str__(self):
27 | return '{}(x1={}, y1={}, x2={}, y2={})'.format(
28 | type(self).__name__, self.x1, self.y1, self.x2, self.y2)
29 |
30 | __repr__ = __str__
31 |
32 | def area(self):
33 | return self.w * self.h
34 |
35 | def is_box(self):
36 | return self.w > 0 and self.h > 0
37 |
38 |
39 | class IntBox(BoxBase):
40 | def __init__(self, x1, y1, x2, y2):
41 | for k in [x1, y1, x2, y2]:
42 | assert isinstance(k, int)
43 | super(IntBox, self).__init__(x1, y1, x2, y2)
44 |
45 | @property
46 | def w(self):
47 | return self.x2 - self.x1 + 1
48 |
49 | @property
50 | def h(self):
51 | return self.y2 - self.y1 + 1
52 |
53 | def is_valid_box(self, shape):
54 | """
55 | Check that this rect is a valid bounding box within this shape.
56 |
57 | Args:
58 |             shape: int [h, w]
59 | Returns:
60 | bool
61 | """
62 | if min(self.x1, self.y1) < 0:
63 | return False
64 | if min(self.w, self.h) <= 0:
65 | return False
66 | if self.x2 >= shape[1]:
67 | return False
68 | if self.y2 >= shape[0]:
69 | return False
70 | return True
71 |
72 | def clip_by_shape(self, shape):
73 | """
74 | Clip xs and ys to be valid coordinates inside shape
75 |
76 | Args:
77 |             shape: int [h, w]
78 | """
79 | self.x1 = np.clip(self.x1, 0, shape[1] - 1)
80 | self.x2 = np.clip(self.x2, 0, shape[1] - 1)
81 | self.y1 = np.clip(self.y1, 0, shape[0] - 1)
82 | self.y2 = np.clip(self.y2, 0, shape[0] - 1)
83 |
84 | def roi(self, img):
85 | assert self.is_valid_box(img.shape[:2]), "{} vs {}".format(self, img.shape[:2])
86 | return img[self.y1:self.y2 + 1, self.x1:self.x2 + 1]
87 |
88 |
89 | class FloatBox(BoxBase):
90 | def __init__(self, x1, y1, x2, y2):
91 | for k in [x1, y1, x2, y2]:
92 | assert isinstance(k, float), "type={},value={}".format(type(k), k)
93 | super(FloatBox, self).__init__(x1, y1, x2, y2)
94 |
95 | @property
96 | def w(self):
97 | return self.x2 - self.x1
98 |
99 | @property
100 | def h(self):
101 | return self.y2 - self.y1
102 |
103 | @staticmethod
104 | def from_intbox(intbox):
105 | return FloatBox(intbox.x1, intbox.y1,
106 | intbox.x2 + 1, intbox.y2 + 1)
107 |
108 | def clip_by_shape(self, shape):
109 | self.x1 = np.clip(self.x1, 0, shape[1])
110 | self.x2 = np.clip(self.x2, 0, shape[1])
111 | self.y1 = np.clip(self.y1, 0, shape[0])
112 | self.y2 = np.clip(self.y2, 0, shape[0])
113 |
114 |
115 | if __name__ == '__main__':
116 | x = IntBox(2, 1, 3, 3)
117 | img = np.random.rand(3, 3)
118 | print(img)
119 |
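
Note that the demo box above is out of range for a 3x3 image
(`is_valid_box` would return False, since x2 == 3 >= width). A sketch of a
valid crop, for contrast; the array contents are arbitrary:

    import numpy as np
    from tensorpack.utils.rect import IntBox

    img = np.arange(20).reshape(4, 5)
    box = IntBox(1, 1, 3, 3)               # coordinates are inclusive
    assert box.is_valid_box(img.shape[:2])
    print(box.roi(img))                    # the 3x3 block img[1:4, 1:4]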
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/utils/serialize.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: serialize.py
4 |
5 | from .develop import create_dummy_func
6 |
7 | __all__ = ['loads', 'dumps']
8 |
9 |
10 | def dumps_msgpack(obj):
11 | """
12 | Serialize an object.
13 | Returns:
14 | Implementation-dependent bytes-like object
15 | """
16 | return msgpack.dumps(obj, use_bin_type=True)
17 |
18 |
19 | def loads_msgpack(buf):
20 | """
21 | Args:
22 | buf: the output of `dumps`.
23 | """
24 | return msgpack.loads(buf, raw=False)
25 |
26 |
27 | def dumps_pyarrow(obj):
28 | """
29 | Serialize an object.
30 |
31 | Returns:
32 | Implementation-dependent bytes-like object
33 | """
34 | return pa.serialize(obj).to_buffer()
35 |
36 |
37 | def loads_pyarrow(buf):
38 | """
39 | Args:
40 | buf: the output of `dumps`.
41 | """
42 | return pa.deserialize(buf)
43 |
44 |
45 | try:
46 | # fixed in pyarrow 0.9: https://github.com/apache/arrow/pull/1223#issuecomment-359895666
47 | import pyarrow as pa
48 | except ImportError:
49 | pa = None
50 | dumps_pyarrow = create_dummy_func('dumps_pyarrow', ['pyarrow']) # noqa
51 | loads_pyarrow = create_dummy_func('loads_pyarrow', ['pyarrow']) # noqa
52 |
53 | try:
54 | import msgpack
55 | import msgpack_numpy
56 | msgpack_numpy.patch()
57 | except ImportError:
58 | assert pa is not None, "pyarrow is a dependency of tensorpack!"
59 | loads_msgpack = create_dummy_func( # noqa
60 | 'loads_msgpack', ['msgpack', 'msgpack_numpy'])
61 | dumps_msgpack = create_dummy_func( # noqa
62 | 'dumps_msgpack', ['msgpack', 'msgpack_numpy'])
63 |
64 | if pa is None:
65 | loads = loads_msgpack
66 | dumps = dumps_msgpack
67 | else:
68 | loads = loads_pyarrow
69 | dumps = dumps_pyarrow
70 |
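
A round-trip sketch; whether msgpack or pyarrow serves `dumps`/`loads`
depends on what is installed, as selected above:

    import numpy as np
    from tensorpack.utils.serialize import dumps, loads

    buf = dumps({'step': 3, 'arr': np.arange(4)})
    obj = loads(buf)
    assert obj['step'] == 3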
--------------------------------------------------------------------------------
/tensorpack-installed/tensorpack/utils/timer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # File: timer.py
4 |
5 |
6 | from contextlib import contextmanager
7 | import time
8 | from collections import defaultdict
9 | import six
10 | import atexit
11 |
12 | from .stats import StatCounter
13 | from . import logger
14 |
15 | __all__ = ['total_timer', 'timed_operation',
16 | 'print_total_timer', 'IterSpeedCounter']
17 |
18 |
19 | @contextmanager
20 | def timed_operation(msg, log_start=False):
21 | """
22 | Surround a context with a timer.
23 |
24 | Args:
25 | msg(str): the log to print.
26 | log_start(bool): whether to print also at the beginning.
27 |
28 | Example:
29 | .. code-block:: python
30 |
31 | with timed_operation('Good Stuff'):
32 | time.sleep(1)
33 |
34 | Will print:
35 |
36 |     .. code-block:: none
37 |
38 |         Good Stuff finished, time:1.0000sec.
39 | """
40 | if log_start:
41 | logger.info('Start {} ...'.format(msg))
42 | start = time.time()
43 | yield
44 | logger.info('{} finished, time:{:.4f}sec.'.format(
45 | msg, time.time() - start))
46 |
47 |
48 | _TOTAL_TIMER_DATA = defaultdict(StatCounter)
49 |
50 |
51 | @contextmanager
52 | def total_timer(msg):
53 |     """ A context which adds the time spent inside to the global TotalTimer. """
54 | start = time.time()
55 | yield
56 | t = time.time() - start
57 | _TOTAL_TIMER_DATA[msg].feed(t)
58 |
59 |
60 | def print_total_timer():
61 | """
62 | Print the content of the TotalTimer, if it's not empty. This function will automatically get
63 | called when program exits.
64 | """
65 | if len(_TOTAL_TIMER_DATA) == 0:
66 | return
67 | for k, v in six.iteritems(_TOTAL_TIMER_DATA):
68 | logger.info("Total Time: {} -> {:.2f} sec, {} times, {:.3g} sec/time".format(
69 | k, v.sum, v.count, v.average))
70 |
71 |
72 | atexit.register(print_total_timer)
73 |
74 |
75 | class IterSpeedCounter(object):
76 |     """ Measure how often some code gets reached, and print the speed periodically.
77 |
78 | Example:
79 | Print the speed of the iteration every 100 times.
80 |
81 | .. code-block:: python
82 |
83 | speed = IterSpeedCounter(100)
84 | for k in range(1000):
85 | # do something
86 | speed()
87 | """
88 |
89 | def __init__(self, print_every, name=None):
90 | """
91 | Args:
92 | print_every(int): interval to print.
93 |             name(str): the name to use when printing.
94 | """
95 | self.cnt = 0
96 | self.print_every = int(print_every)
97 | self.name = name if name else 'IterSpeed'
98 |
99 | def reset(self):
100 | self.start = time.time()
101 |
102 | def __call__(self):
103 | if self.cnt == 0:
104 | self.reset()
105 | self.cnt += 1
106 | if self.cnt % self.print_every != 0:
107 | return
108 | t = time.time() - self.start
109 | logger.info("{}: {:.2f} sec, {} times, {:.3g} sec/time".format(
110 | self.name, t, self.cnt, t / self.cnt))
111 |
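
A short sketch of `total_timer` feeding the atexit summary (the label and
sleep time are arbitrary):

    import time
    from tensorpack.utils.timer import total_timer, print_total_timer

    for _ in range(3):
        with total_timer('sleep-loop'):
            time.sleep(0.01)

    print_total_timer()   # would also run automatically at program exit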
--------------------------------------------------------------------------------
/tensorpack-installed/tests/case_script.py:
--------------------------------------------------------------------------------
1 | from abc import abstractproperty
2 | import unittest
3 | import subprocess
4 | import shlex
5 | import sys
6 | import threading
7 | import os
8 | import shutil
9 |
10 |
11 | class PythonScript(threading.Thread):
12 | """A wrapper to start a python script with timeout.
13 |
14 |     To test the actual models even without GPUs, we simply start them and
15 |     test whether they survive a certain amount of time ("timeout"). This checks
16 |     that all imports are correct and that the computation graph can be built,
17 |     without running the entire model on the CPU.
18 |
19 | Attributes:
20 | cmd (str): command to execute the example with all flags (including python)
21 | p: process handle
22 | timeout (int): timeout in seconds
23 | """
24 | def __init__(self, cmd, timeout):
25 | """Prepare a python script
26 |
27 | Args:
28 | cmd (str): command to execute the example with all flags (including python)
29 | timeout (int): time in seconds the script has to survive
30 | """
31 | threading.Thread.__init__(self)
32 | self.cmd = cmd
33 | self.timeout = timeout
34 |
35 | def run(self):
36 | self.p = subprocess.Popen(shlex.split(self.cmd), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
37 | self.out, self.err = self.p.communicate()
38 |
39 | def execute(self):
40 |         """Execute the python script in another process.
41 |
42 |         Raises:
43 |             AssertionError: contains the output of the script if it terminated before the timeout
44 | """
45 | self.start()
46 | self.join(self.timeout)
47 |
48 | if self.is_alive():
49 | self.p.terminate()
50 | self.p.kill() # kill -9
51 | self.join()
52 | else:
53 |             # something unexpected happened here: the script was supposed to survive at least "timeout" seconds
54 | if len(self.err) > 0:
55 | output = u"STDOUT: \n\n\n" + self.out.decode('utf-8')
56 | output += u"\n\n\n STDERR: \n\n\n" + self.err.decode('utf-8')
57 | raise AssertionError(output)
58 |
59 |
60 | class TestPythonScript(unittest.TestCase):
61 |
62 | @abstractproperty
63 | def script(self):
64 | pass
65 |
66 | @staticmethod
67 | def clear_trainlog(script):
68 | script = os.path.basename(script)
69 | script = script[:-3]
70 | if os.path.isdir(os.path.join("train_log", script)):
71 | shutil.rmtree(os.path.join("train_log", script))
72 |
73 | def assertSurvive(self, script, args=None, timeout=20): # noqa
74 | cmd = "python{} {}".format(sys.version_info.major, script)
75 | if args:
76 | cmd += " " + " ".join(args)
77 | PythonScript(cmd, timeout=timeout).execute()
78 |
79 | def setUp(self):
80 | TestPythonScript.clear_trainlog(self.script)
81 |
82 | def tearDown(self):
83 | TestPythonScript.clear_trainlog(self.script)
84 |
--------------------------------------------------------------------------------
/tensorpack-installed/tests/dev/git-hooks/pre-commit:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 | flake8 .
3 |
4 | cd examples
5 | GIT_ARG="--git-dir ../.git --work-tree .."
6 |
7 | # find out modified python files, so that unstaged files are ignored
8 | # exclude ../docs
9 | MOD=$(git $GIT_ARG status -s | grep -E '\.py$' \
10 | | grep -E '^\b+M\b+|^A' | cut -c 4- | grep -v '../docs')
11 | if [[ -n $MOD ]]; then
12 | flake8 $MOD
13 | fi
14 |
--------------------------------------------------------------------------------
/tensorpack-installed/tests/install-tensorflow.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | if [ "$TF_TYPE" == "release" ]; then
4 | if [[ $TRAVIS_PYTHON_VERSION == 2* ]]; then
5 | TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-${TF_VERSION}-cp27-none-linux_x86_64.whl
6 | fi
7 | if [[ $TRAVIS_PYTHON_VERSION == 3.4* ]]; then
8 | TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-${TF_VERSION}-cp34-cp34m-linux_x86_64.whl
9 | fi
10 | if [[ $TRAVIS_PYTHON_VERSION == 3.5* ]]; then
11 | TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-${TF_VERSION}-cp35-cp35m-linux_x86_64.whl
12 | fi
13 | if [[ $TRAVIS_PYTHON_VERSION == 3.6* ]]; then
14 | TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-${TF_VERSION}-cp36-cp36m-linux_x86_64.whl
15 | fi
16 | fi
17 | if [ "$TF_TYPE" == "nightly" ]; then
18 | TF_BINARY_URL="tf-nightly"
19 | fi
20 |
21 |
22 | pip install --upgrade ${TF_BINARY_URL}
23 |
--------------------------------------------------------------------------------
/tensorpack-installed/tests/run-tests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 | # File: run-tests.sh
3 | # Author: Yuxin Wu
4 |
5 | DIR=$(dirname "$0")
6 | cd "$DIR"
7 |
8 | export TF_CPP_MIN_LOG_LEVEL=2
9 | # test import (#471)
10 | python -c 'from tensorpack.dataflow.imgaug import transform'
11 |
12 | python -m unittest discover -v
13 | # python -m tensorpack.models._test
14 | # segfault for no reason (https://travis-ci.org/ppwwyyxx/tensorpack/jobs/217702985)
15 |
16 | # python ../tensorpack/user_ops/test-recv-op.py
17 |
--------------------------------------------------------------------------------
/tensorpack-installed/tests/test_char_rnn.py:
--------------------------------------------------------------------------------
1 | from case_script import TestPythonScript
2 | import os
3 |
4 |
5 | def random_content():
6 | return ('Lorem ipsum dolor sit amet\n'
7 | 'consetetur sadipscing elitr\n'
8 | 'sed diam nonumy eirmod tempor invidunt ut labore\n')
9 |
10 |
11 | class CharRNNTest(TestPythonScript):
12 |
13 | @property
14 | def script(self):
15 | return '../examples/Char-RNN/char-rnn.py'
16 |
17 | def setUp(self):
18 | super(CharRNNTest, self).setUp()
19 | with open('input.txt', 'w') as f:
20 | f.write(random_content())
21 |
22 | def test(self):
23 | self.assertSurvive(self.script, args=['train'])
24 |
25 | def tearDown(self):
26 | super(CharRNNTest, self).tearDown()
27 | os.remove('input.txt')
28 |
--------------------------------------------------------------------------------
/tensorpack-installed/tests/test_infogan.py:
--------------------------------------------------------------------------------
1 | from case_script import TestPythonScript
2 |
3 | from tensorpack.tfutils.common import get_tf_version_number
4 |
5 |
6 | class InfoGANTest(TestPythonScript):
7 |
8 | @property
9 | def script(self):
10 | return '../examples/GAN/InfoGAN-mnist.py'
11 |
12 | def test(self):
13 | if get_tf_version_number() < 1.4:
14 |             return  # skip: requires leaky_relu, which needs TF>=1.4
15 | self.assertSurvive(self.script, args=None)
16 |
--------------------------------------------------------------------------------
/tensorpack-installed/tests/test_mnist.py:
--------------------------------------------------------------------------------
1 | from case_script import TestPythonScript
2 |
3 |
4 | class MnistTest(TestPythonScript):
5 |
6 | @property
7 | def script(self):
8 | return '../examples/basics/mnist-convnet.py'
9 |
10 | def test(self):
11 | self.assertSurvive(self.script, args=None)
12 |
--------------------------------------------------------------------------------
/tensorpack-installed/tests/test_mnist_similarity.py:
--------------------------------------------------------------------------------
1 | from case_script import TestPythonScript
2 |
3 |
4 | class SimilarityLearningTest(TestPythonScript):
5 |
6 | @property
7 | def script(self):
8 | return '../examples/SimilarityLearning/mnist-embeddings.py'
9 |
10 | def test(self):
11 | self.assertSurvive(self.script, args=['--algorithm triplet'], timeout=10)
12 |
--------------------------------------------------------------------------------
/tensorpack-installed/tests/test_resnet.py:
--------------------------------------------------------------------------------
1 | from case_script import TestPythonScript
2 |
3 |
4 | class ResnetTest(TestPythonScript):
5 | @property
6 | def script(self):
7 | return '../examples/ResNet/imagenet-resnet.py'
8 |
9 | def test(self):
10 | self.assertSurvive(
11 | self.script,
12 | args=['--fake', '--data_format NHWC'], timeout=20)
13 |
--------------------------------------------------------------------------------
/tensorpack-installed/tox.ini:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 120
3 | ignore = E265,E741,E742,E743
4 | exclude = .git,
5 | __init__.py,
6 | setup.py,
7 | docs,
8 | examples,
docs/conf.py,
10 | snippet,
11 | examples-old,
12 | _test.py,
13 |
--------------------------------------------------------------------------------