├── HISTORY.rst
├── LICENSE
├── README.md
├── TODO.md
├── docs
│   ├── Makefile
│   ├── api.rst
│   ├── conf.py
│   ├── contact.rst
│   ├── favicon.png
│   ├── goals.rst
│   ├── index.rst
│   ├── requirements.txt
│   ├── tntorch.svg
│   ├── tntorch_borders.svg
│   ├── tutorial-notebooks.rst
│   └── tutorials
│       ├── active_subspaces.ipynb
│       ├── anova.ipynb
│       ├── arithmetics.ipynb
│       ├── automata.ipynb
│       ├── classification.ipynb
│       ├── completion.ipynb
│       ├── cross.ipynb
│       ├── decompositions.ipynb
│       ├── derivatives.ipynb
│       ├── exponential_machines.ipynb
│       ├── introduction.ipynb
│       ├── logic.ipynb
│       ├── main_formats.ipynb
│       ├── other_formats.ipynb
│       ├── pce.ipynb
│       ├── sobol.ipynb
│       └── vector_fields.ipynb
├── images
│   ├── tensors.jpg
│   └── text.png
├── setup.py
├── tests
│   ├── test_automata.py
│   ├── test_cross.py
│   ├── test_derivatives.py
│   ├── test_gpu.py
│   ├── test_indexing.py
│   ├── test_init.py
│   ├── test_ops.py
│   ├── test_round.py
│   ├── test_tools.py
│   └── util.py
├── tntorch
│   ├── __init__.py
│   ├── anova.py
│   ├── autodiff.py
│   ├── automata.py
│   ├── create.py
│   ├── cross.py
│   ├── derivatives.py
│   ├── logic.py
│   ├── metrics.py
│   ├── ops.py
│   ├── round.py
│   ├── tensor.py
│   └── tools.py
└── tutorials
/HISTORY.rst:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VMML/tntorch/8c81a1cbb0c5b19db7c26a787acfca35e0fbd960/HISTORY.rst
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU LESSER GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 |
9 | This version of the GNU Lesser General Public License incorporates
10 | the terms and conditions of version 3 of the GNU General Public
11 | License, supplemented by the additional permissions listed below.
12 |
13 | 0. Additional Definitions.
14 |
15 | As used herein, "this License" refers to version 3 of the GNU Lesser
16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU
17 | General Public License.
18 |
19 | "The Library" refers to a covered work governed by this License,
20 | other than an Application or a Combined Work as defined below.
21 |
22 | An "Application" is any work that makes use of an interface provided
23 | by the Library, but which is not otherwise based on the Library.
24 | Defining a subclass of a class defined by the Library is deemed a mode
25 | of using an interface provided by the Library.
26 |
27 | A "Combined Work" is a work produced by combining or linking an
28 | Application with the Library. The particular version of the Library
29 | with which the Combined Work was made is also called the "Linked
30 | Version".
31 |
32 | The "Minimal Corresponding Source" for a Combined Work means the
33 | Corresponding Source for the Combined Work, excluding any source code
34 | for portions of the Combined Work that, considered in isolation, are
35 | based on the Application, and not on the Linked Version.
36 |
37 | The "Corresponding Application Code" for a Combined Work means the
38 | object code and/or source code for the Application, including any data
39 | and utility programs needed for reproducing the Combined Work from the
40 | Application, but excluding the System Libraries of the Combined Work.
41 |
42 | 1. Exception to Section 3 of the GNU GPL.
43 |
44 | You may convey a covered work under sections 3 and 4 of this License
45 | without being bound by section 3 of the GNU GPL.
46 |
47 | 2. Conveying Modified Versions.
48 |
49 | If you modify a copy of the Library, and, in your modifications, a
50 | facility refers to a function or data to be supplied by an Application
51 | that uses the facility (other than as an argument passed when the
52 | facility is invoked), then you may convey a copy of the modified
53 | version:
54 |
55 | a) under this License, provided that you make a good faith effort to
56 | ensure that, in the event an Application does not supply the
57 | function or data, the facility still operates, and performs
58 | whatever part of its purpose remains meaningful, or
59 |
60 | b) under the GNU GPL, with none of the additional permissions of
61 | this License applicable to that copy.
62 |
63 | 3. Object Code Incorporating Material from Library Header Files.
64 |
65 | The object code form of an Application may incorporate material from
66 | a header file that is part of the Library. You may convey such object
67 | code under terms of your choice, provided that, if the incorporated
68 | material is not limited to numerical parameters, data structure
69 | layouts and accessors, or small macros, inline functions and templates
70 | (ten or fewer lines in length), you do both of the following:
71 |
72 | a) Give prominent notice with each copy of the object code that the
73 | Library is used in it and that the Library and its use are
74 | covered by this License.
75 |
76 | b) Accompany the object code with a copy of the GNU GPL and this license
77 | document.
78 |
79 | 4. Combined Works.
80 |
81 | You may convey a Combined Work under terms of your choice that,
82 | taken together, effectively do not restrict modification of the
83 | portions of the Library contained in the Combined Work and reverse
84 | engineering for debugging such modifications, if you also do each of
85 | the following:
86 |
87 | a) Give prominent notice with each copy of the Combined Work that
88 | the Library is used in it and that the Library and its use are
89 | covered by this License.
90 |
91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license
92 | document.
93 |
94 | c) For a Combined Work that displays copyright notices during
95 | execution, include the copyright notice for the Library among
96 | these notices, as well as a reference directing the user to the
97 | copies of the GNU GPL and this license document.
98 |
99 | d) Do one of the following:
100 |
101 | 0) Convey the Minimal Corresponding Source under the terms of this
102 | License, and the Corresponding Application Code in a form
103 | suitable for, and under terms that permit, the user to
104 | recombine or relink the Application with a modified version of
105 | the Linked Version to produce a modified Combined Work, in the
106 | manner specified by section 6 of the GNU GPL for conveying
107 | Corresponding Source.
108 |
109 | 1) Use a suitable shared library mechanism for linking with the
110 | Library. A suitable mechanism is one that (a) uses at run time
111 | a copy of the Library already present on the user's computer
112 | system, and (b) will operate properly with a modified version
113 | of the Library that is interface-compatible with the Linked
114 | Version.
115 |
116 | e) Provide Installation Information, but only if you would otherwise
117 | be required to provide such information under section 6 of the
118 | GNU GPL, and only to the extent that such information is
119 | necessary to install and execute a modified version of the
120 | Combined Work produced by recombining or relinking the
121 | Application with a modified version of the Linked Version. (If
122 | you use option 4d0, the Installation Information must accompany
123 | the Minimal Corresponding Source and Corresponding Application
124 | Code. If you use option 4d1, you must provide the Installation
125 | Information in the manner specified by section 6 of the GNU GPL
126 | for conveying Corresponding Source.)
127 |
128 | 5. Combined Libraries.
129 |
130 | You may place library facilities that are a work based on the
131 | Library side by side in a single library together with other library
132 | facilities that are not Applications and are not covered by this
133 | License, and convey such a combined library under terms of your
134 | choice, if you do both of the following:
135 |
136 | a) Accompany the combined library with a copy of the same work based
137 | on the Library, uncombined with any other library facilities,
138 | conveyed under the terms of this License.
139 |
140 | b) Give prominent notice with the combined library that part of it
141 | is a work based on the Library, and explaining where to find the
142 | accompanying uncombined form of the same work.
143 |
144 | 6. Revised Versions of the GNU Lesser General Public License.
145 |
146 | The Free Software Foundation may publish revised and/or new versions
147 | of the GNU Lesser General Public License from time to time. Such new
148 | versions will be similar in spirit to the present version, but may
149 | differ in detail to address new problems or concerns.
150 |
151 | Each version is given a distinguishing version number. If the
152 | Library as you received it specifies that a certain numbered version
153 | of the GNU Lesser General Public License "or any later version"
154 | applies to it, you have the option of following the terms and
155 | conditions either of that published version or of any later version
156 | published by the Free Software Foundation. If the Library as you
157 | received it does not specify a version number of the GNU Lesser
158 | General Public License, you may choose any version of the GNU Lesser
159 | General Public License ever published by the Free Software Foundation.
160 |
161 | If the Library as you received it specifies that a proxy can decide
162 | whether future versions of the GNU Lesser General Public License shall
163 | apply, that proxy's public statement of acceptance of any version is
164 | permanent authorization for you to choose that version for the
165 | Library.
166 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://tntorch.readthedocs.io/en/latest/?badge=latest)
2 |
3 | # tntorch - Tensor Network Learning with PyTorch
4 |
5 | **New**: our [Read the Docs site](http://tntorch.readthedocs.io/) is out!
6 |
7 | [Welcome to *tntorch*](https://github.com/VMML/tntorch/blob/master/docs/tutorials/introduction.ipynb), a PyTorch-powered modeling and learning library using tensor networks. Such networks are unique in that [they use *multilinear* neural units](https://arxiv.org/abs/1711.00811) (instead of non-linear activation units). Features include:
8 |
9 | - Basic and fancy **indexing** of tensors, **broadcasting**, **assignment**, etc.
10 | - Tensor **decomposition** and **reconstruction**
11 | - Element-wise and tensor-tensor **arithmetics**
12 | - Building tensors from black-box functions using **cross-approximation**
13 | - **Statistics** and **sensitivity analysis**
14 | - **Optimization** using autodifferentiation
15 | - **Misc. operations** on tensors: stacking, unfolding, sampling, differentiating, etc.
16 |
17 |
18 | Available [tensor formats](https://github.com/rballester/tntorch/blob/master/docs/tutorials/main_formats.ipynb) include:
19 |
20 | - [CANDECOMP/PARAFAC (CP)](https://epubs.siam.org/doi/pdf/10.1137/07070111X)
21 | - [Tucker](https://epubs.siam.org/doi/pdf/10.1137/S0895479898346995)
22 | - [Tensor train (TT)](https://epubs.siam.org/doi/abs/10.1137/090752286?journalCode=sjoce3)
23 | - Hybrids: CP-Tucker, TT-Tucker, etc.
24 | - [Partial support](https://github.com/rballester/tntorch/blob/master/docs/tutorials/other_formats.ipynb) for other decompositions such as [INDSCAL, CANDELINC, DEDICOM, PARATUCK2](https://epubs.siam.org/doi/pdf/10.1137/07070111X), and custom formats
25 |
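All of these are handled through the same `tn.Tensor` constructor. A minimal sketch (the shape and data below are placeholders):

```
import torch
import tntorch as tn

data = torch.randn(16, 16, 16, 16)    # any dense PyTorch (or NumPy) array
t1 = tn.Tensor(data, ranks_cp=5)      # CP decomposition
t2 = tn.Tensor(data, ranks_tucker=5)  # Tucker decomposition
t3 = tn.Tensor(data, ranks_tt=5)      # tensor train (TT) decomposition
```
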
26 | For example, the following networks both represent a 4D tensor (i.e. a real-valued function that can take I1 x I2 x I3 x I4 possible inputs) in the TT and TT-Tucker formats:
27 |
28 |
29 |
30 | In *tntorch*, **all tensor decompositions share the same interface**. You can handle them in a transparent form, as if they were plain NumPy arrays or PyTorch tensors:
31 |
32 | ```
33 | > import tntorch as tn
34 | > t = tn.randn(32, 32, 32, 32, ranks_tt=5) # Random 4D TT tensor of shape 32 x 32 x 32 x 32 and TT-rank 5
35 | > print(t)
36 |
37 | 4D TT tensor:
38 |
39 | 32 32 32 32
40 | | | | |
41 | (0) (1) (2) (3)
42 | / \ / \ / \ / \
43 | 1 5 5 5 1
44 |
45 | > print(tn.mean(t))
46 |
47 | tensor(8.0388)
48 |
49 | > print(tn.norm(t))
50 |
51 | tensor(9632.3726)
52 | ```
53 |
54 | Decompressing tensors is easy:
55 |
56 | ```
57 | > print(t.torch().shape)
58 | torch.Size([32, 32, 32, 32])
59 | ```
60 |
61 | Thanks to PyTorch's automatic differentiation, you can easily define all sorts of loss functions on tensors:
62 |
63 | ```
64 | def loss(t):
65 | return torch.norm(t[:, 0, 10:, [3, 4]].torch()) # NumPy-like "fancy indexing" for arrays
66 | ```
67 |
68 | Most importantly, loss functions can be defined on **compressed** tensors as well:
69 |
70 | ```
71 | def loss(t):
72 | return tn.norm(t[:3, :3, :3, :3] - t[-3:, -3:, -3:, -3:])
73 | ```
74 |
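Such losses can then be minimized with the library's optimizer helper, as done in the tutorial notebooks. A minimal sketch, assuming a tensor created with `requires_grad=True`:

```
import tntorch as tn

t = tn.rand(shape=[32]*4, ranks_tt=5, requires_grad=True)  # TT tensor with learnable cores

def loss(t):
    # Make the first and last 3 x 3 x 3 x 3 corners of the tensor as similar as possible
    return tn.norm(t[:3, :3, :3, :3] - t[-3:, -3:, -3:, -3:])

tn.optimize(t, loss)  # gradient-based optimization, as in the tutorials
```
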
75 | Check out the [introductory notebook](https://github.com/rballester/tntorch/blob/master/docs/tutorials/introduction.ipynb) for all the details on the basics.
76 |
77 | ## Tutorial Notebooks
78 |
79 | - [Introduction](https://github.com/rballester/tntorch/blob/master/docs/tutorials/introduction.ipynb)
80 | - [Active subspaces](https://github.com/rballester/tntorch/blob/master/docs/tutorials/active_subspaces.ipynb)
81 | - [ANOVA decomposition](https://github.com/rballester/tntorch/blob/master/docs/tutorials/anova.ipynb)
82 | - [Boolean logic](https://github.com/rballester/tntorch/blob/master/docs/tutorials/logic.ipynb)
83 | - [Classification](https://github.com/rballester/tntorch/blob/master/docs/tutorials/classification.ipynb)
84 | - [Cross-approximation](https://github.com/rballester/tntorch/blob/master/docs/tutorials/cross.ipynb)
85 | - [Differentiation](https://github.com/rballester/tntorch/blob/master/docs/tutorials/derivatives.ipynb)
86 | - [Discrete/weighted finite automata](https://github.com/rballester/tntorch/blob/master/docs/tutorials/automata.ipynb)
87 | - [Exponential machines](https://github.com/rballester/tntorch/blob/master/docs/tutorials/exponential_machines.ipynb)
88 | - [Main tensor formats available](https://github.com/rballester/tntorch/blob/master/docs/tutorials/main_formats.ipynb)
89 | - [Other custom formats](https://github.com/rballester/tntorch/blob/master/docs/tutorials/other_formats.ipynb)
90 | - [Polynomial chaos expansions](https://github.com/rballester/tntorch/blob/master/docs/tutorials/pce.ipynb)
91 | - [Tensor arithmetics](https://github.com/rballester/tntorch/blob/master/docs/tutorials/arithmetics.ipynb)
92 | - [Tensor completion and regression](https://github.com/rballester/tntorch/blob/master/docs/tutorials/completion.ipynb)
93 | - [Tensor decomposition](https://github.com/rballester/tntorch/blob/master/docs/tutorials/decompositions.ipynb)
94 | - [Sensitivity analysis](https://github.com/rballester/tntorch/blob/master/docs/tutorials/sobol.ipynb)
95 | - [Vector field data](https://github.com/rballester/tntorch/blob/master/docs/tutorials/vector_fields.ipynb)
96 |
97 | ## Installation
98 |
99 | The main dependencies are *NumPy* and *PyTorch*. To download and install *tntorch*:
100 |
101 | ```
102 | git clone https://github.com/rballester/tntorch.git
103 | cd tntorch
104 | pip install .
105 | ```
106 |
107 | ## Testing
108 |
109 | We use [*pytest*](https://docs.pytest.org/en/latest/). Simply run:
110 |
111 | ```
112 | cd tests/
113 | pytest
114 | ```
115 |
116 | ## Contributing
117 |
118 | Pull requests are welcome!
119 |
120 | Besides using the [issue tracker](https://github.com/rballester/tntorch/issues), also feel free to contact me at rballester@ifi.uzh.ch.
121 |
--------------------------------------------------------------------------------
/TODO.md:
--------------------------------------------------------------------------------
1 | ## TODO
2 |
3 | - Fix and polish `__setitem__`
4 | - Save/load tensors
5 | - Encapsulated Regressor() and Classifier() classes
6 | - Make round() more efficient by mixing round_tucker() and round_tt()
7 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SOURCEDIR = .
8 | BUILDDIR = _build
9 |
10 | # Put it first so that "make" without argument is like "make help".
11 | help:
12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
13 |
14 | .PHONY: help Makefile
15 |
16 | # Catch-all target: route all unknown targets to Sphinx using the new
17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
18 | %: Makefile
19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/docs/api.rst:
--------------------------------------------------------------------------------
1 | API Documentation
2 | =================
3 |
4 |
5 | .. automodule:: tntorch
6 | :members:
7 | :undoc-members:
8 | :show-inheritance:
9 |
10 |
11 | anova
12 | -----
13 |
14 | .. automodule:: anova
15 | :members:
16 | :undoc-members:
17 | :inherited-members:
18 | :show-inheritance:
19 |
20 | autodiff
21 | --------
22 |
23 | .. automodule:: autodiff
24 | :members:
25 | :undoc-members:
26 | :inherited-members:
27 | :show-inheritance:
28 |
29 | automata
30 | --------
31 |
32 | .. automodule:: automata
33 | :members:
34 | :undoc-members:
35 | :inherited-members:
36 | :show-inheritance:
37 |
38 | create
39 | ------
40 |
41 | .. automodule:: create
42 | :members:
43 | :undoc-members:
44 | :inherited-members:
45 | :show-inheritance:
46 |
47 | cross
48 | -----
49 |
50 | .. automodule:: cross
51 | :members:
52 | :undoc-members:
53 | :inherited-members:
54 | :show-inheritance:
55 |
56 | derivatives
57 | -----------
58 |
59 | .. automodule:: derivatives
60 | :members:
61 | :undoc-members:
62 | :inherited-members:
63 | :show-inheritance:
64 |
65 | logic
66 | -----
67 |
68 | .. automodule:: logic
69 | :members:
70 | :undoc-members:
71 | :inherited-members:
72 | :show-inheritance:
73 |
74 | metrics
75 | -------
76 |
77 | .. automodule:: metrics
78 | :members:
79 | :undoc-members:
80 | :inherited-members:
81 | :show-inheritance:
82 |
83 | ops
84 | ---
85 |
86 | .. automodule:: ops
87 | :members:
88 | :undoc-members:
89 | :inherited-members:
90 | :show-inheritance:
91 |
92 | round
93 | -----
94 |
95 | .. automodule:: round
96 | :members:
97 | :undoc-members:
98 | :inherited-members:
99 | :show-inheritance:
100 |
101 | tensor
102 | ------
103 |
104 | .. automodule:: tensor
105 | :members:
106 | :undoc-members:
107 | :inherited-members:
108 | :show-inheritance:
109 |
110 | tools
111 | -----
112 |
113 | .. automodule:: tools
114 | :members:
115 | :undoc-members:
116 | :inherited-members:
117 | :show-inheritance:
118 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Configuration file for the Sphinx documentation builder.
4 | #
5 | # This file does only contain a selection of the most common options. For a
6 | # full list see the documentation:
7 | # http://www.sphinx-doc.org/en/master/config
8 |
9 | # -- Path setup --------------------------------------------------------------
10 |
11 | # If extensions (or modules to document with autodoc) are in another directory,
12 | # add these directories to sys.path here. If the directory is relative to the
13 | # documentation root, use os.path.abspath to make it absolute, like shown here.
14 | #
15 |
16 | import os
17 | import sys
18 | sys.path.append(os.path.join(os.path.dirname(__file__), '../tntorch'))
19 | sys.path.append(os.path.join(os.path.dirname(__file__), 'tutorials'))
20 |
21 | # -- Project information -----------------------------------------------------
22 |
23 | project = 'tntorch'
24 | copyright = '2019, Rafael Ballester-Ripoll'
25 | author = 'Rafael Ballester-Ripoll'
26 |
27 | # The short X.Y version
28 | version = ''
29 | # The full version, including alpha/beta/rc tags
30 | release = '0.1'
31 |
32 | html_logo = 'tntorch_borders.svg'
33 | html_favicon = 'favicon.png'
34 |
35 | # -- General configuration ---------------------------------------------------
36 |
37 | # If your documentation needs a minimal Sphinx version, state it here.
38 | #
39 | # needs_sphinx = '1.0'
40 |
41 | # Add any Sphinx extension module names here, as strings. They can be
42 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
43 | # ones.
44 | extensions = [
45 | 'nbsphinx',
46 | 'sphinx.ext.autodoc',
47 | 'sphinx.ext.coverage',
48 | 'sphinx.ext.mathjax',
49 | 'sphinx.ext.viewcode',
50 | 'sphinx.ext.githubpages',
51 | ]
52 | #mathjax_path = 'http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
53 |
54 | # Add any paths that contain templates here, relative to this directory.
55 | templates_path = ['_templates']
56 |
57 | autosummary_generate = True
58 |
59 | autodoc_default_flags = ['members', 'inherited-members']
60 |
61 | # The suffix(es) of source filenames.
62 | # You can specify multiple suffix as a list of string:
63 | #
64 | # source_suffix = ['.rst', '.md']
65 | source_suffix = '.rst'
66 |
67 | # The master toctree document.
68 | master_doc = 'index'
69 |
70 | # The language for content autogenerated by Sphinx. Refer to documentation
71 | # for a list of supported languages.
72 | #
73 | # This is also used if you do content translation via gettext catalogs.
74 | # Usually you set "language" from the command line for these cases.
75 | language = None
76 |
77 | # List of patterns, relative to source directory, that match files and
78 | # directories to ignore when looking for source files.
79 | # This pattern also affects html_static_path and html_extra_path.
80 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
81 |
82 | # The name of the Pygments (syntax highlighting) style to use.
83 | pygments_style = 'sphinx'
84 |
85 |
86 | # -- Options for HTML output -------------------------------------------------
87 |
88 | # The theme to use for HTML and HTML Help pages. See the documentation for
89 | # a list of builtin themes.
90 | #
91 | html_theme = 'sphinx_rtd_theme'
92 |
93 | # Theme options are theme-specific and customize the look and feel of a theme
94 | # further. For a list of options available for each theme, see the
95 | # documentation.
96 | html_theme_options = {
97 | 'logo_only': True, # if we have a html_logo below, this shows /only/ the logo with no title text
98 | }
99 |
100 | # Add any paths that contain custom static files (such as style sheets) here,
101 | # relative to this directory. They are copied after the builtin static files,
102 | # so a file named "default.css" will overwrite the builtin "default.css".
103 | html_static_path = ['_static']
104 |
105 | # Custom sidebar templates, must be a dictionary that maps document names
106 | # to template names.
107 | #
108 | # The default sidebars (for documents that don't match any pattern) are
109 | # defined by theme itself. Builtin themes are using these templates by
110 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
111 | # 'searchbox.html']``.
112 | #
113 | # html_sidebars = {}
114 |
115 |
116 | # -- Options for HTMLHelp output ---------------------------------------------
117 |
118 | # Output file base name for HTML help builder.
119 | htmlhelp_basename = 'tntorchdoc'
120 |
121 |
122 | # -- Options for LaTeX output ------------------------------------------------
123 |
124 | latex_elements = {
125 | # The paper size ('letterpaper' or 'a4paper').
126 | #
127 | # 'papersize': 'letterpaper',
128 |
129 | # The font size ('10pt', '11pt' or '12pt').
130 | #
131 | # 'pointsize': '10pt',
132 |
133 | # Additional stuff for the LaTeX preamble.
134 | #
135 | # 'preamble': '',
136 |
137 | # Latex figure (float) alignment
138 | #
139 | # 'figure_align': 'htbp',
140 | }
141 |
142 | # Grouping the document tree into LaTeX files. List of tuples
143 | # (source start file, target name, title,
144 | # author, documentclass [howto, manual, or own class]).
145 | latex_documents = [
146 | (master_doc, 'tntorch.tex', 'tntorch Documentation',
147 | 'Rafael Ballester-Ripoll', 'manual'),
148 | ]
149 |
150 |
151 | # -- Options for manual page output ------------------------------------------
152 |
153 | # One entry per manual page. List of tuples
154 | # (source start file, name, description, authors, manual section).
155 | man_pages = [
156 | (master_doc, 'tntorch', 'tntorch Documentation',
157 | [author], 1)
158 | ]
159 |
160 |
161 | # -- Options for Texinfo output ----------------------------------------------
162 |
163 | # Grouping the document tree into Texinfo files. List of tuples
164 | # (source start file, target name, title, author,
165 | # dir menu entry, description, category)
166 | texinfo_documents = [
167 | (master_doc, 'tntorch', 'tntorch Documentation',
168 | author, 'tntorch', 'Tensor Network Learning with PyTorch.',
169 | 'Miscellaneous'),
170 | ]
171 |
172 |
173 | # -- Options for Epub output -------------------------------------------------
174 |
175 | # Bibliographic Dublin Core info.
176 | epub_title = project
177 |
178 | # The unique identifier of the text. This can be a ISBN number
179 | # or the project homepage.
180 | #
181 | # epub_identifier = ''
182 |
183 | # A unique identification for the text.
184 | #
185 | # epub_uid = ''
186 |
187 | # A list of files that should not be packed into the epub file.
188 | epub_exclude_files = ['search.html']
189 |
190 | autoclass_content = 'both' # Shows documentation for __init__() methods
191 |
192 | from unittest.mock import MagicMock
193 | class Mock(MagicMock):
194 | @classmethod
195 | def __getattr__(cls, name):
196 | return MagicMock()
197 |
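# Mock optional dependencies so that autodoc can import tntorch even when they are not installed (e.g. on the Read the Docs build server)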
198 | MOCK_MODULES = ['maxvolpy', 'maxvolpy.maxvol']
199 | sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
200 |
--------------------------------------------------------------------------------
/docs/contact.rst:
--------------------------------------------------------------------------------
1 | Contact/Contributing
2 | ====================
3 |
4 | This project is mainly developed by `Rafael Ballester-Ripoll `_ (Visualization and MultiMedia Lab, University of Zurich). Feel free to contact me at rballester@ifi.uzh.ch for comments, ideas, or issues (consider using `GitHub's issue tracker `_ as well).
5 |
6 | `Pull requests `_ are welcome anytime!
7 |
--------------------------------------------------------------------------------
/docs/favicon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VMML/tntorch/8c81a1cbb0c5b19db7c26a787acfca35e0fbd960/docs/favicon.png
--------------------------------------------------------------------------------
/docs/goals.rst:
--------------------------------------------------------------------------------
1 | Project Goals
2 | =============
3 |
4 | This package was born to bring together some of the most popular tensor decomposition models (including CP, Tucker, and the tensor train) under a common interface. Thus, we use *one class* for all those models. They are all particular cases of `tensor networks `_, and the idea is that decomposing, manipulating, and reconstructing tensors can be (to some extent) abstracted away from the particular decomposition format.
5 |
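As a small illustration of that shared interface, here is a sketch using only calls that also appear in the README and in the first-steps example:

.. code-block:: python

    import tntorch as tn

    t = tn.randn(32, 32, 32, ranks_tt=3)  # a TT tensor...
    print(tn.mean(t))                     # ...but statistics,
    print(tn.norm(t))                     # norms,
    print(t.torch().shape)                # and reconstruction look the same for every format
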
6 | Building on top of `PyTorch `_'s flexibility and built-in automatic differentiation, the overall goal is to exploit those features and allow users to quickly develop, model, and fit various tensor decompositions in a range of data science applications.
7 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | tntorch -- Tensor Network Learning with PyTorch
2 | ===============================================
3 |
4 | .. image:: tntorch.svg
5 | :width: 300 px
6 | :align: center
7 |
8 | This is a `PyTorch `__-powered library for tensor modeling and learning that features transparent support for the `tensor train (TT) model `_, `CANDECOMP/PARAFAC (CP) `_, the `Tucker model `_, and more. Supported operations (CPU and GPU) include:
9 |
10 | - Basic and fancy `indexing `_ of tensors, broadcasting, assignment, etc.
11 | - Tensor `decomposition and reconstruction `_
12 | - Element-wise and tensor-tensor `arithmetics `_
13 | - Building tensors from black-box functions using `cross-approximation `_
14 | - Statistics and `sensitivity analysis `_
15 | - Optimization using autodifferentiation, useful for e.g. `regression `_ or `classification `_
16 | - Misc. operations on tensors: stacking, unfolding, sampling, `differentiating `_, etc.
17 |
18 | Get the Code
19 | ------------
20 |
21 | You can clone the project from `tntorch's GitHub page `_:
22 |
23 | .. code-block:: bash
24 |
25 | git clone https://github.com/rballester/tntorch.git
26 |
27 | or get it as a `zip file `_.
28 |
29 | Installation
30 | ------------
31 |
32 | The main dependencies are `NumPy `_ and `PyTorch `_ (we recommend installing those with `Conda `_ or `Miniconda `_). To install *tntorch*, run:
33 |
34 | .. code-block:: bash
35 |
36 | cd tntorch
37 | pip install .
38 |
39 | First Steps
40 | -----------
41 |
42 | Some basic tensor manipulation:
43 |
44 | .. code-block:: python
45 |
46 | import tntorch as tn
47 |
48 | t = tn.ones(64, 64) # 64 x 64 tensor, filled with ones
49 | t = t[:, :, None] + 2*t[:, None, :] # Singleton dimensions, broadcasting, and arithmetics
50 | print(tn.mean(t)) # Result: 3
51 |
52 | Decomposing a tensor:
53 |
54 | .. code-block:: python
55 |
56 | import tntorch as tn
57 |
58 | data = ... # A NumPy or PyTorch tensor
59 | t1 = tn.Tensor(data, ranks_cp=5) # A CP decomposition
60 | t2 = tn.Tensor(data, ranks_tucker=5) # A Tucker decomposition
61 | t3 = tn.Tensor(data, ranks_tt=5) # A tensor train decomposition
62 |
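Reconstruction and recompression follow the same pattern for every format; a short sketch (using calls shown elsewhere in this documentation):

.. code-block:: python

    full = t3.torch()                  # decompress back into a regular PyTorch tensor
    t3r = tn.round(t3)                 # recompress (round) the decomposition
    print(tn.relative_error(t3, t3r))  # should be small
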
63 | To get fully on board, check out the complete documentation:
64 |
65 | .. toctree::
66 | :hidden:
67 |
68 | Welcome
69 |
70 | .. toctree::
71 | :maxdepth: 1
72 |
73 | goals
74 | api
75 | tutorial-notebooks
76 | contact
77 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | nbsphinx
2 | ipykernel
3 |
4 |
--------------------------------------------------------------------------------
/docs/tntorch.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/docs/tntorch_borders.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/docs/tutorial-notebooks.rst:
--------------------------------------------------------------------------------
1 | Tutorial Notebooks
2 | ==================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 | :caption: Contents
7 |
8 | tutorials/introduction
9 | tutorials/active_subspaces
10 | tutorials/anova
11 | tutorials/arithmetics
12 | tutorials/automata
13 | tutorials/classification
14 | tutorials/completion
15 | tutorials/cross
16 | tutorials/decompositions
17 | tutorials/derivatives
18 | tutorials/exponential_machines
19 | tutorials/logic
20 | tutorials/main_formats
21 | tutorials/other_formats
22 | tutorials/pce
23 | tutorials/sobol
24 | tutorials/vector_fields
25 |
--------------------------------------------------------------------------------
/docs/tutorials/active_subspaces.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Active Subspaces\n",
8 | "\n",
9 | "Sometimes, the behavior of an $N$-dimensional model $f$ can be explained best by a *linear reparameterization* of its inputs variables, i.e. we can write $f(\\mathbf{x}) = g(\\mathbf{y}) = g(\\mathbf{M} \\cdot \\mathbf{x})$ where $\\mathbf{M}$ has size $M \\times N$ and $M < N$. When this happens, we say that $f$ admits an $M$-dimensional *active subspace* with basis given by $\\mathbf{M}$'s rows. Those basis vectors are the main directions of variance of the function $f$.\n",
10 | "\n",
11 | "The main directions are the eigenvectors of the matrix\n",
12 | "\n",
13 | "$\\mathbb{E}[\\nabla f^T \\cdot \\nabla f] = \\begin{pmatrix}\n",
14 | "\\mathbb{E}[f_{x_1} \\cdot f_{x_1}] & \\dots & \\mathbb{E}[f_{x_1} \\cdot f_{x_N}] \\\\\n",
15 | "\\dots & \\dots & \\dots \\\\\n",
16 | "\\mathbb{E}[f_{x_N} \\cdot f_{x_1}] & \\dots & \\mathbb{E}[f_{x_N} \\cdot f_{x_N}]\n",
17 | "\\end{pmatrix}$\n",
18 | "\n",
19 | "whereas the eigenvalues reveal the subspace's dimensionality --that is, a large gap between the $M$-th and $(M+1)$-th eigenvalue indicates that an $M$-dimensional active subspace is present.\n",
20 | "\n",
21 | "The necessary expected values are easy to compute from a tensor decomposition: they are just dot products between tensors. We will show a small demonstration of that in this notebook using a 4D function.\n",
22 | "\n",
23 | "Reference: see e.g. [\"Discovering an Active Subspace in a Single-Diode Solar Cell Model\", P. Constantine et al. (2015)](https://arxiv.org/abs/1406.7607)."
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": 1,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "import tntorch as tn\n",
33 | "import torch\n",
34 | "\n",
35 | "def f(X):\n",
36 | " return X[:, 0] * X[:, 1] + X[:, 2]\n",
37 | "\n",
38 | "ticks = 64\n",
39 | "P = 100\n",
40 | "N = 4\n",
41 | "\n",
42 | "X = torch.rand((P, N))\n",
43 | "X *= (ticks-1)\n",
44 | "X = torch.round(X)\n",
45 | "y = f(X)"
46 | ]
47 | },
48 | {
49 | "cell_type": "markdown",
50 | "metadata": {},
51 | "source": [
52 | "We will fit this function `f` using a low-degree expansion in terms of [Legendre polynomials](pce.ipynb)."
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "execution_count": 2,
58 | "metadata": {},
59 | "outputs": [
60 | {
61 | "name": "stdout",
62 | "output_type": "stream",
63 | "text": [
64 | "iter: 0 | loss: 0.999513 | total time: 0.0020\n",
65 | "iter: 500 | loss: 0.977497 | total time: 0.8296\n",
66 | "iter: 1000 | loss: 0.763221 | total time: 1.6445\n",
67 | "iter: 1500 | loss: 0.044802 | total time: 2.5523\n",
68 | "iter: 2000 | loss: 0.008546 | total time: 3.4807\n",
69 | "iter: 2266 | loss: 0.008208 | total time: 3.9928 <- converged (tol=0.0001)\n"
70 | ]
71 | }
72 | ],
73 | "source": [
74 | "t = tn.rand(shape=[ticks]*N, ranks_tt=2, ranks_tucker=2, requires_grad=True)\n",
75 | "t.set_factors('legendre')\n",
76 | "\n",
77 | "def loss(t):\n",
78 | " return torch.norm(t[X].torch()-y) / torch.norm(y)\n",
79 | "tn.optimize(t, loss)\n"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": 3,
85 | "metadata": {},
86 | "outputs": [
87 | {
88 | "data": {
89 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAY4AAAEWCAYAAABxMXBSAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAGElJREFUeJzt3Xm4XXV97/H3h0GUOgAmDDJFEYuoNWIEHG7FapkcQC1VpBoUpTyFVjo8LXr7FIfaorfaXqwTXtF4VZAiaERUIiJoK0NQZBQJiJISSRhkEOQyfO8fax3YHM6wV5J99k7O+/U8+9lr/dZvr/09K9nnc9ZvDTtVhSRJ/dpg2AVIktYtBockqRODQ5LUicEhSerE4JAkdWJwSJI6MTg0qyU5JMlZw65jKkm+l+Ttw65DGrPRsAuQZkKS64GtgAd6mj9XVUcBXxxKUdI6yuDQbPLqqvrOsIuQ1nUOVWlWS3Jokh/0zO+d5Ooktyf5eJJze4eJkrwtyVVJbkvy7SQ79iyrJEckuaZd/rE0Nkny6yTP7uk7N8k9SbZMsnmSM5Ksal93RpLtJqn3PUm+0DM/r33fjdr5JyX5TJIVSf47yT8m2bBd9vT257k9yc1Jvrx2t6ZmC4NDaiWZA5wKvAt4MnA18KKe5QcC7wZeB8wFvg+cNG41rwJeADwX+GNgn6q6FzgNOLin3x8D51bVSprP4WeBHYEdgHuAf1/NH2MRcD/wdOB5wN7AWPC9HzgL2BzYDvjoar6HZjmDQ7PJV9u//Mce7xi3fH/giqo6raruB44HftWz/E+Bf66qq9rl/wTM793rAI6rql9X1S+Bc4D5bfuXeGRwvKlto6puqaqvVNXdVXUn8AHgpV1/uCRbAfsBR1fVb9pQ+lfgjW2X+2jC6SlV9duq+sEkq5KmZHBoNjmwqjbreXx63PKnADeMzVRzB9DlPct3BP73WPAAtwIBtu3p0xs0dwOPb6e/CzwuyR5t0MwHTgdIsmmSTyX5RZI7gPOAzcaGmDrYEdgYWNFT46eALdvlf9vWe2GSK5K8reP6JcCD41KvFTRDOAAkSe88Tah8oKo6n4VVVQ8mOYVmr+Mm4Ix27wLgr4HfBfaoql8lmQ/8mOaX/Hi/ATbtmd96XH33AnPaPaLxNfwKeEf7s70E+E6S86pqWdefR7ObexzSw74BPCfJge3B5iN55C/mTwLvSvIseOhA9EEd1v8l4A3AIe30mCfQHNf4dZItgGOnWMclwO8n2SHJk2iOxwBQVStojmF8OMkTk2yQZKckL23rPajnoPttQPHI05Olvhgcmk2+nuSunsfpvQur6mbgIOBDwC3ArsBSmr/iqarTgQ8CJ7dDSpfTHFPoS1VdQLPH8BTgmz2L/g14HHAzcD7wrSnWsQT4MnApcDFwxrgubwEeA1xJEw6nAtu0y14AXJDkLmAx8M6q+nm/9Utj4hc5SRNLsgHNMY5DquqcYdcjjQr3OKQeSfZJslmSTWhOvQ3NXoCklsEhPdILgWtpho1eTXMm1j3DLUkaLQ5VSZI6cY9DktTJenkdx5w5c2revHnDLkOS1ikXX3zxzVU1d7p+62VwzJs3j6VLlw67DElapyT5RT/9HKqSJHVicEiSOjE4JEmdGBySpE4MDklSJwaHJKkTg0OS1InBIUnqxOCQJHWyXl45vqbmHfONYZcwVNcf98phlyBphLnHIUnqxOCQJHVicEiSOjE4JEmdGBySpE4MDklSJwaHJKkTg0OS1InBIUnqxOCQJHVicEiSOjE4JEmdGBySpE4MDklSJwaHJKkTg0OS1InBIUnqxOCQJHVicEiSOhlYcCTZPsk5Sa5KckWSd7btWyRZkuSa9nnztj1Jjk+yLMmlSXbrWdfCtv81SRYOqmZJ0vQGucdxP/DXVfVMYE/gyCS7AscAZ1fVzsDZ7TzAfsDO7eNw4BPQBA1wLLAHsDtw7FjYSJJm3sCCo6pWVNWP2uk7gauAbYEDgEVtt0XAge30AcDnq3E+sFmSbYB9gCVVdWtV3QYsAfYdVN2SpKnNyDGOJPOA5wEXAFtV1QpowgXYsu22LXBDz8uWt22TtY9/j8OTLE2ydNWqVWv7R5AktQYeHEkeD3wFOLqq7piq6wRtNUX7IxuqTqiqBVW1YO7cuatXrCRpWgMNjiQb04TGF6vqtLb5pnYIivZ5Zdu+HNi+5+XbATdO0S5JGoJBnlUV4DPAVVX1kZ5Fi4GxM6MWAl/raX9Le3bVnsDt7VDWt4G9k2zeHhTfu22TJA3BRgNc94uBNwOXJbmkbXs3cBxwSpLDgF8CB7XLzgT2B5YBdwNvBaiqW5O8H7io7fe+qrp1gHVLkqYwsOCoqh8w8fEJgJdP0L+AIydZ14nAiWuvOknS6vLKcUlSJwaHJKkTg0OS1InBIUnqxOCQJHVicEiSOjE4JEmdGBySpE4MDklSJwaHJKkTg0OS1InBIUnqxOCQJHVicEiSOjE4JEmdGBySpE4MDklSJwaHJKkTg0OS1InBIUnqxOCQJHVicEiSOjE4JEmdGBySpE4MDklSJwaHJKkTg0OS1InBIUnqxOCQJHVicEiSOjE4JEmdGBySpE4MDklSJwaHJKkTg0OS1InBIUnqxOCQJHUysOBIcmKSlUku72l7T5L/TnJJ+9i/Z9m7kixLcnWSfXra923bliU5ZlD1SpL6M8g9js8B+07Q/q9VNb99nAmQZFfgjcCz2td8PMmGSTYEPgbsB+wKHNz2lSQNyUaDWnFVnZdkXp/dDwBOrqp7gZ8nWQbs3i5bVlXXASQ5ue175VouV5LUp2Ec4zgqyaXtUNbmbdu2wA09fZa3bZO1P0qSw5MsTbJ01apVg6hbksTMB8cngJ2A+cAK4MNteyboW1O0P7qx6oSqWlBVC+bOnbs2apUkTWBgQ1UTqaqbxqaTfBo4o51dDmzf03U74MZ2erJ2SdIQzOgeR5JtemZfC4ydcbUYeGOSTZI8FdgZuBC4CNg5yVOTPIbmAPrimaxZkvRIA9vjSHISsBcwJ8ly4FhgryTzaYabrgf+FKCqrkhyCs1B7/uBI6vqgXY9RwHfBjYETqyqKwZVsyRpen0FR5IAhwBPq6r3JdkB2LqqLpzsNVV18ATNn5mi/weAD0zQfiZwZj91SpIGr9+hqo8DLwTGwuBOmusrJEmzTL9DVXtU1W5JfgxQVbe1xxwkSbNMv3sc97VXcRdAkrnAgwOrSpI0svoNjuOB04Etk3wA+AHwTwOrSpI0svoaqqqqLya5GHg5zUV5B1bVVQOtTJI0kqYMjiRb9MyuBE7qXVZVtw6qMEnSaJpuj+NiHr71xw7Abe30ZsAvgacOtDpJ0siZ8hhHVT21qp5GcwHeq6tqTlU9GXgVcNpMFChJGi39Hhx/wdh3ZwBU1TeBlw6mJEnSKOv3Oo6bk/w98AWaoas/AW4ZWFWSpJHV7x7HwcBcmlNyvwpsycNXkUuSZpF+T8e9FXjngGuRJK0D+r3J4TlM8AVKVfUHa70iSdJI6/cY
x9/0TD8WeD3N7c8lSbNMv0NVF49r+s8k5w6gHknSiOt3qKr3CvINgOcDWw+kIknSSOt3qKr3CvL7gZ8Dhw2qKEnS6Oo3OJ5ZVb/tbUiyyQDqkSSNuH6v4/ivCdp+uDYLkSStG6a7O+7WwLbA45I8j2aoCuCJwKYDrk2SNIKmG6raBzgU2A74SE/7ncC7B1STJGmETRkcVbUIWJTk9VX1lRmqSZI0wqYbqvqTqvoCMC/JX41fXlUfmeBlkqT12HRDVb/TPj9+0IVIktYN0w1Vfap9fu/MlCNJGnX9Xjk+F3gHMK/3NVX1tsGUJUkaVf1eAPg14PvAd4AHBleOJGnU9Rscm1bV3w20EknSOqHfK8fPSLL/QCuRJK0T+g2Od9KExz1J7khyZ5I7BlmYJGk09ft9HE8YdCGSpHVDv2dV7TZB8+3AL6rKbwKUpFmk34PjHwd2Ay5r558D/AR4cpIjquqsQRQnSRo9/R7juB54XlU9v6qeD8wHLgdeAXxoQLVJkkZQv8GxS1VdMTZTVVfSBMl1gylLkjSq+h2qujrJJ4CT2/k3AD9rvwXwvoFUJkkaSf3ucRwKLAOOBv4SuK5tuw942SAKkySNpr6Co6ruqaoPV9Vrq+rAqvqXqrq7qh6sqrsmek2SE5OsTHJ5T9sWSZYkuaZ93rxtT5LjkyxLcmnvWVxJFrb9r0mycE1/YEnSmukrOJLsnOTUJFcmuW7sMc3LPgfsO67tGODsqtoZOLudB9gP2Ll9HA58on3fLYBjgT2A3YFjx8JGkjQc/Q5VfZbml/n9NENTnwf+71QvqKrzgFvHNR8ALGqnFwEH9rR/vhrnA5sl2Ybmq2uXVNWtVXUbsIRHh5EkaQb1GxyPq6qzgVTVL6rqPcAfrMb7bVVVKwDa5y3b9m2BG3r6LW/bJmt/lCSHJ1maZOmqVatWozRJUj/6DY7fJtkAuCbJUUley8O/9NeGTNBWU7Q/urHqhKpaUFUL5s6duxZLkyT16jc4jgY2Bf4CeD7wZmB1DlTf1A5B0T6vbNuXA9v39NsOuHGKdknSkPR7VtVFVXVXVS2vqrdW1evaYxFdLebhwFlI8wVRY+1vac+u2hO4vR3K+jawd5LN24Pie7dtkqQhmfICwCSLp1peVa+Z4rUnAXsBc5Ispzk76jjglCSHAb8EDmq7nwnsT3OtyN3AW9v135rk/cBFbb/3VdX4A+6SpBk03ZXjL6Q5OH0ScAETH3OYUFUdPMmil0/Qt4AjJ1nPicCJ/b6vJGmwpguOrYE/BA4G3gR8Azip975VkqTZZcpjHFX1QFV9q6oWAnvSDCV9L8mfz0h1kqSRM+1NDtsbGb6SZq9jHnA8cNpgy5IkjarpDo4vAp4NfBN4b1VdPlV/SdL6b7o9jjcDvwGeAfxF8tCx8dAc037iAGuTJI2gKYOjqvq9QFCSNEsYDJKkTgwOSVInBockqRODQ5LUicEhSerE4JAkdWJwSJI6MTgkSZ0YHJKkTgwOSVInBockqRODQ5LUicEhSerE4JAkdWJwSJI6MTgkSZ0YHJKkTgwOSVInBockqRODQ5LUicEhSerE4JAkdWJwSJI6MTgkSZ0YHJKkTgwOSVInBockqRODQ5LUicEhSerE4JAkdWJwSJI6GUpwJLk+yWVJLkmytG3bIsmSJNe0z5u37UlyfJJlSS5NstswapYkNYa5x/GyqppfVQva+WOAs6tqZ+Dsdh5gP2Dn9nE48IkZr1SS9JBRGqo6AFjUTi8CDuxp/3w1zgc2S7LNMAqUJA0vOAo4K8nFSQ5v27aqqhUA7fOWbfu2wA09r13etj1CksOTLE2ydNWqVQMsXZJmt42G9L4vrqobk2wJLEny0yn6ZoK2elRD1QnACQALFix41HJJ0toxlD2OqrqxfV4JnA7sDtw0NgTVPq9suy8Htu95+XbAjTNXrSSp14wHR5LfSfKEsWlgb+ByYDGwsO22EPhaO70YeEt7dtWewO1jQ1qSpJk3jKGqrYDTk4y9/5eq6ltJLgJOSXIY8EvgoLb/mcD+wDLgbuCtM1+yJGnMjAdHVV0HPHeC9luAl0/QXsCRM1CaJKkPo3Q6riRpHWBwSJI6MTgkSZ0YHJKkTgwOSVInw7pyXOuxecd8Y9glDNX1x71y2CVIA+UehySpE4NDktSJwSFJ6sTgkCR1YnBIkjoxOCRJnRgckqRODA5JUicGhySpE4NDktSJwSFJ6sTgkCR1YnBIkjoxOCRJnRgckqRODA5JUicGhySpE4NDktSJwSFJ6sTgkCR1YnBIkjoxOCRJnRgckqRODA5JUicGhySpE4NDktSJwSFJ6sTgkCR1YnBIkjoxOCRJnRgckqRO1pngSLJvkquTLEtyzLDrkaTZap0IjiQbAh8D9gN2BQ5Osutwq5Kk2WmdCA5gd2BZVV1XVf8POBk4YMg1SdKstNGwC+jTtsANPfPLgT16OyQ5HDi8nb0rydVTrG8OcPNarXDtGmp9+eC0Xdx+U3D7DZz1rZmp6tuxnxWsK8GRCdrqETNVJwAn9LWyZGlVLVgbhQ2C9a0Z61sz1rdmZkN968pQ1XJg+5757YAbh1SLJM1q60pwXATsnOSpSR4DvBFYPOSaJGlWWieGqqrq/iRHAd8GNgROrKor1mCVfQ1pDZH1rRnrWzPWt2bW+/pSVdP3kiSpta4MVUmSRoTBIUnqZFYER5ItkixJck37vPkk/R5Ickn7GOjB9+luoZJkkyRfbpdfkGTeIOtZjfoOTbKqZ3u9fYbrOzHJyiSXT7I8SY5v6780yW4jVt9eSW7v2X7/MMP1bZ/knCRXJbkiyTsn6DO0bdhnfUPbhkkem+TCJD9p63vvBH2G9hnus77V/wxX1Xr/AD4EHNNOHwN8cJJ+d81QPRsC1wJPAx4D/ATYdVyfPwM+2U6/EfjyDG6vfuo7FPj3If6b/j6wG3D5JMv3B75Jcw3QnsAFI1bfXsAZQ9x+2wC7tdNPAH42wb/x0LZhn/UNbRu22+Tx7fTGwAXAnuP6DPMz3E99q/0ZnhV7HDS3J1nUTi8CDhxiLdDfLVR6az4VeHmSiS6EHFZ9Q1VV5wG3TtHlAODz1Tgf2CzJNjNTXV/1DVVVraiqH7XTdwJX0dyhodfQtmGf9Q1Nu03uamc3bh/jzzQa2me4z/pW22wJjq2qagU0/yGBLSfp99gkS5Ocn2SQ4TLRLVTGfyge6lNV9wO3A08eYE0TvndrovoAXt8OYZyaZPsJlg9Tvz/DML2wHUr4ZpJnDauIdgjleTR/lfYaiW04RX0wxG2YZMMklwArgSVVNen2G8JnuJ/6YDU/w+tNcCT5TpLLJ3h0+Ut5h2ouxX8T8G9JdhpUuRO0jf9roJ8+g9LPe38dmFdVvwd8h4f/shoVw9x+/fgRsGNVPRf4KPDVYRSR5PHAV4Cjq+qO8YsneMmMbsNp6hvqNqyqB6pqPs2dLHZP8uxxXYa6/fqob7U/w+tNcFTVK6rq2RM8vgbcNLaL3T6vnGQdN7bP1wHfo/krZxD6uYXKQ32SbAQ8iZkb+pi2vqq6parubWc/DTx
/hmrr10jfpqaq7hgbSqiqM4GNk8yZyRqSbEzzS/mLVXXaBF2Gug2nq28UtmH73r+m+X2x77hFw/wMP2Sy+tbkM7zeBMc0FgML2+mFwNfGd0iyeZJN2uk5wIuBKwdUTz+3UOmt+Y+A71Z7RGsGTFvfuLHu19CMQY+SxcBb2jOD9gRuHxuuHAVJth4b706yO81n8ZYZfP8AnwGuqqqPTNJtaNuwn/qGuQ2TzE2yWTv9OOAVwE/HdRvaZ7if+tboMzxTR/mH+aAZVzwbuKZ93qJtXwD8n3b6RcBlNGcQXQYcNuCa9qc5U+Ra4H+2be8DXtNOPxb4D2AZcCHwtBneZtPV98/AFe32OgfYZYbrOwlYAdxH85fdYcARwBHt8tB8+de17b/nghGr76ie7Xc+8KIZru8lNMMmlwKXtI/9R2Ub9lnf0LYh8HvAj9v6Lgf+oW0fic9wn/Wt9mfYW45IkjqZLUNVkqS1xOCQJHVicEiSOjE4JEmdGBySpE4MDomHrgk4Ocm1Sa5McmaSZwy7rtXV3jn2RcOuQ+sng0OzXnsR2enA96pqp6raFXg3sNVwK1sje9FcmyStdQaHBC8D7quqT441VNUlwA+S/K/2nmeXJXkDPPTX/LlJTknysyTHJTmk/f6Dy8bucZbkc0k+meT7bb9Xte2PTfLZtu+Pk7ysbT80yWlJvpXmu2M+NFZPkr2T/DDJj5L8R3sPJ5Jcn+S9bftlSXZpbwp4BPCXab5n4X/MzGbUbLHRsAuQRsCzgYsnaH8dMB94LjAHuCjJee2y5wLPpLn30HU0dyDYPc0XDv05cHTbbx7wUmAn4JwkTweOBKiq5yTZBTirZ1hsPs090u4Frk7yUeAe4O+BV1TVb5L8HfBXNFcBA9xcVbsl+TPgb6rq7Uk+SfP9Mv+yphtHGs/gkCb3EuCkqnqA5kaZ5wIvAO4ALqr2vk1JrgXOal9zGc0ezJhTqupB4Jok1wG7tOv9KEBV/TTJL4Cx4Di7qm5v13slsCOwGbAr8J/trZkeA/yw5z3GbgB4MU3YSQNlcEjN/Xr+aIL2qb50596e6Qd75h/kkZ+r8ff0qQ7rfaBdV2i+T+HgaV4z1l8aKI9xSPBdYJMk7xhrSPIC4DbgDWm+EGcuzdfBXthx3Qcl2aA97vE04GrgPOCQ9n2eAezQtk/mfODF7TAXSTbt44yvO2m+clVa6wwOzXrV3OnztcAftqfjXgG8B/gSzd1Ff0ITLn9bVb/quPqrgXNpvrv7iKr6LfBxYMMklwFfBg6th78XYaL6VtF8P/RJSS6lCZJdpnnfrwOv9eC4BsG740oDkuRzwBlVdeqwa5HWJvc4JEmduMchSerEPQ5JUicGhySpE4NDktSJwSFJ6sTgkCR18v8B9M2zK1IR29MAAAAASUVORK5CYII=\n",
90 | "text/plain": [
91 | ""
92 | ]
93 | },
94 | "metadata": {},
95 | "output_type": "display_data"
96 | }
97 | ],
98 | "source": [
99 | "eigvals, eigvecs = tn.active_subspace(t)\n",
100 | "\n",
101 | "import matplotlib.pyplot as plt\n",
102 | "%matplotlib inline\n",
103 | "plt.figure()\n",
104 | "plt.bar(range(N), eigvals.detach().numpy())\n",
105 | "plt.title('Eigenvalues')\n",
106 | "plt.xlabel('Component')\n",
107 | "plt.ylabel('Magnitude')\n",
108 | "plt.show()"
109 | ]
110 | },
111 | {
112 | "cell_type": "markdown",
113 | "metadata": {},
114 | "source": [
115 | "In view of those eigenvalues, we can conclude that the learned model can be written (almost) perfectly in terms of 2 linearly reparameterized variables."
116 | ]
117 | }
118 | ],
119 | "metadata": {
120 | "kernelspec": {
121 | "display_name": "Python 3",
122 | "language": "python",
123 | "name": "python3"
124 | },
125 | "language_info": {
126 | "codemirror_mode": {
127 | "name": "ipython",
128 | "version": 3
129 | },
130 | "file_extension": ".py",
131 | "mimetype": "text/x-python",
132 | "name": "python",
133 | "nbconvert_exporter": "python",
134 | "pygments_lexer": "ipython3",
135 | "version": "3.7.2"
136 | }
137 | },
138 | "nbformat": 4,
139 | "nbformat_minor": 2
140 | }
141 |
--------------------------------------------------------------------------------
/docs/tutorials/arithmetics.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Arithmetics\n",
8 | "\n",
9 | "## Basic Arithmetics\n",
10 | "\n",
11 | "The most basic tensor operations (addition `+`, subtraction `-`, and product `*` with either a scalar or with another tensor) can be accomplished via direct manipulation of tensor cores (see e.g. the [original tensor train paper](https://epubs.siam.org/doi/abs/10.1137/090752286?journalCode=sjoce3))."
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 1,
17 | "metadata": {},
18 | "outputs": [
19 | {
20 | "name": "stdout",
21 | "output_type": "stream",
22 | "text": [
23 | "4D TT tensor:\n",
24 | "\n",
25 | " 32 32 32 32\n",
26 | " | | | |\n",
27 | " (0) (1) (2) (3)\n",
28 | " / \\ / \\ / \\ / \\\n",
29 | "1 1 1 1 1\n",
30 | "\n"
31 | ]
32 | }
33 | ],
34 | "source": [
35 | "import tntorch as tn\n",
36 | "import torch\n",
37 | "import numpy as np\n",
38 | "\n",
39 | "t1 = tn.ones([32]*4)\n",
40 | "t2 = tn.ones([32]*4)\n",
41 | "\n",
42 | "t = tn.round((t1+t2)*(t2-2))\n",
43 | "print(t)"
44 | ]
45 | },
46 | {
47 | "cell_type": "markdown",
48 | "metadata": {},
49 | "source": [
50 | "You can also *assign* values to parts of a tensor:"
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": 2,
56 | "metadata": {},
57 | "outputs": [
58 | {
59 | "name": "stdout",
60 | "output_type": "stream",
61 | "text": [
62 | "tensor([[6., 6., 2., 2., 2.],\n",
63 | " [6., 6., 2., 2., 2.],\n",
64 | " [6., 6., 2., 2., 2.],\n",
65 | " [3., 3., 1., 1., 1.],\n",
66 | " [3., 3., 1., 1., 1.]])\n"
67 | ]
68 | }
69 | ],
70 | "source": [
71 | "t = tn.ones(5, 5)\n",
72 | "t[:3, :] = 2\n",
73 | "t[:, :2] *= 3\n",
74 | "print(t.torch())"
75 | ]
76 | },
77 | {
78 | "cell_type": "markdown",
79 | "metadata": {},
80 | "source": [
81 | "## Advanced Operations\n",
82 | "\n",
83 | "Thanks to [cross-approximation](cross.ipynb), *tntorch* supports many other more advanced operations on tensors, including element-wise division `/`, `exp()`, `log()`, `sin()`, etc."
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": 3,
89 | "metadata": {},
90 | "outputs": [
91 | {
92 | "name": "stdout",
93 | "output_type": "stream",
94 | "text": [
95 | "4D TT-Tucker tensor:\n",
96 | "\n",
97 | " 32 32 32 32\n",
98 | " | | | |\n",
99 | " 7 13 13 7\n",
100 | " (0) (1) (2) (3)\n",
101 | " / \\ / \\ / \\ / \\\n",
102 | "1 7 7 7 1\n",
103 | "\n"
104 | ]
105 | }
106 | ],
107 | "source": [
108 | "domain = [torch.linspace(0, np.pi, 32)]*4\n",
109 | "x, y, z, w = tn.meshgrid(domain)\n",
110 | "\n",
111 | "t = tn.round(1 / (1+x+y+z+w))\n",
112 | "print(t)"
113 | ]
114 | },
115 | {
116 | "cell_type": "markdown",
117 | "metadata": {},
118 | "source": [
119 | "We will now try the trigonometric identity $\\sin^2(x) + \\cos^2(x) = 1$:"
120 | ]
121 | },
122 | {
123 | "cell_type": "code",
124 | "execution_count": 4,
125 | "metadata": {},
126 | "outputs": [
127 | {
128 | "name": "stdout",
129 | "output_type": "stream",
130 | "text": [
131 | "4D TT tensor:\n",
132 | "\n",
133 | " 32 32 32 32\n",
134 | " | | | |\n",
135 | " (0) (1) (2) (3)\n",
136 | " / \\ / \\ / \\ / \\\n",
137 | "1 13 17 13 1\n",
138 | "\n"
139 | ]
140 | }
141 | ],
142 | "source": [
143 | "t = tn.round(tn.sin(t)**2 + tn.cos(t)**2)\n",
144 | "print(t)"
145 | ]
146 | },
147 | {
148 | "cell_type": "markdown",
149 | "metadata": {},
150 | "source": [
151 | "The tensor `t` should be $1$ everywhere. Indeed:"
152 | ]
153 | },
154 | {
155 | "cell_type": "code",
156 | "execution_count": 5,
157 | "metadata": {},
158 | "outputs": [
159 | {
160 | "name": "stdout",
161 | "output_type": "stream",
162 | "text": [
163 | "tensor(1.0000)\n",
164 | "tensor(1.8159e-15)\n"
165 | ]
166 | }
167 | ],
168 | "source": [
169 | "print(tn.mean(t))\n",
170 | "print(tn.var(t))"
171 | ]
172 | }
173 | ],
174 | "metadata": {
175 | "kernelspec": {
176 | "display_name": "Python 3",
177 | "language": "python",
178 | "name": "python3"
179 | },
180 | "language_info": {
181 | "codemirror_mode": {
182 | "name": "ipython",
183 | "version": 3
184 | },
185 | "file_extension": ".py",
186 | "mimetype": "text/x-python",
187 | "name": "python",
188 | "nbconvert_exporter": "python",
189 | "pygments_lexer": "ipython3",
190 | "version": "3.6.7"
191 | }
192 | },
193 | "nbformat": 4,
194 | "nbformat_minor": 2
195 | }
196 |
--------------------------------------------------------------------------------
/docs/tutorials/automata.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Automata\n",
8 | "\n",
9 | "Tensor trains can represent compactly *deterministic finite automata* and *weighted finite automata* that read a fixed number of symbols."
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 1,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "import torch\n",
19 | "import tntorch as tn"
20 | ]
21 | },
22 | {
23 | "cell_type": "markdown",
24 | "metadata": {},
25 | "source": [
26 | "For instance, `weight_mask` produces an automaton that accepts a string iff it has a certain amount of 1's:"
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": 2,
32 | "metadata": {},
33 | "outputs": [
34 | {
35 | "data": {
36 | "text/plain": [
37 | "4D TT tensor:\n",
38 | "\n",
39 | " 2 2 2 2\n",
40 | " | | | |\n",
41 | " (0) (1) (2) (3)\n",
42 | " / \\ / \\ / \\ / \\\n",
43 | "1 2 3 2 1"
44 | ]
45 | },
46 | "execution_count": 2,
47 | "metadata": {},
48 | "output_type": "execute_result"
49 | }
50 | ],
51 | "source": [
52 | "m = tn.weight_mask(N=4, weight=2)\n",
53 | "m"
54 | ]
55 | },
56 | {
57 | "cell_type": "markdown",
58 | "metadata": {},
59 | "source": [
60 | "All accepted input strings can be retrieved alphabetically via `accepted_inputs()`:"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": 3,
66 | "metadata": {},
67 | "outputs": [
68 | {
69 | "data": {
70 | "text/plain": [
71 | "tensor([[0, 0, 1, 1],\n",
72 | " [0, 1, 0, 1],\n",
73 | " [0, 1, 1, 0],\n",
74 | " [1, 0, 0, 1],\n",
75 | " [1, 0, 1, 0],\n",
76 | " [1, 1, 0, 0]])"
77 | ]
78 | },
79 | "execution_count": 3,
80 | "metadata": {},
81 | "output_type": "execute_result"
82 | }
83 | ],
84 | "source": [
85 | "tn.accepted_inputs(m)"
86 | ]
87 | },
88 | {
89 | "cell_type": "markdown",
90 | "metadata": {},
91 | "source": [
92 | "On the other hand, `weight()` produces an automaton that is a little different. Instead of accepting or rejecting strings, it just counts how many 1's the string has:"
93 | ]
94 | },
95 | {
96 | "cell_type": "code",
97 | "execution_count": 4,
98 | "metadata": {},
99 | "outputs": [
100 | {
101 | "name": "stdout",
102 | "output_type": "stream",
103 | "text": [
104 | "tensor(0.)\n",
105 | "tensor(1.)\n",
106 | "tensor(3.)\n"
107 | ]
108 | }
109 | ],
110 | "source": [
111 | "m = tn.weight(N=4)\n",
112 | "print(m[0, 0, 0, 0])\n",
113 | "print(m[0, 1, 0, 0])\n",
114 | "print(m[1, 0, 1, 1])"
115 | ]
116 | },
117 | {
118 | "cell_type": "markdown",
119 | "metadata": {},
120 | "source": [
121 | "### Applications\n",
122 | "\n",
123 | "TT automata come in handy to group and sum tensor entries, which is important to obtain advanced [metrics for sensitivity analysis](sobol.ipynb). See also the tutorial on [Boolean logic with *tntorch*](logic.ipynb)."
124 | ]
125 | }
126 | ],
127 | "metadata": {
128 | "kernelspec": {
129 | "display_name": "Python 3",
130 | "language": "python",
131 | "name": "python3"
132 | },
133 | "language_info": {
134 | "codemirror_mode": {
135 | "name": "ipython",
136 | "version": 3
137 | },
138 | "file_extension": ".py",
139 | "mimetype": "text/x-python",
140 | "name": "python",
141 | "nbconvert_exporter": "python",
142 | "pygments_lexer": "ipython3",
143 | "version": "3.7.2"
144 | }
145 | },
146 | "nbformat": 4,
147 | "nbformat_minor": 2
148 | }
149 |
--------------------------------------------------------------------------------
/docs/tutorials/cross.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Cross-approximation\n",
8 | "\n",
9 | "Often, we would like to build a $N$-dimensional tensor from a **black-box function** $f: \\Omega \\subset \\mathbb{R}^N \\to \\mathbb{R}$, where $\\Omega$ is a tensor product grid. That is, we are free to sample *whatever entries we want* within our domain $\\Omega$, but we cannot afford to sample the entire domain (as it contains an **exponentially large number of points**). One way to build such a tensor is using **cross-approximation** ([I. Oseledets, E. Tyrtyshnikov: \"TT-cross Approximation for Multidimensional Arrays\"](http://www.mat.uniroma2.it/~tvmsscho/papers/Tyrtyshnikov5.pdf)) from well-chosen fibers in the domain.\n",
10 | "\n",
11 | "We support two major use cases of cross-approximation in the TT format.\n",
12 | "\n",
13 | "## Approximating a Function over a Domain\n",
14 | "\n",
15 | "This is the more basic setting. We just need to specify:\n",
16 | "\n",
17 | "- Our function of interest\n",
18 | "- The tensor product domain $\\Omega = u_1 \\otimes \\dots \\otimes u_N$"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 1,
24 | "metadata": {},
25 | "outputs": [
26 | {
27 | "name": "stdout",
28 | "output_type": "stream",
29 | "text": [
30 | "Cross-approximation over a 5D domain containing 3.35544e+07 grid points:\n",
31 | "iter: 0 | eps: 9.221e-01 | total time: 0.0110 | largest rank: 1\n",
32 | "iter: 1 | eps: 4.867e-03 | total time: 0.0350 | largest rank: 4\n",
33 | "iter: 2 | eps: 4.295e-06 | total time: 0.0609 | largest rank: 7\n",
34 | "iter: 3 | eps: 8.606e-09 | total time: 0.1027 | largest rank: 10 <- converged: eps < 1e-06\n",
35 | "Did 33984 function evaluations, which took 0.001594s (2.133e+07 evals/s)\n",
36 | "\n",
37 | "5D TT tensor:\n",
38 | "\n",
39 | " 32 32 32 32 32\n",
40 | " | | | | |\n",
41 | " (0) (1) (2) (3) (4)\n",
42 | " / \\ / \\ / \\ / \\ / \\\n",
43 | "1 10 10 10 10 1\n",
44 | "\n"
45 | ]
46 | }
47 | ],
48 | "source": [
49 | "import tntorch as tn\n",
50 | "import torch\n",
51 | "\n",
52 | "def function(x, y, z, t, w): # Input arguments are vectors\n",
53 | " return 1 / (x + y + z + t + w) # Hilbert tensor\n",
54 | "\n",
55 | "domain = [torch.arange(1, 33) for n in range(5)]\n",
56 | "t = tn.cross(function=function, domain=domain)\n",
57 | "\n",
58 | "print(t)"
59 | ]
60 | },
61 | {
62 | "cell_type": "markdown",
63 | "metadata": {},
64 | "source": [
65 | "Sometimes it's more convenient to work with functions that accept matrices (instead of a list of vectors) as input. We can do this with the `function_arg='matrix'` flag:"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": 2,
71 | "metadata": {},
72 | "outputs": [
73 | {
74 | "name": "stdout",
75 | "output_type": "stream",
76 | "text": [
77 | "Cross-approximation over a 5D domain containing 3.35544e+07 grid points:\n",
78 | "iter: 0 | eps: 9.355e-01 | total time: 0.0138 | largest rank: 1\n",
79 | "iter: 1 | eps: 4.148e-03 | total time: 0.0341 | largest rank: 4\n",
80 | "iter: 2 | eps: 5.244e-06 | total time: 0.0610 | largest rank: 7\n",
81 | "iter: 3 | eps: 7.581e-09 | total time: 0.0961 | largest rank: 10 <- converged: eps < 1e-06\n",
82 | "Did 33984 function evaluations, which took 0.00437s (7.777e+06 evals/s)\n",
83 | "\n"
84 | ]
85 | }
86 | ],
87 | "source": [
88 | "def function(Xs): # Matrix (one row per sample, one column per input variable) and return a vector with one result per sample\n",
89 | " return 1/torch.sum(Xs, dim=1)\n",
90 | "\n",
91 | "t = tn.cross(function=function, domain=domain, function_arg='matrix')"
92 | ]
93 | },
94 | {
95 | "cell_type": "markdown",
96 | "metadata": {},
97 | "source": [
98 | "## Element-wise Operations on Tensors\n",
99 | "\n",
100 | "Here we have one (or several) $N$-dimensional tensors that we want to transform element-wise. For instance, we may want to square each element of our tensor:"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": 3,
106 | "metadata": {},
107 | "outputs": [
108 | {
109 | "name": "stdout",
110 | "output_type": "stream",
111 | "text": [
112 | "Cross-approximation over a 5D domain containing 3.35544e+07 grid points:\n",
113 | "iter: 0 | eps: 9.539e-01 | total time: 0.0062 | largest rank: 1\n",
114 | "iter: 1 | eps: 2.066e-02 | total time: 0.0174 | largest rank: 4\n",
115 | "iter: 2 | eps: 5.644e-05 | total time: 0.0338 | largest rank: 7\n",
116 | "iter: 3 | eps: 6.255e-08 | total time: 0.0627 | largest rank: 10 <- converged: eps < 1e-06\n",
117 | "Did 33984 function evaluations, which took 0.0005157s (6.59e+07 evals/s)\n",
118 | "\n"
119 | ]
120 | }
121 | ],
122 | "source": [
123 | "t2 = tn.cross(function=lambda x: x**2, tensors=t)"
124 | ]
125 | },
126 | {
127 | "cell_type": "markdown",
128 | "metadata": {},
129 | "source": [
130 | "Just for practice, let's do this now in a slightly different way by passing two tensors:"
131 | ]
132 | },
133 | {
134 | "cell_type": "code",
135 | "execution_count": 4,
136 | "metadata": {},
137 | "outputs": [
138 | {
139 | "name": "stdout",
140 | "output_type": "stream",
141 | "text": [
142 | "Cross-approximation over a 5D domain containing 3.35544e+07 grid points:\n",
143 | "iter: 0 | eps: 9.757e-01 | total time: 0.0081 | largest rank: 1\n",
144 | "iter: 1 | eps: 2.939e-02 | total time: 0.0228 | largest rank: 4\n",
145 | "iter: 2 | eps: 1.086e-04 | total time: 0.0440 | largest rank: 7\n",
146 | "iter: 3 | eps: 8.331e-08 | total time: 0.0675 | largest rank: 10 <- converged: eps < 1e-06\n",
147 | "Did 33984 function evaluations, which took 0.0005171s (6.572e+07 evals/s)\n",
148 | "\n"
149 | ]
150 | }
151 | ],
152 | "source": [
153 | "t2 = tn.cross(function=lambda x, y: x*y, tensors=[t, t])"
154 | ]
155 | },
156 | {
157 | "cell_type": "markdown",
158 | "metadata": {},
159 | "source": [
160 | "Let's check the accuracy of our cross-approximated squaring operation, compared to the ground truth `t*t`:"
161 | ]
162 | },
163 | {
164 | "cell_type": "code",
165 | "execution_count": 5,
166 | "metadata": {},
167 | "outputs": [
168 | {
169 | "data": {
170 | "text/plain": [
171 | "tensor(8.6986e-08)"
172 | ]
173 | },
174 | "execution_count": 5,
175 | "metadata": {},
176 | "output_type": "execute_result"
177 | }
178 | ],
179 | "source": [
180 | "tn.relative_error(t*t, t2)"
181 | ]
182 | },
183 | {
184 | "cell_type": "markdown",
185 | "metadata": {},
186 | "source": [
187 | "See [this notebook](arithmetics.ipynb) for more examples of element-wise tensor operations."
188 | ]
189 | }
190 | ],
191 | "metadata": {
192 | "kernelspec": {
193 | "display_name": "Python 3",
194 | "language": "python",
195 | "name": "python3"
196 | },
197 | "language_info": {
198 | "codemirror_mode": {
199 | "name": "ipython",
200 | "version": 3
201 | },
202 | "file_extension": ".py",
203 | "mimetype": "text/x-python",
204 | "name": "python",
205 | "nbconvert_exporter": "python",
206 | "pygments_lexer": "ipython3",
207 | "version": "3.6.7"
208 | }
209 | },
210 | "nbformat": 4,
211 | "nbformat_minor": 2
212 | }
213 |
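
For quick reference, here is a minimal sketch condensing the cross-approximation workflow from the notebook above (a sketch only, assuming a working `tntorch` + `torch` installation; every call used is demonstrated in the notebook):

```python
import torch
import tntorch as tn

# Black-box function sampled on a 32^5 tensor-product grid (Hilbert-like tensor)
def f(x, y, z, t, w):                      # each argument is a vector of grid values
    return 1 / (x + y + z + t + w)

domain = [torch.arange(1, 33) for _ in range(5)]
t = tn.cross(function=f, domain=domain)    # adaptive TT cross-approximation

# Element-wise transformation of an already compressed tensor
t2 = tn.cross(function=lambda x: x**2, tensors=t)

# Compare against the exact element-wise product t*t
print(tn.relative_error(t*t, t2))          # small (around 1e-7 in the notebook runs)
```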
--------------------------------------------------------------------------------
/docs/tutorials/decompositions.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Tensor Decompositions\n",
8 | "\n",
9 | "The philosophy of *tntorch* is simple: **one class for all formats**. [Different decompositions](main_formats.ipynb) (CP, Tucker, TT, hybrids) all use the same interface. \n",
10 | "\n",
11 | "*Note: sometimes the internal format will change automatically. For example, no recompression algorithm is known for the CP format, and running `round()` on a CP tensor will convert it to the TT format.*\n",
12 | "\n",
13 | "We will give a few examples of how to compress a full tensor into different tensor formats."
14 | ]
15 | },
16 | {
17 | "cell_type": "code",
18 | "execution_count": 1,
19 | "metadata": {},
20 | "outputs": [
21 | {
22 | "name": "stdout",
23 | "output_type": "stream",
24 | "text": [
25 | "torch.Size([128, 128, 128])\n"
26 | ]
27 | }
28 | ],
29 | "source": [
30 | "import tntorch as tn\n",
31 | "import torch\n",
32 | "import time\n",
33 | "\n",
34 | "import numpy as np\n",
35 | "X, Y, Z = np.meshgrid(range(128), range(128), range(128))\n",
36 | "full = torch.Tensor(np.sqrt(np.sqrt(X)*(Y+Z) + Y*Z**2)*(X + np.sin(Y)*np.cos(Z))) # Some analytical 3D function\n",
37 | "print(full.shape)"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "metadata": {},
43 | "source": [
44 | "## TT\n",
45 | "\n",
46 | "To compress as a low-rank tensor train (TT), use the `ranks_tt` argument:"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": 2,
52 | "metadata": {},
53 | "outputs": [
54 | {
55 | "name": "stdout",
56 | "output_type": "stream",
57 | "text": [
58 | "3D TT tensor:\n",
59 | "\n",
60 | " 128 128 128\n",
61 | " | | |\n",
62 | " (0) (1) (2)\n",
63 | " / \\ / \\ / \\\n",
64 | "1 3 3 1\n",
65 | "\n",
66 | "Compression ratio: 2097152/1920 = 1092.27\n",
67 | "Relative error: tensor(0.0005)\n",
68 | "RMSE: tensor(22.0745)\n",
69 | "R^2: tensor(1.0000)\n"
70 | ]
71 | }
72 | ],
73 | "source": [
74 | "t = tn.Tensor(full, ranks_tt=3) # You can also pass a list of ranks\n",
75 | "\n",
76 | "def metrics():\n",
77 | " print(t)\n",
78 | " print('Compression ratio: {}/{} = {:g}'.format(full.numel(), t.numel(), full.numel() / t.numel()))\n",
79 | " print('Relative error:', tn.relative_error(full, t))\n",
80 | " print('RMSE:', tn.rmse(full, t))\n",
81 | " print('R^2:', tn.r_squared(full, t))\n",
82 | " \n",
83 | "metrics()"
84 | ]
85 | },
86 | {
87 | "cell_type": "markdown",
88 | "metadata": {},
89 | "source": [
90 | "The TT cores are available as `t.cores`."
91 | ]
92 | },
93 | {
94 | "cell_type": "markdown",
95 | "metadata": {},
96 | "source": [
97 | "## Tucker\n",
98 | "\n",
99 | "Use the `ranks_tucker` argument:"
100 | ]
101 | },
102 | {
103 | "cell_type": "code",
104 | "execution_count": 3,
105 | "metadata": {},
106 | "outputs": [
107 | {
108 | "name": "stdout",
109 | "output_type": "stream",
110 | "text": [
111 | "3D TT-Tucker tensor:\n",
112 | "\n",
113 | " 128 128 128\n",
114 | " | | |\n",
115 | " 3 3 3\n",
116 | " (0) (1) (2)\n",
117 | " / \\ / \\ / \\\n",
118 | "1 9 3 1\n",
119 | "\n",
120 | "Compression ratio: 2097152/1269 = 1652.6\n",
121 | "Relative error: tensor(0.0005)\n",
122 | "RMSE: tensor(22.0752)\n",
123 | "R^2: tensor(1.0000)\n"
124 | ]
125 | }
126 | ],
127 | "source": [
128 | "t = tn.Tensor(full, ranks_tucker=3) \n",
129 | "metrics()"
130 | ]
131 | },
132 | {
133 | "cell_type": "markdown",
134 | "metadata": {},
135 | "source": [
136 | "Even though this is technically a TT-Tucker tensor, it has the [exact same expressive power](main_formats.ipynb) as a low-rank Tucker decomposition.\n",
137 | "\n",
138 | "The Tucker factors are `t.Us`. To retrieve the full Tucker core, use `tucker_core()`:"
139 | ]
140 | },
141 | {
142 | "cell_type": "code",
143 | "execution_count": 4,
144 | "metadata": {},
145 | "outputs": [
146 | {
147 | "data": {
148 | "text/plain": [
149 | "torch.Size([3, 3, 3])"
150 | ]
151 | },
152 | "execution_count": 4,
153 | "metadata": {},
154 | "output_type": "execute_result"
155 | }
156 | ],
157 | "source": [
158 | "t.tucker_core().shape"
159 | ]
160 | },
161 | {
162 | "cell_type": "markdown",
163 | "metadata": {},
164 | "source": [
165 | "## CP\n",
166 | "\n",
167 | "Use the `ranks_cp` argument:"
168 | ]
169 | },
170 | {
171 | "cell_type": "code",
172 | "execution_count": 5,
173 | "metadata": {},
174 | "outputs": [
175 | {
176 | "name": "stdout",
177 | "output_type": "stream",
178 | "text": [
179 | "ALS -- initialization time = 0.045638084411621094\n",
180 | "iter: 0 | eps: 0.00098631 | total time: 0.0682\n",
181 | "iter: 1 | eps: 0.00092816 | total time: 0.0896 <- converged (tol=0.0001)\n",
182 | "3D CP tensor:\n",
183 | "\n",
184 | " 128 128 128\n",
185 | " | | |\n",
186 | " <0> <1> <2>\n",
187 | " / \\ / \\ / \\\n",
188 | "3 3 3 3\n",
189 | "\n",
190 | "Compression ratio: 2097152/1152 = 1820.44\n",
191 | "Relative error: tensor(0.0009)\n",
192 | "RMSE: tensor(39.9936)\n",
193 | "R^2: tensor(1.0000)\n"
194 | ]
195 | }
196 | ],
197 | "source": [
198 | "t = tn.Tensor(full, ranks_cp=3, verbose=True) # CP is computed using alternating least squares (ALS)\n",
199 | "metrics()"
200 | ]
201 | },
202 | {
203 | "cell_type": "markdown",
204 | "metadata": {},
205 | "source": [
206 | "The CP factors are `t.cores` (they are all 2D tensors).\n",
207 | "\n",
208 | "## Hybrid Formats\n",
209 | "\n",
210 | "`ranks_tucker` can be combined with the other arguments to produce hybrid decompositions:"
211 | ]
212 | },
213 | {
214 | "cell_type": "code",
215 | "execution_count": 6,
216 | "metadata": {},
217 | "outputs": [
218 | {
219 | "name": "stdout",
220 | "output_type": "stream",
221 | "text": [
222 | "3D CP-Tucker tensor:\n",
223 | "\n",
224 | " 128 128 128\n",
225 | " | | |\n",
226 | " 3 3 3\n",
227 | " <0> <1> <2>\n",
228 | " / \\ / \\ / \\\n",
229 | "3 3 3 3\n",
230 | "\n",
231 | "Compression ratio: 2097152/1179 = 1778.75\n",
232 | "Relative error: tensor(0.0035)\n",
233 | "RMSE: tensor(149.4028)\n",
234 | "R^2: tensor(1.0000)\n",
235 | "3D TT-Tucker tensor:\n",
236 | "\n",
237 | " 128 128 128\n",
238 | " | | |\n",
239 | " 4 4 4\n",
240 | " (0) (1) (2)\n",
241 | " / \\ / \\ / \\\n",
242 | "1 2 2 1\n",
243 | "\n",
244 | "Compression ratio: 2097152/1568 = 1337.47\n",
245 | "Relative error: tensor(0.0012)\n",
246 | "RMSE: tensor(51.8083)\n",
247 | "R^2: tensor(1.0000)\n"
248 | ]
249 | }
250 | ],
251 | "source": [
252 | "t = tn.Tensor(full, ranks_cp=3, ranks_tucker=3)\n",
253 | "metrics()\n",
254 | "t = tn.Tensor(full, ranks_tt=2, ranks_tucker=4)\n",
255 | "metrics()"
256 | ]
257 | },
258 | {
259 | "cell_type": "markdown",
260 | "metadata": {},
261 | "source": [
262 | "## Error-bounded Decompositions\n",
263 | "\n",
264 | "If you instead pass the argument `eps`, a decomposition will be computed that will not exceed that relative error:"
265 | ]
266 | },
267 | {
268 | "cell_type": "code",
269 | "execution_count": 7,
270 | "metadata": {},
271 | "outputs": [
272 | {
273 | "name": "stdout",
274 | "output_type": "stream",
275 | "text": [
276 | "3D TT-Tucker tensor:\n",
277 | "\n",
278 | " 128 128 128\n",
279 | " | | |\n",
280 | " 4 5 6\n",
281 | " (0) (1) (2)\n",
282 | " / \\ / \\ / \\\n",
283 | "1 4 6 1\n",
284 | "\n",
285 | "Compression ratio: 2097152/2092 = 1002.46\n",
286 | "Relative error: tensor(8.3402e-06)\n",
287 | "RMSE: tensor(0.3594)\n",
288 | "R^2: tensor(1.0000)\n"
289 | ]
290 | }
291 | ],
292 | "source": [
293 | "t = tn.Tensor(full, eps=1e-5)\n",
294 | "metrics()"
295 | ]
296 | },
297 | {
298 | "cell_type": "markdown",
299 | "metadata": {},
300 | "source": [
301 | "That will always try to compress in both Tucker and TT senses, and therefore will always produce a TT-Tucker tensor. If you only want to compress, say, in the Tucker sense, you can do:"
302 | ]
303 | },
304 | {
305 | "cell_type": "code",
306 | "execution_count": 8,
307 | "metadata": {},
308 | "outputs": [
309 | {
310 | "name": "stdout",
311 | "output_type": "stream",
312 | "text": [
313 | "3D TT-Tucker tensor:\n",
314 | "\n",
315 | " 128 128 128\n",
316 | " | | |\n",
317 | " 5 4 7\n",
318 | " (0) (1) (2)\n",
319 | " / \\ / \\ / \\\n",
320 | "1 28 7 1\n",
321 | "\n",
322 | "Compression ratio: 2097152/3021 = 694.191\n",
323 | "Relative error: tensor(4.0447e-06)\n",
324 | "RMSE: tensor(0.1743)\n",
325 | "R^2: tensor(1.0000)\n"
326 | ]
327 | }
328 | ],
329 | "source": [
330 | "t = tn.Tensor(full)\n",
331 | "t.round_tucker(eps=1e-5)\n",
332 | "metrics()"
333 | ]
334 | },
335 | {
336 | "cell_type": "markdown",
337 | "metadata": {},
338 | "source": [
339 | "And conversely, for a TT-only compression:"
340 | ]
341 | },
342 | {
343 | "cell_type": "code",
344 | "execution_count": 9,
345 | "metadata": {},
346 | "outputs": [
347 | {
348 | "name": "stdout",
349 | "output_type": "stream",
350 | "text": [
351 | "3D TT tensor:\n",
352 | "\n",
353 | " 128 128 128\n",
354 | " | | |\n",
355 | " (0) (1) (2)\n",
356 | " / \\ / \\ / \\\n",
357 | "1 4 6 1\n",
358 | "\n",
359 | "Compression ratio: 2097152/4352 = 481.882\n",
360 | "Relative error: tensor(8.3358e-06)\n",
361 | "RMSE: tensor(0.3592)\n",
362 | "R^2: tensor(1.0000)\n"
363 | ]
364 | }
365 | ],
366 | "source": [
367 | "t = tn.Tensor(full)\n",
368 | "t.round_tt(eps=1e-5)\n",
369 | "metrics()"
370 | ]
371 | }
372 | ],
373 | "metadata": {
374 | "kernelspec": {
375 | "display_name": "Python 3",
376 | "language": "python",
377 | "name": "python3"
378 | },
379 | "language_info": {
380 | "codemirror_mode": {
381 | "name": "ipython",
382 | "version": 3
383 | },
384 | "file_extension": ".py",
385 | "mimetype": "text/x-python",
386 | "name": "python",
387 | "nbconvert_exporter": "python",
388 | "pygments_lexer": "ipython3",
389 | "version": "3.7.2"
390 | }
391 | },
392 | "nbformat": 4,
393 | "nbformat_minor": 2
394 | }
395 |
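
As a compact recap of the notebook above, the sketch below compresses one full array into the main formats and reports accuracy and compression ratio. It is a minimal example (not part of the repository) assuming `tntorch`, `torch` and `numpy` are available:

```python
import numpy as np
import torch
import tntorch as tn

X, Y, Z = np.meshgrid(range(64), range(64), range(64))
full = torch.Tensor(np.sqrt(X*Y + Z + 1))   # some smooth analytical 3D function

candidates = {
    'TT (rank 4)':     tn.Tensor(full, ranks_tt=4),
    'Tucker (rank 4)': tn.Tensor(full, ranks_tucker=4),
    'CP (rank 4)':     tn.Tensor(full, ranks_cp=4),     # computed via ALS
    'eps-bounded':     tn.Tensor(full, eps=1e-5),       # TT-Tucker, error <= 1e-5
}
for name, t in candidates.items():
    print(name, tn.relative_error(full, t).item(),
          'compression: {:g}x'.format(full.numel() / t.numel()))
```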
--------------------------------------------------------------------------------
/docs/tutorials/derivatives.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Differentiation\n",
8 | "\n",
9 | "To differentiate a tensor network one just needs to differentiate each core along its spatial dimension (if it has one)."
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 1,
15 | "metadata": {},
16 | "outputs": [
17 | {
18 | "data": {
19 | "text/plain": [
20 | "3D TT tensor:\n",
21 | "\n",
22 | " 32 32 32\n",
23 | " | | |\n",
24 | " (0) (1) (2)\n",
25 | " / \\ / \\ / \\\n",
26 | "1 3 3 1"
27 | ]
28 | },
29 | "execution_count": 1,
30 | "metadata": {},
31 | "output_type": "execute_result"
32 | }
33 | ],
34 | "source": [
35 | "import torch\n",
36 | "import tntorch as tn\n",
37 | "\n",
38 | "t = tn.rand([32]*3, ranks_tt=3, requires_grad=True)\n",
39 | "t"
40 | ]
41 | },
42 | {
43 | "cell_type": "markdown",
44 | "metadata": {},
45 | "source": [
46 | "### Basic Derivatives\n",
47 | "\n",
48 | "To differentiate w.r.t. one or several variables, use `partial()`:"
49 | ]
50 | },
51 | {
52 | "cell_type": "code",
53 | "execution_count": 2,
54 | "metadata": {},
55 | "outputs": [
56 | {
57 | "data": {
58 | "text/plain": [
59 | "3D TT tensor:\n",
60 | "\n",
61 | " 32 32 32\n",
62 | " | | |\n",
63 | " (0) (1) (2)\n",
64 | " / \\ / \\ / \\\n",
65 | "1 3 3 1"
66 | ]
67 | },
68 | "execution_count": 2,
69 | "metadata": {},
70 | "output_type": "execute_result"
71 | }
72 | ],
73 | "source": [
74 | "tn.partial(t, dim=[0, 1], order=2)"
75 | ]
76 | },
77 | {
78 | "cell_type": "markdown",
79 | "metadata": {},
80 | "source": [
81 | "### Many Derivatives at Once\n",
82 | "\n",
83 | "Thanks to [mask tensors](logic.ipynb) we can specify and consider groups of many derivatives at once using the function `partialset()`. For example, the following tensor encodes *all* 2nd-order derivatives that contain $x$:"
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": 3,
89 | "metadata": {},
90 | "outputs": [
91 | {
92 | "name": "stdout",
93 | "output_type": "stream",
94 | "text": [
95 | "3D TT tensor:\n",
96 | "\n",
97 | " 96 96 96\n",
98 | " | | |\n",
99 | " (0) (1) (2)\n",
100 | " / \\ / \\ / \\\n",
101 | "1 9 9 1\n",
102 | "\n"
103 | ]
104 | }
105 | ],
106 | "source": [
107 | "x, y, z = tn.symbols(t.dim())\n",
108 | "d = tn.partialset(t, order=2, mask=x)\n",
109 | "print(d)"
110 | ]
111 | },
112 | {
113 | "cell_type": "markdown",
114 | "metadata": {},
115 | "source": [
116 | "We can check by summing squared norms:"
117 | ]
118 | },
119 | {
120 | "cell_type": "code",
121 | "execution_count": 4,
122 | "metadata": {},
123 | "outputs": [
124 | {
125 | "name": "stdout",
126 | "output_type": "stream",
127 | "text": [
128 | "tensor(48342.2888, grad_fn=)\n",
129 | "tensor(48342.2888, grad_fn=)\n"
130 | ]
131 | }
132 | ],
133 | "source": [
134 | "print(tn.normsq(d))\n",
135 | "print(tn.normsq(tn.partial(t, 0, order=2)) + tn.normsq(tn.partial(t, [0, 1], order=1)) + tn.normsq(tn.partial(t, [0, 2], order=1)))"
136 | ]
137 | },
138 | {
139 | "cell_type": "markdown",
140 | "metadata": {},
141 | "source": [
142 | "The method with masks is attractive because its cost scales linearly with dimensionality $N$. Computing all order-$O$ derivatives costs $\\mathcal{O}(N O^3 R^2)$ with `partialset()` vs. $\\mathcal{O}(N^{O+1} R^2)$ with the naive `partial()`.\n",
143 | "\n",
144 | "### Applications\n",
145 | "\n",
146 | "See [this notebook](completion.ipynb) for an example of tensor optimization that tries to maximize an interpolator's smoothness. Tensor derivatives are also used for some [vector field](vector_fields.ipynb) computations and in the [active subspace method](active_subspaces.ipynb)."
147 | ]
148 | }
149 | ],
150 | "metadata": {
151 | "kernelspec": {
152 | "display_name": "Python 3",
153 | "language": "python",
154 | "name": "python3"
155 | },
156 | "language_info": {
157 | "codemirror_mode": {
158 | "name": "ipython",
159 | "version": 3
160 | },
161 | "file_extension": ".py",
162 | "mimetype": "text/x-python",
163 | "name": "python",
164 | "nbconvert_exporter": "python",
165 | "pygments_lexer": "ipython3",
166 | "version": "3.7.2"
167 | }
168 | },
169 | "nbformat": 4,
170 | "nbformat_minor": 2
171 | }
172 |
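
A minimal sketch of the two differentiation routines shown in the notebook above (single derivatives with `partial()`, grouped derivatives with `partialset()`), assuming `tntorch` is installed:

```python
import tntorch as tn

t = tn.rand([32]*3, ranks_tt=3, requires_grad=True)

# One mixed derivative: order-2 differentiation over dimensions 0 and 1
d_xy = tn.partial(t, dim=[0, 1], order=2)

# All order-2 derivatives that involve x, packed into a single tensor via a mask
x, y, z = tn.symbols(t.dim())
d_all_x = tn.partialset(t, order=2, mask=x)

print(tn.normsq(d_xy))      # squared norm of one derivative
print(tn.normsq(d_all_x))   # squared norm summed over the whole masked group
```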
--------------------------------------------------------------------------------
/docs/tutorials/logic.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Boolean Logic\n",
8 | "\n",
9 | "Tensor networks make for a great representation of Boolean expressions. Let's have a look at how to build tensor Boolean formulas and what we can do with them."
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 1,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "import tntorch as tn\n",
19 | "import torch\n",
20 | "\n",
21 | "p, q, r = tn.symbols(3)"
22 | ]
23 | },
24 | {
25 | "cell_type": "markdown",
26 | "metadata": {},
27 | "source": [
28 | "These symbols are plain *tntorch* tensors (in this case, 3D ones) and have rank 1 each:"
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "execution_count": 2,
34 | "metadata": {},
35 | "outputs": [
36 | {
37 | "data": {
38 | "text/plain": [
39 | "3D TT tensor:\n",
40 | "\n",
41 | " 2 2 2\n",
42 | " | | |\n",
43 | " (0) (1) (2)\n",
44 | " / \\ / \\ / \\\n",
45 | "1 1 1 1"
46 | ]
47 | },
48 | "execution_count": 2,
49 | "metadata": {},
50 | "output_type": "execute_result"
51 | }
52 | ],
53 | "source": [
54 | "p"
55 | ]
56 | },
57 | {
58 | "cell_type": "markdown",
59 | "metadata": {},
60 | "source": [
61 | "The fun starts with overloaded Boolean operators:"
62 | ]
63 | },
64 | {
65 | "cell_type": "code",
66 | "execution_count": 3,
67 | "metadata": {},
68 | "outputs": [
69 | {
70 | "data": {
71 | "text/plain": [
72 | "True"
73 | ]
74 | },
75 | "execution_count": 3,
76 | "metadata": {},
77 | "output_type": "execute_result"
78 | }
79 | ],
80 | "source": [
81 | "tn.is_tautology(p | ~p) # Formula is always true"
82 | ]
83 | },
84 | {
85 | "cell_type": "code",
86 | "execution_count": 4,
87 | "metadata": {},
88 | "outputs": [
89 | {
90 | "data": {
91 | "text/plain": [
92 | "True"
93 | ]
94 | },
95 | "execution_count": 4,
96 | "metadata": {},
97 | "output_type": "execute_result"
98 | }
99 | ],
100 | "source": [
101 | "tn.is_contradiction(p & ~p) # Formula is always false"
102 | ]
103 | },
104 | {
105 | "cell_type": "code",
106 | "execution_count": 5,
107 | "metadata": {},
108 | "outputs": [
109 | {
110 | "data": {
111 | "text/plain": [
112 | "True"
113 | ]
114 | },
115 | "execution_count": 5,
116 | "metadata": {},
117 | "output_type": "execute_result"
118 | }
119 | ],
120 | "source": [
121 | "tn.is_satisfiable(p ^ q | r) # Formula is true for some values"
122 | ]
123 | },
124 | {
125 | "cell_type": "code",
126 | "execution_count": 6,
127 | "metadata": {},
128 | "outputs": [
129 | {
130 | "data": {
131 | "text/plain": [
132 | "True"
133 | ]
134 | },
135 | "execution_count": 6,
136 | "metadata": {},
137 | "output_type": "execute_result"
138 | }
139 | ],
140 | "source": [
141 | "tn.implies(p & q | q & r, q) # First formula implies the second"
142 | ]
143 | },
144 | {
145 | "cell_type": "code",
146 | "execution_count": 7,
147 | "metadata": {},
148 | "outputs": [
149 | {
150 | "data": {
151 | "text/plain": [
152 | "True"
153 | ]
154 | },
155 | "execution_count": 7,
156 | "metadata": {},
157 | "output_type": "execute_result"
158 | }
159 | ],
160 | "source": [
161 | "tn.equiv(p & q, ~(~p | ~q)) # A double implication (De Morgan's law)"
162 | ]
163 | },
164 | {
165 | "cell_type": "markdown",
166 | "metadata": {},
167 | "source": [
168 | "Check out the quantifiers too:"
169 | ]
170 | },
171 | {
172 | "cell_type": "code",
173 | "execution_count": 8,
174 | "metadata": {},
175 | "outputs": [
176 | {
177 | "data": {
178 | "text/plain": [
179 | "True"
180 | ]
181 | },
182 | "execution_count": 8,
183 | "metadata": {},
184 | "output_type": "execute_result"
185 | }
186 | ],
187 | "source": [
188 | "tn.equiv(tn.all(3), p & q & r) # \"for all\""
189 | ]
190 | },
191 | {
192 | "cell_type": "code",
193 | "execution_count": 9,
194 | "metadata": {},
195 | "outputs": [
196 | {
197 | "data": {
198 | "text/plain": [
199 | "True"
200 | ]
201 | },
202 | "execution_count": 9,
203 | "metadata": {},
204 | "output_type": "execute_result"
205 | }
206 | ],
207 | "source": [
208 | "tn.equiv(tn.any(3), p | q | r) # \"exists\""
209 | ]
210 | },
211 | {
212 | "cell_type": "code",
213 | "execution_count": 10,
214 | "metadata": {},
215 | "outputs": [
216 | {
217 | "data": {
218 | "text/plain": [
219 | "True"
220 | ]
221 | },
222 | "execution_count": 10,
223 | "metadata": {},
224 | "output_type": "execute_result"
225 | }
226 | ],
227 | "source": [
228 | "tn.equiv(tn.none(3), ~tn.any(3)) # \"does not exist\""
229 | ]
230 | },
231 | {
232 | "cell_type": "code",
233 | "execution_count": 11,
234 | "metadata": {},
235 | "outputs": [
236 | {
237 | "data": {
238 | "text/plain": [
239 | "True"
240 | ]
241 | },
242 | "execution_count": 11,
243 | "metadata": {},
244 | "output_type": "execute_result"
245 | }
246 | ],
247 | "source": [
248 | "tn.equiv(tn.one(3), p&~q&~r | ~p&q&~r | ~p&~q&r) # \"exists and is unique\""
249 | ]
250 | },
251 | {
252 | "cell_type": "markdown",
253 | "metadata": {},
254 | "source": [
255 | "To find out the relevant symbols of a formula (those whose values can affect its result), do the following:"
256 | ]
257 | },
258 | {
259 | "cell_type": "code",
260 | "execution_count": 12,
261 | "metadata": {},
262 | "outputs": [
263 | {
264 | "data": {
265 | "text/plain": [
266 | "[1]"
267 | ]
268 | },
269 | "execution_count": 12,
270 | "metadata": {},
271 | "output_type": "execute_result"
272 | }
273 | ],
274 | "source": [
275 | "tn.relevant_symbols(p & q | ~p & q)"
276 | ]
277 | },
278 | {
279 | "cell_type": "markdown",
280 | "metadata": {},
281 | "source": [
282 | "The ranks grow exponentially after each binary operation. It is recommended to run `round()` to try to reduce those ranks, which often saves memory and computation:"
283 | ]
284 | },
285 | {
286 | "cell_type": "code",
287 | "execution_count": 13,
288 | "metadata": {},
289 | "outputs": [
290 | {
291 | "name": "stdout",
292 | "output_type": "stream",
293 | "text": [
294 | "3D TT tensor:\n",
295 | "\n",
296 | " 2 2 2\n",
297 | " | | |\n",
298 | " (0) (1) (2)\n",
299 | " / \\ / \\ / \\\n",
300 | "1 11 11 1\n",
301 | "\n",
302 | "3D TT tensor:\n",
303 | "\n",
304 | " 2 2 2\n",
305 | " | | |\n",
306 | " (0) (1) (2)\n",
307 | " / \\ / \\ / \\\n",
308 | "1 2 2 1\n",
309 | "\n"
310 | ]
311 | }
312 | ],
313 | "source": [
314 | "formula = ~p | (p & q) | (q & r)\n",
315 | "print(formula)\n",
316 | "formula.round()\n",
317 | "print(formula)"
318 | ]
319 | },
320 | {
321 | "cell_type": "markdown",
322 | "metadata": {},
323 | "source": [
324 | "How many ways are there to satisfy a given Boolean formula? This is known as the [#SAT problem](https://en.wikipedia.org/wiki/Sharp-SAT). For us, it's just the sum of elements of the tensor:"
325 | ]
326 | },
327 | {
328 | "cell_type": "code",
329 | "execution_count": 14,
330 | "metadata": {},
331 | "outputs": [
332 | {
333 | "data": {
334 | "text/plain": [
335 | "tensor(6.0000)"
336 | ]
337 | },
338 | "execution_count": 14,
339 | "metadata": {},
340 | "output_type": "execute_result"
341 | }
342 | ],
343 | "source": [
344 | "tn.sum(formula)"
345 | ]
346 | },
347 | {
348 | "cell_type": "markdown",
349 | "metadata": {},
350 | "source": [
351 | "We can look at all the inputs that satisfy this formula with the function `accepted_inputs()`:"
352 | ]
353 | },
354 | {
355 | "cell_type": "code",
356 | "execution_count": 15,
357 | "metadata": {},
358 | "outputs": [
359 | {
360 | "data": {
361 | "text/plain": [
362 | "tensor([[0, 0, 0],\n",
363 | " [0, 0, 1],\n",
364 | " [0, 1, 0],\n",
365 | " [0, 1, 1],\n",
366 | " [1, 1, 0],\n",
367 | " [1, 1, 1]])"
368 | ]
369 | },
370 | "execution_count": 15,
371 | "metadata": {},
372 | "output_type": "execute_result"
373 | }
374 | ],
375 | "source": [
376 | "tn.accepted_inputs(formula) # One row per accepted input"
377 | ]
378 | },
379 | {
380 | "cell_type": "markdown",
381 | "metadata": {},
382 | "source": [
383 | "The function `only()` is useful to enforce that *only* certain variables play a role:"
384 | ]
385 | },
386 | {
387 | "cell_type": "code",
388 | "execution_count": 16,
389 | "metadata": {},
390 | "outputs": [
391 | {
392 | "data": {
393 | "text/plain": [
394 | "tensor([[0, 0, 1],\n",
395 | " [1, 0, 0]])"
396 | ]
397 | },
398 | "execution_count": 16,
399 | "metadata": {},
400 | "output_type": "execute_result"
401 | }
402 | ],
403 | "source": [
404 | "tn.accepted_inputs(tn.only(p) | tn.only(r))"
405 | ]
406 | },
407 | {
408 | "cell_type": "markdown",
409 | "metadata": {},
410 | "source": [
411 | "### Applications\n",
412 | "\n",
413 | "Boolean tensors can be applied in [ANOVA](anova.ipynb) and [variance-based sensitivity analysis](sobol.ipynb) as well as for compact [derivative computation](derivatives.ipynb)."
414 | ]
415 | }
416 | ],
417 | "metadata": {
418 | "kernelspec": {
419 | "display_name": "Python 3",
420 | "language": "python",
421 | "name": "python3"
422 | },
423 | "language_info": {
424 | "codemirror_mode": {
425 | "name": "ipython",
426 | "version": 3
427 | },
428 | "file_extension": ".py",
429 | "mimetype": "text/x-python",
430 | "name": "python",
431 | "nbconvert_exporter": "python",
432 | "pygments_lexer": "ipython3",
433 | "version": "3.7.2"
434 | }
435 | },
436 | "nbformat": 4,
437 | "nbformat_minor": 2
438 | }
439 |
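
The following sketch condenses the Boolean workflow from the notebook above into a few lines (build a formula, round its ranks, count and enumerate satisfying assignments); it is a minimal example assuming `tntorch` is installed:

```python
import tntorch as tn

p, q, r = tn.symbols(3)
formula = (p & q) | (q & r)           # a Boolean formula as a 3D mask tensor

formula.round()                       # reduce the ranks inflated by &, |, ~
print(tn.is_satisfiable(formula))     # True: some assignment satisfies it
print(tn.implies(formula, q))         # True: both clauses contain q
print(tn.sum(formula))                # #SAT: number of satisfying assignments
print(tn.accepted_inputs(formula))    # one row per satisfying assignment
```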
--------------------------------------------------------------------------------
/docs/tutorials/main_formats.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Main Tensor Formats\n",
8 | "\n",
9 | "Three of the most popular tensor decompositions are supported in *tntorch*:\n",
10 | "\n",
11 | "- [CANDECOMP/PARAFAC (CP)](https://epubs.siam.org/doi/pdf/10.1137/07070111X)\n",
12 | "- [Tucker](https://epubs.siam.org/doi/pdf/10.1137/S0895479898346995)\n",
13 | "- [Tensor Train (TT)](https://epubs.siam.org/doi/abs/10.1137/090752286?journalCode=sjoce3)\n",
14 | "\n",
15 | "Those formats are all represented using $N$ *tensor cores* (one per tensor dimension, used for CP/TT) and, optionally, up to $N$ *factor matrices* (needed for Tucker).\n",
16 | "\n",
17 | "In an $N$-D tensor of shape $I_1 \\times \\dots \\times I_N$, each $n$-th core can come in four flavors:\n",
18 | "\n",
19 | "- $R^{\\mathrm{TT}}_n \\times I_n \\times R^{\\mathrm{TT}}_{n+1}$: a TT core.\n",
20 | "- $R^{\\mathrm{TT}}_n \\times S_n^{\\mathrm{Tucker}} \\times R^{\\mathrm{TT}}_{n+1}$: a TT-Tucker core, accompanied by an $I_n \\times S_n^{\\mathrm{Tucker}}$ factor matrix.\n",
21 | "- $I_n \\times R^{\\mathrm{CP}}_n$: a CP core. Conceptually, it works as if it were a 3D TT core of shape $R^{\\mathrm{CP}}_n \\times I_n \\times R^{\\mathrm{CP}}_n$ whose slices along the 2nd mode are all diagonal matrices.\n",
22 | "- $S_n^{\\mathrm{Tucker}} \\times R^{\\mathrm{CP}}_n$: a CP-Tucker core, accompanied by an $I_n \\times S_n^{\\mathrm{Tucker}}$ factor matrix. Conceptually, it works as a 3D TT-Tucker core.\n",
23 | "\n",
24 | "One tensor network can combine cores of different kinds. So all in all one may have TT, TT-Tucker, Tucker, CP, TT-CP, CP-Tucker, and TT-CP-Tucker tensors. We will show examples of all.\n",
25 | "\n",
26 | "*(see [this notebook](decompositions.ipynb) to decompose full tensors into those main formats)*\n",
27 | "\n",
28 | "*(see [this notebook](other_formats.ipynb) for other structured and custom decompositions)*"
29 | ]
30 | },
31 | {
32 | "cell_type": "markdown",
33 | "metadata": {},
34 | "source": [
35 | "## TT\n",
36 | "\n",
37 | "Tensor train cores are represented in parentheses `( )`:"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": 1,
43 | "metadata": {},
44 | "outputs": [
45 | {
46 | "data": {
47 | "text/plain": [
48 | "5D TT tensor:\n",
49 | "\n",
50 | " 32 32 32 32 32\n",
51 | " | | | | |\n",
52 | " (0) (1) (2) (3) (4)\n",
53 | " / \\ / \\ / \\ / \\ / \\\n",
54 | "1 5 5 5 5 1"
55 | ]
56 | },
57 | "execution_count": 1,
58 | "metadata": {},
59 | "output_type": "execute_result"
60 | }
61 | ],
62 | "source": [
63 | "import tntorch as tn\n",
64 | "\n",
65 | "tn.rand([32]*5, ranks_tt=5)"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "metadata": {},
71 | "source": [
72 | "## TT-Tucker\n",
73 | "\n",
74 | "In this format, TT cores are compressed along their spatial dimension (2nd mode) using an accompanying Tucker factor. This was considered e.g. in the original [TT paper](https://epubs.siam.org/doi/abs/10.1137/090752286?journalCode=sjoce3)."
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": 2,
80 | "metadata": {},
81 | "outputs": [
82 | {
83 | "data": {
84 | "text/plain": [
85 | "5D TT-Tucker tensor:\n",
86 | "\n",
87 | " 32 32 32 32 32\n",
88 | " | | | | |\n",
89 | " 6 6 6 6 6\n",
90 | " (0) (1) (2) (3) (4)\n",
91 | " / \\ / \\ / \\ / \\ / \\\n",
92 | "1 5 5 5 5 1"
93 | ]
94 | },
95 | "execution_count": 2,
96 | "metadata": {},
97 | "output_type": "execute_result"
98 | }
99 | ],
100 | "source": [
101 | "tn.rand([32]*5, ranks_tt=5, ranks_tucker=6)"
102 | ]
103 | },
104 | {
105 | "cell_type": "markdown",
106 | "metadata": {},
107 | "source": [
108 | "Here is an example where only *some* cores have Tucker factors:"
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": 3,
114 | "metadata": {},
115 | "outputs": [
116 | {
117 | "data": {
118 | "text/plain": [
119 | "5D TT-Tucker tensor:\n",
120 | "\n",
121 | " 32 32\n",
122 | " 32 | 32 32 |\n",
123 | " | 6 | | 7\n",
124 | " (0) (1) (2) (3) (4)\n",
125 | " / \\ / \\ / \\ / \\ / \\\n",
126 | "1 5 5 5 5 1"
127 | ]
128 | },
129 | "execution_count": 3,
130 | "metadata": {},
131 | "output_type": "execute_result"
132 | }
133 | ],
134 | "source": [
135 | "tn.rand([32]*5, ranks_tt=5, ranks_tucker=[None, 6, None, None, 7])"
136 | ]
137 | },
138 | {
139 | "cell_type": "markdown",
140 | "metadata": {},
141 | "source": [
142 | "Note: if you want to leave some factors fixed during gradient descent, simply set them to a PyTorch tensor with `requires_grad=False`."
143 | ]
144 | },
145 | {
146 | "cell_type": "markdown",
147 | "metadata": {},
148 | "source": [
149 | "## Tucker\n",
150 | "\n",
151 | "\"Pure\" Tucker is technically speaking not supported, but is equivalent to a TT-Tucker tensor with full TT-ranks. The minimal necessary ranks are automatically computed and set up for you:"
152 | ]
153 | },
154 | {
155 | "cell_type": "code",
156 | "execution_count": 4,
157 | "metadata": {},
158 | "outputs": [
159 | {
160 | "data": {
161 | "text/plain": [
162 | "5D TT-Tucker tensor:\n",
163 | "\n",
164 | " 32 32 32 32 32\n",
165 | " | | | | |\n",
166 | " 3 3 3 3 3\n",
167 | " (0) (1) (2) (3) (4)\n",
168 | " / \\ / \\ / \\ / \\ / \\\n",
169 | "1 3 9 9 3 1"
170 | ]
171 | },
172 | "execution_count": 4,
173 | "metadata": {},
174 | "output_type": "execute_result"
175 | }
176 | ],
177 | "source": [
178 | "tn.rand([32]*5, ranks_tucker=3) # Actually a TT-Tucker network, but just as expressive as a pure Tucker decomposition"
179 | ]
180 | },
181 | {
182 | "cell_type": "markdown",
183 | "metadata": {},
184 | "source": [
185 | "In other words, all $32^5$ tensors of Tucker rank $3$ can be represented by a tensor that has the shape shown above, and vice versa."
186 | ]
187 | },
188 | {
189 | "cell_type": "markdown",
190 | "metadata": {},
191 | "source": [
192 | "## CP\n",
193 | "\n",
194 | "CP factors are shown as cores in brackets `< >`:"
195 | ]
196 | },
197 | {
198 | "cell_type": "code",
199 | "execution_count": 5,
200 | "metadata": {},
201 | "outputs": [
202 | {
203 | "data": {
204 | "text/plain": [
205 | "5D CP tensor:\n",
206 | "\n",
207 | " 32 32 32 32 32\n",
208 | " | | | | |\n",
209 | " <0> <1> <2> <3> <4>\n",
210 | " / \\ / \\ / \\ / \\ / \\\n",
211 | "4 4 4 4 4 4"
212 | ]
213 | },
214 | "execution_count": 5,
215 | "metadata": {},
216 | "output_type": "execute_result"
217 | }
218 | ],
219 | "source": [
220 | "tn.rand([32]*5, ranks_cp=4)"
221 | ]
222 | },
223 | {
224 | "cell_type": "markdown",
225 | "metadata": {},
226 | "source": [
227 | "Even though those factors work conceptually as 3D cores in a tensor train (every CP tensor [is a particular case](https://epubs.siam.org/doi/abs/10.1137/090752286?journalCode=sjoce3) of the TT format), they are stored in 2D as in a standard CP decomposition. In this case all cores have shape $32 \\times 4$.\n",
228 | "\n",
229 | "## TT-CP\n",
230 | "\n",
231 | "TT and CP cores can be combined by specifying lists of ranks for each format. You should provide $N-1$ TT ranks and $N$ CP ranks and use `None` so that they do not collide anywhere. Also note that consecutive CP ranks must coincide. Here is a tensor with 3 TT cores and 2 CP cores:"
232 | ]
233 | },
234 | {
235 | "cell_type": "code",
236 | "execution_count": 6,
237 | "metadata": {},
238 | "outputs": [
239 | {
240 | "data": {
241 | "text/plain": [
242 | "5D TT-CP tensor:\n",
243 | "\n",
244 | " 32 32 32 32 32\n",
245 | " | | | | |\n",
246 | " (0) (1) (2) <3> <4>\n",
247 | " / \\ / \\ / \\ / \\ / \\\n",
248 | "1 2 3 4 4 4"
249 | ]
250 | },
251 | "execution_count": 6,
252 | "metadata": {},
253 | "output_type": "execute_result"
254 | }
255 | ],
256 | "source": [
257 | "tn.rand([32]*5, ranks_tt=[2, 3, None, None], ranks_cp=[None, None, None, 4, 4])"
258 | ]
259 | },
260 | {
261 | "cell_type": "markdown",
262 | "metadata": {},
263 | "source": [
264 | "Here is another example with 2 TT cores and 3 CP cores:"
265 | ]
266 | },
267 | {
268 | "cell_type": "code",
269 | "execution_count": 7,
270 | "metadata": {},
271 | "outputs": [
272 | {
273 | "data": {
274 | "text/plain": [
275 | "5D TT-CP tensor:\n",
276 | "\n",
277 | " 32 32 32 32 32\n",
278 | " | | | | |\n",
279 | " <0> (1) (2) <3> <4>\n",
280 | " / \\ / \\ / \\ / \\ / \\\n",
281 | "4 4 2 5 5 5"
282 | ]
283 | },
284 | "execution_count": 7,
285 | "metadata": {},
286 | "output_type": "execute_result"
287 | }
288 | ],
289 | "source": [
290 | "tn.rand([32]*5, ranks_tt=[None, 2, None, None], ranks_cp=[4, None, None, 5, 5])"
291 | ]
292 | },
293 | {
294 | "cell_type": "markdown",
295 | "metadata": {},
296 | "source": [
297 | "## CP-Tucker\n",
298 | "\n",
299 | "Similarly to TT-Tucker, this model restricts the columns of the CP factors to live in a low-dimensional subspace. It is also known as a [canonical decomposition with linear constraints (*CANDELINC*)](https://link.springer.com/article/10.1007/BF02293596). [Compressing a Tucker core via CP](https://www.degruyter.com/downloadpdf/j/math.2007.5.issue-3/s11533-007-0018-0/s11533-007-0018-0.pdf) leads to an equivalent format."
300 | ]
301 | },
302 | {
303 | "cell_type": "code",
304 | "execution_count": 8,
305 | "metadata": {},
306 | "outputs": [
307 | {
308 | "data": {
309 | "text/plain": [
310 | "5D CP-Tucker tensor:\n",
311 | "\n",
312 | " 32 32 32 32 32\n",
313 | " | | | | |\n",
314 | " 4 4 4 4 4\n",
315 | " <0> <1> <2> <3> <4>\n",
316 | " / \\ / \\ / \\ / \\ / \\\n",
317 | "2 2 2 2 2 2"
318 | ]
319 | },
320 | "execution_count": 8,
321 | "metadata": {},
322 | "output_type": "execute_result"
323 | }
324 | ],
325 | "source": [
326 | "tn.rand([32]*5, ranks_cp=2, ranks_tucker=4)"
327 | ]
328 | },
329 | {
330 | "cell_type": "markdown",
331 | "metadata": {},
332 | "source": [
333 | "## TT-CP-Tucker\n",
334 | "\n",
335 | "Finally, we can combine all sorts of cores to get a hybrid of all 3 models:"
336 | ]
337 | },
338 | {
339 | "cell_type": "code",
340 | "execution_count": 9,
341 | "metadata": {},
342 | "outputs": [
343 | {
344 | "data": {
345 | "text/plain": [
346 | "5D TT-CP-Tucker tensor:\n",
347 | "\n",
348 | " 32 32 32 \n",
349 | " | 32 | | 32\n",
350 | " 5 | 5 5 |\n",
351 | " (0) (1) (2) <3> <4>\n",
352 | " / \\ / \\ / \\ / \\ / \\\n",
353 | "1 2 3 10 10 10"
354 | ]
355 | },
356 | "execution_count": 9,
357 | "metadata": {},
358 | "output_type": "execute_result"
359 | }
360 | ],
361 | "source": [
362 | "tn.rand([32]*5, ranks_tt=[2, 3, None, None], ranks_cp=[None, None, None, 10, 10], ranks_tucker=[5, None, 5, 5, None])"
363 | ]
364 | }
365 | ],
366 | "metadata": {
367 | "kernelspec": {
368 | "display_name": "Python 3",
369 | "language": "python",
370 | "name": "python3"
371 | },
372 | "language_info": {
373 | "codemirror_mode": {
374 | "name": "ipython",
375 | "version": 3
376 | },
377 | "file_extension": ".py",
378 | "mimetype": "text/x-python",
379 | "name": "python",
380 | "nbconvert_exporter": "python",
381 | "pygments_lexer": "ipython3",
382 | "version": "3.7.2"
383 | }
384 | },
385 | "nbformat": 4,
386 | "nbformat_minor": 2
387 | }
388 |
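
To summarize the formats listed in the notebook above, here is a minimal sketch that instantiates each one with `tn.rand()`, adapted to a 4-dimensional example (only rank arguments demonstrated in the notebook are used):

```python
import tntorch as tn

shape = [32]*4

tt        = tn.rand(shape, ranks_tt=5)                   # TT
tt_tucker = tn.rand(shape, ranks_tt=5, ranks_tucker=6)   # TT-Tucker
tucker    = tn.rand(shape, ranks_tucker=3)               # as expressive as pure Tucker
cp        = tn.rand(shape, ranks_cp=4)                   # CP
cp_tucker = tn.rand(shape, ranks_cp=2, ranks_tucker=4)   # CP-Tucker
# TT-CP hybrid: two TT cores followed by two CP cores (None entries avoid collisions)
tt_cp     = tn.rand(shape, ranks_tt=[3, None, None], ranks_cp=[None, None, 4, 4])

for t in [tt, tt_tucker, tucker, cp, cp_tucker, tt_cp]:
    print(t)
```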
--------------------------------------------------------------------------------
/docs/tutorials/other_formats.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Other Tensor Formats\n",
8 | "\n",
9 | "Besides the [natively supported formats](main_formats.ipynb), you can use *tntorch* to emulate other structured tensor decompositions (or at least, some of their functionality). \n",
10 | "\n",
11 | "Reference: all the following models are surveyed in [*\"Tensor Decompositions and Applications\"*, by Kolda and Bader (2009)](https://epubs.siam.org/doi/pdf/10.1137/07070111X)."
12 | ]
13 | },
14 | {
15 | "cell_type": "markdown",
16 | "metadata": {},
17 | "source": [
18 | "## INDSCAL\n",
19 | "\n",
20 | "*Individual differences in scaling* (INDSCAL) is just a 3D CP decomposition with two shared factors, say the first two."
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 1,
26 | "metadata": {},
27 | "outputs": [
28 | {
29 | "name": "stdout",
30 | "output_type": "stream",
31 | "text": [
32 | "3D CP tensor:\n",
33 | "\n",
34 | " 10 10 64\n",
35 | " | | |\n",
36 | " <0> <1> <2>\n",
37 | " / \\ / \\ / \\\n",
38 | "8 8 8 8\n",
39 | "\n",
40 | "tensor(0.0559, grad_fn=)\n"
41 | ]
42 | }
43 | ],
44 | "source": [
45 | "import tntorch as tn\n",
46 | "import torch\n",
47 | "\n",
48 | "\n",
49 | "def INDSCAL(shape, rank):\n",
50 | " \n",
51 | " assert len(shape) == 3\n",
52 | " assert shape[0] == shape[1]\n",
53 | " \n",
54 | " A = torch.randn(shape[0], rank, requires_grad=True)\n",
55 | " B = A # The first two cores share the same memory\n",
56 | " C = torch.randn(shape[2], rank, requires_grad=True)\n",
57 | "\n",
58 | " return tn.Tensor([A, B, C])\n",
59 | " \n",
60 | "t = INDSCAL([10, 10, 64], 8)\n",
61 | "print(t)\n",
62 | "print(tn.mean(t))"
63 | ]
64 | },
65 | {
66 | "cell_type": "markdown",
67 | "metadata": {},
68 | "source": [
69 | "This tensor's first two factors are the same PyTorch tensor in memory, so if we optimize (fit) the tensor they will remain identical to each other, as desired."
70 | ]
71 | },
72 | {
73 | "cell_type": "markdown",
74 | "metadata": {},
75 | "source": [
76 | "## CANDELINC\n",
77 | "\n",
78 | "CANDELINC (*canonical decomposition with linear constraints*) is a CP decomposition such that each factor is compressed along its columns by an additional given matrix (the *linear constraints*). In other words, it is a CP-Tucker format with fixed Tucker factors."
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": 2,
84 | "metadata": {},
85 | "outputs": [
86 | {
87 | "data": {
88 | "text/plain": [
89 | "3D CP-Tucker tensor:\n",
90 | "\n",
91 | " 10 11 12\n",
92 | " | | |\n",
93 | " 5 6 7\n",
94 | " <0> <1> <2>\n",
95 | " / \\ / \\ / \\\n",
96 | "3 3 3 3"
97 | ]
98 | },
99 | "execution_count": 2,
100 | "metadata": {},
101 | "output_type": "execute_result"
102 | }
103 | ],
104 | "source": [
105 | "def CANDELINC(rank, constraints): # `constraints` is a list of N matrices (the n-th of shape I_n x S_n) encoding the linear constraints for the N CP factors\n",
106 | " cores = [torch.randn(c.shape[1], rank, requires_grad=True) for c in constraints]\n",
107 | " return tn.Tensor(cores, constraints)\n",
108 | "\n",
109 | "N = 3\n",
110 | "rank = 3\n",
111 | "constraints = [torch.randn(10, 5), torch.randn(11, 6), torch.randn(12, 7)]\n",
112 | "CANDELINC(rank, constraints)"
113 | ]
114 | },
115 | {
116 | "cell_type": "markdown",
117 | "metadata": {},
118 | "source": [
119 | "## DEDICOM\n",
120 | "\n",
121 | "In three-way *decomposition into directional components* (DEDICOM), 5 factors interact to encode a 3D tensor (2 of those factors are repeated). All factors use the same rank."
122 | ]
123 | },
124 | {
125 | "cell_type": "code",
126 | "execution_count": 3,
127 | "metadata": {},
128 | "outputs": [
129 | {
130 | "data": {
131 | "text/plain": [
132 | "5D TT-CP tensor:\n",
133 | "\n",
134 | " 10 64 1 64 10\n",
135 | " | | | | |\n",
136 | " <0> <1> (2) <3> <4>\n",
137 | " / \\ / \\ / \\ / \\ / \\\n",
138 | "8 8 8 8 8 8"
139 | ]
140 | },
141 | "execution_count": 3,
142 | "metadata": {},
143 | "output_type": "execute_result"
144 | }
145 | ],
146 | "source": [
147 | "def DEDICOM(shape, rank):\n",
148 | " \n",
149 | " assert len(shape) == 3\n",
150 | " assert shape[0] == shape[2]\n",
151 | " \n",
152 | " A = torch.randn(shape[0], rank, requires_grad=True)\n",
153 | " D = torch.randn(shape[1], rank, requires_grad=True)\n",
154 | " R = torch.randn(rank, 1, rank, requires_grad=True)\n",
155 | "\n",
156 | " return tn.Tensor([A, D, R, D, A])\n",
157 | " \n",
158 | "DEDICOM([10, 64, 10], 8)"
159 | ]
160 | },
161 | {
162 | "cell_type": "markdown",
163 | "metadata": {},
164 | "source": [
165 | "Note that this tensor is to be accessed via a special pattern (`t[i, j, k]` should be written as `t[i, j, 0, j, k]`). Some routines (e.g. `numel()`, `torch()`, `norm()`, etc.) that are unaware of this special structure will not work properly."
166 | ]
167 | },
168 | {
169 | "cell_type": "markdown",
170 | "metadata": {},
171 | "source": [
172 | "## PARATUCK2\n",
173 | "\n",
174 | "PARATUCK2 is a variant of DEDICOM in which no factors are repeated and two different ranks are involved."
175 | ]
176 | },
177 | {
178 | "cell_type": "code",
179 | "execution_count": 4,
180 | "metadata": {},
181 | "outputs": [
182 | {
183 | "data": {
184 | "text/plain": [
185 | "5D TT-CP tensor:\n",
186 | "\n",
187 | " 10 64 1 64 10\n",
188 | " | | | | |\n",
189 | " <0> <1> (2) <3> <4>\n",
190 | " / \\ / \\ / \\ / \\ / \\\n",
191 | "7 7 7 8 8 8"
192 | ]
193 | },
194 | "execution_count": 4,
195 | "metadata": {},
196 | "output_type": "execute_result"
197 | }
198 | ],
199 | "source": [
200 | "def PARATUCK2(shape, ranks):\n",
201 | " \n",
202 | " assert len(shape) == 3\n",
203 | " assert shape[0] == shape[2]\n",
204 | " assert len(ranks) == 2\n",
205 | " \n",
206 | " A = torch.randn(shape[0], ranks[0], requires_grad=True)\n",
207 | " DA = torch.randn(shape[1], ranks[0], requires_grad=True)\n",
208 | " R = torch.randn(ranks[0], 1, ranks[1], requires_grad=True)\n",
209 | " DB = torch.randn(shape[1], ranks[1], requires_grad=True)\n",
210 | " B = torch.randn(shape[2], ranks[1], requires_grad=True)\n",
211 | "\n",
212 | " return tn.Tensor([A, DA, R, DB, B])\n",
213 | " \n",
214 | "PARATUCK2([10, 64, 10], [7, 8])"
215 | ]
216 | }
217 | ],
218 | "metadata": {
219 | "kernelspec": {
220 | "display_name": "Python 3",
221 | "language": "python",
222 | "name": "python3"
223 | },
224 | "language_info": {
225 | "codemirror_mode": {
226 | "name": "ipython",
227 | "version": 3
228 | },
229 | "file_extension": ".py",
230 | "mimetype": "text/x-python",
231 | "name": "python",
232 | "nbconvert_exporter": "python",
233 | "pygments_lexer": "ipython3",
234 | "version": "3.7.2"
235 | }
236 | },
237 | "nbformat": 4,
238 | "nbformat_minor": 2
239 | }
240 |
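
As a condensed example of the emulated formats in the notebook above, the sketch below builds an INDSCAL-like tensor by sharing one factor between the first two modes (a minimal illustration, assuming `tntorch` and `torch` are installed):

```python
import torch
import tntorch as tn

# INDSCAL: a 3D CP tensor whose first two factors are the *same* object in memory,
# so any gradient-based fitting keeps them identical by construction.
rank = 8
A = torch.randn(10, rank, requires_grad=True)   # shared factor for modes 0 and 1
C = torch.randn(64, rank, requires_grad=True)   # factor for mode 2

t = tn.Tensor([A, A, C])
print(t)
print(tn.mean(t))
```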
--------------------------------------------------------------------------------
/docs/tutorials/vector_fields.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Vector Fields\n",
8 | "\n",
9 | "The standard way to represent $N$-dimensional vector fields in *tntorch* is via a list of $N$ tensors, each of which has $N$ dimensions. Functions that accept or return vector fields do so in that form. For example, `gradient()`:"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 11,
15 | "metadata": {},
16 | "outputs": [
17 | {
18 | "name": "stdout",
19 | "output_type": "stream",
20 | "text": [
21 | "[3D TT tensor:\n",
22 | "\n",
23 | " 64 64 64\n",
24 | " | | |\n",
25 | " (0) (1) (2)\n",
26 | " / \\ / \\ / \\\n",
27 | "1 10 10 1\n",
28 | ", 3D TT tensor:\n",
29 | "\n",
30 | " 64 64 64\n",
31 | " | | |\n",
32 | " (0) (1) (2)\n",
33 | " / \\ / \\ / \\\n",
34 | "1 10 10 1\n",
35 | ", 3D TT tensor:\n",
36 | "\n",
37 | " 64 64 64\n",
38 | " | | |\n",
39 | " (0) (1) (2)\n",
40 | " / \\ / \\ / \\\n",
41 | "1 10 10 1\n",
42 | "]\n"
43 | ]
44 | }
45 | ],
46 | "source": [
47 | "import torch\n",
48 | "import tntorch as tn\n",
49 | "\n",
50 | "t = tn.rand([64]*3, ranks_tt=10)\n",
51 | "grad = tn.gradient(t)\n",
52 | "print(grad)"
53 | ]
54 | },
55 | {
56 | "cell_type": "markdown",
57 | "metadata": {},
58 | "source": [
59 | "We can check that the curl of any gradient is 0:"
60 | ]
61 | },
62 | {
63 | "cell_type": "code",
64 | "execution_count": 12,
65 | "metadata": {},
66 | "outputs": [
67 | {
68 | "name": "stdout",
69 | "output_type": "stream",
70 | "text": [
71 | "tensor(1.0344e-06)\n",
72 | "tensor(1.4569e-06)\n",
73 | "tensor(2.5995e-06)\n"
74 | ]
75 | }
76 | ],
77 | "source": [
78 | "curl = tn.curl(tn.gradient(t)) # List of 3 3D tensors\n",
79 | "print(tn.norm(curl[0]))\n",
80 | "print(tn.norm(curl[1]))\n",
81 | "print(tn.norm(curl[2]))"
82 | ]
83 | },
84 | {
85 | "cell_type": "markdown",
86 | "metadata": {},
87 | "source": [
88 | "Let's also check that the divergence of any curl is zero (we'll use a random, non-gradient vector field here):"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": 3,
94 | "metadata": {},
95 | "outputs": [
96 | {
97 | "data": {
98 | "text/plain": [
99 | "tensor(4.8832e-08)"
100 | ]
101 | },
102 | "execution_count": 3,
103 | "metadata": {},
104 | "output_type": "execute_result"
105 | }
106 | ],
107 | "source": [
108 | "vf = [tn.rand([64]*3, ranks_tt=1) for n in range(3)]\n",
109 | "tn.norm(tn.divergence(tn.curl(vf)))"
110 | ]
111 | },
112 | {
113 | "cell_type": "markdown",
114 | "metadata": {},
115 | "source": [
116 | "... and that the Laplacian of a scalar field `t` equals the divergence of `t`'s gradient:"
117 | ]
118 | },
119 | {
120 | "cell_type": "code",
121 | "execution_count": 5,
122 | "metadata": {},
123 | "outputs": [
124 | {
125 | "data": {
126 | "text/plain": [
127 | "tensor(0.)"
128 | ]
129 | },
130 | "execution_count": 5,
131 | "metadata": {},
132 | "output_type": "execute_result"
133 | }
134 | ],
135 | "source": [
136 | "tn.norm(tn.laplacian(t) - tn.divergence(tn.gradient(t)))"
137 | ]
138 | }
139 | ],
140 | "metadata": {
141 | "kernelspec": {
142 | "display_name": "Python 3",
143 | "language": "python",
144 | "name": "python3"
145 | },
146 | "language_info": {
147 | "codemirror_mode": {
148 | "name": "ipython",
149 | "version": 3
150 | },
151 | "file_extension": ".py",
152 | "mimetype": "text/x-python",
153 | "name": "python",
154 | "nbconvert_exporter": "python",
155 | "pygments_lexer": "ipython3",
156 | "version": "3.6.6"
157 | }
158 | },
159 | "nbformat": 4,
160 | "nbformat_minor": 2
161 | }
162 |
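
Finally, a short sketch of the vector-calculus identities checked in the notebook above (the curl of a gradient vanishes; the Laplacian equals the divergence of the gradient), assuming `tntorch` is installed:

```python
import tntorch as tn

t = tn.rand([64]*3, ranks_tt=10)          # a scalar field on a 64^3 grid

grad = tn.gradient(t)                     # vector field: list of 3 3D tensors
curl_of_grad = tn.curl(grad)              # should be ~0 up to numerical error
print([tn.norm(c).item() for c in curl_of_grad])

# Laplacian(t) == div(grad(t))
print(tn.norm(tn.laplacian(t) - tn.divergence(grad)))
```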
--------------------------------------------------------------------------------
/images/tensors.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VMML/tntorch/8c81a1cbb0c5b19db7c26a787acfca35e0fbd960/images/tensors.jpg
--------------------------------------------------------------------------------
/images/text.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VMML/tntorch/8c81a1cbb0c5b19db7c26a787acfca35e0fbd960/images/text.png
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 |
5 | try:
6 | from setuptools import setup
7 | except ImportError:
8 | from distutils.core import setup
9 |
10 |
11 | with open('README.md') as readme_file:
12 | readme = readme_file.read()
13 |
14 | with open('HISTORY.rst') as history_file:
15 | history = history_file.read()
16 |
17 | setup(
18 | name='tntorch',
19 | version='0.1.0',
20 | description="Tensor Network Learning with PyTorch",
21 | long_description=readme + '\n\n' + history,
22 | url='https://github.com/rballester/tntorch',
23 | author="Rafael Ballester-Ripoll",
24 | author_email='rballester@ifi.uzh.ch',
25 | packages=[
26 | 'tntorch',
27 | ],
28 | include_package_data=True,
29 | install_requires=[
30 | 'numpy',
31 | 'scipy',
32 | 'torch',
33 | ],
34 | license="LGPL",
35 | zip_safe=False,
36 | keywords='tntorch',
37 | classifiers=[
38 |         'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
39 | 'Natural Language :: English',
40 | 'Programming Language :: Python :: 3',
41 | 'Programming Language :: Python :: 3.3',
42 | 'Programming Language :: Python :: 3.4',
43 | 'Programming Language :: Python :: 3.5',
44 | 'Programming Language :: Python :: 3.6',
45 | ],
46 | test_suite='tests',
47 | tests_require='pytest'
48 | )
49 |
--------------------------------------------------------------------------------
/tests/test_automata.py:
--------------------------------------------------------------------------------
1 | import tntorch as tn
2 | import torch
3 | import numpy as np
4 |
5 |
6 | def test_weight_mask():
7 |
8 | for N in range(1, 5):
9 | for k in range(1, N):
10 | gt = tn.automata.weight_mask(N, k)
11 |             idx = torch.Tensor(np.array(np.unravel_index(np.arange(gt.numel(), dtype=np.int64), list(gt.shape))).T)
12 | assert torch.norm((torch.sum(idx, dim=1).round() == k).float() - gt[idx].torch().round().float()) <= 1e-7
13 |
14 | def test_accepted_inputs():
15 |
16 | for i in range(10):
17 | gt = tn.Tensor(torch.randint(0, 2, (1, 2, 3, 4)))
18 | idx = tn.automata.accepted_inputs(gt)
19 | assert len(idx) == round(tn.sum(gt).item())
20 | assert torch.norm(gt[idx].torch() - 1).item() <= 1e-7
21 |
--------------------------------------------------------------------------------
/tests/test_cross.py:
--------------------------------------------------------------------------------
1 | import tntorch as tn
2 | import torch
3 | from util import random_format
4 |
5 |
6 | def test_domain():
7 |
8 | def function(Xs):
9 | return 1. / torch.sum(Xs, dim=1)
10 |
11 | domain = [torch.linspace(1, 10, 10) for n in range(3)]
12 | t = tn.cross(function=function, domain=domain, ranks_tt=3, function_arg='matrix')
13 | gt = torch.meshgrid(domain)
14 | gt = 1. / sum(gt)
15 |
16 | assert tn.relative_error(gt, t) < 5e-2
17 |
18 |
19 | def test_tensors():
20 |
21 | for i in range(100):
22 | t = random_format([10] * 6)
23 | t2 = tn.cross(function=lambda x: x, tensors=t, ranks_tt=15, verbose=False)
24 | assert tn.relative_error(t, t2) < 1e-6
25 |
26 |
27 | def test_ops():
28 |
29 | x, y, z, w = tn.meshgrid([32]*4)
30 | t = x + y + z + w + 1
31 | assert tn.relative_error(1/t.torch(), 1/t) < 1e-4
32 | assert tn.relative_error(torch.cos(t.torch()), tn.cos(t)) < 1e-4
33 | assert tn.relative_error(torch.exp(t.torch()), tn.exp(t)) < 1e-4
34 |
--------------------------------------------------------------------------------
/tests/test_derivatives.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tntorch as tn
3 |
4 |
5 | def test_divergence():
6 |
7 | t = tn.rand([10] * 3 + [3], ranks_tt=3)
8 | d = tn.divergence([t[..., 0], t[..., 1], t[..., 2]])
9 | x = t.numpy()
10 |
11 | def partial(x, mode):
12 | return np.concatenate([np.diff(x, axis=mode),
13 | np.zeros([sh for sh in x.shape[:mode]] + [1] + [sh for sh in x.shape[mode+1:]])],
14 | axis=mode)
15 |
16 | gt = partial(x[..., 0], 0)
17 | gt += partial(x[..., 1], 1)
18 | gt += partial(x[..., 2], 2)
19 | assert np.linalg.norm(d.numpy() - gt) / np.linalg.norm(gt) <= 1e-7
20 |
--------------------------------------------------------------------------------
/tests/test_gpu.py:
--------------------------------------------------------------------------------
1 | import tntorch as tn
2 | import torch
3 |
4 | # If the testing machine has no GPU, these tests fall back to CPU and trivially pass
5 | device = 'cuda' if torch.cuda.is_available() else 'cpu'
6 |
7 | def test_tt():
8 | X = torch.randn(16,16,16)
9 | y1 = tn.Tensor(X, ranks_tt=3).torch()
10 | y2 = tn.Tensor(X, ranks_tt=3, device=device).torch().cpu()
11 | assert torch.abs(y1-y2).max() < 1e-5
12 |
13 | def test_tucker():
14 | X = torch.randn(16,16,16)
15 | y1 = tn.Tensor(X, ranks_tucker=3).torch()
16 | y2 = tn.Tensor(X, ranks_tucker=3, device=device).torch().cpu()
17 | assert torch.abs(y1-y2).max() < 1e-5
18 |
19 | def test_cp():
20 | X = torch.randn(16,16,16)
21 | y1 = tn.Tensor(X, ranks_cp=3).torch()
22 | y2 = tn.Tensor(X, ranks_cp=3, device=device).torch().cpu()
23 | assert torch.abs(y1-y2).max() < 1e-5
24 |
25 |
--------------------------------------------------------------------------------
/tests/test_indexing.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tntorch as tn
3 | from util import random_format
4 |
5 |
6 | def check(x, t, idx):
7 |
8 | xidx = x[idx]
9 | tidx = t[idx].numpy()
10 | assert np.array_equal(xidx.shape, tidx.shape)
11 | assert np.linalg.norm(xidx - tidx) / np.linalg.norm(xidx) <= 1e-7
12 |
13 |
14 | def test_squeeze():
15 |
16 | for i in range(100):
17 | x = np.random.randint(1, 3, np.random.randint(2, 10))
18 | t = tn.Tensor(x)
19 | x = np.squeeze(x)
20 | t = tn.squeeze(t)
21 | assert np.array_equal(x.shape, t.shape)
22 |
23 |
24 | def test_slicing():
25 |
26 | t = tn.rand([1, 3, 1, 2, 1], ranks_tt=3, ranks_tucker=2)
27 | x = t.numpy()
28 | idx = slice(None)
29 | check(x, t, idx)
30 | idx = (slice(None), slice(1, None))
31 | check(x, t, idx)
32 | idx = (slice(None), slice(0, 2, None), slice(0, 1))
33 | check(x, t, idx)
34 |
35 |
36 | def test_mixed():
37 |
38 | def check_one_tensor(t):
39 |
40 | x = t.numpy()
41 |
42 | idxs = []
43 | idxs.append(([0, 0, 0], None, None, 3))
44 | idxs.append(([0, 0, 0, 0, 0], slice(None), None, 0))
45 | idxs.append((0, [0]))
46 | idxs.append(([0], [0]))
47 | idxs.append(([0], None, None, None, 0, 1))
48 | idxs.append((slice(None), [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]))
49 | idxs.append(([0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]))
50 | idxs.append((slice(None), slice(None), slice(None), 0))
51 | idxs.append((slice(None), slice(None), [0, 1], 0))
52 | idxs.append((0, np.array([0]), None, 0))
53 | idxs.append((slice(None), slice(None), slice(None), slice(None), None))
54 | idxs.append((None, slice(None), slice(None), slice(None), slice(None), None))
55 | idxs.append((None, slice(None), slice(None), slice(None), slice(None)))
56 |
57 |
58 | for idx in idxs:
59 | check(x, t, idx)
60 |
61 | check_one_tensor(tn.rand([6, 7, 8, 9], ranks_tt=3, ranks_tucker=2))
62 | check_one_tensor(tn.rand([6, 7, 8, 9], ranks_tt=None, ranks_tucker=2, ranks_cp=3))
63 | check_one_tensor(tn.rand([6, 7, 8, 9], ranks_tt=[4, None, None], ranks_tucker=2, ranks_cp=[None, None, 3, 3]))
64 | check_one_tensor(tn.rand([6, 7, 8, 9], ranks_tt=[4, None, None], ranks_tucker=[2, None, 2, None], ranks_cp=[None, None, 3, 3]))
65 | check_one_tensor(tn.rand([6, 7, 8, 9], ranks_tt=[None, 4, 4], ranks_tucker=2, ranks_cp=[3, None, None, None]))
66 |
67 | for i in range(100):
68 | check_one_tensor(random_format([6, 7, 8, 9]))
69 |
70 | t = tn.rand([6, 7, 8, 9], ranks_cp=[3, 3, 3, 3])
71 | t.cores[-1] = t.cores[-1].permute(1, 0)[:, :, None]
72 | check_one_tensor(t)
73 |
--------------------------------------------------------------------------------
/tests/test_init.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tntorch as tn
3 |
4 |
5 | def test_from_ndarray():
6 |
7 | for i in range(100):
8 | gt = np.random.rand(*np.random.randint(1, 8, np.random.randint(1, 6)))
9 | t = tn.Tensor(gt)
10 | reco = t.numpy()
11 |         assert np.linalg.norm(gt - reco) / np.linalg.norm(gt) <= 1e-7
12 |
--------------------------------------------------------------------------------
/tests/test_ops.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tntorch as tn
3 | import torch
4 | from util import random_format
5 |
6 |
7 | def check(t1, t2):
8 | x1 = t1.torch()
9 | x2 = t2.torch()
10 | assert tn.relative_error(t1+t2, x1+x2) <= 1e-7
11 | assert tn.relative_error(t1-t2, x1-x2) <= 1e-7
12 | assert tn.relative_error(t1*t2, x1*x2) <= 1e-7
13 | assert tn.relative_error(-t1+t2, -x1+x2) <= 1e-7
14 |
15 |
16 | def test_ops():
17 |
18 | for i in range(100):
19 | t1 = tn.rand(np.random.randint(1, 8, np.random.randint(1, 6)), ranks_tt=3, ranks_tucker=2)
20 | t2 = tn.rand(t1.shape)
21 | check(t1, t2)
22 |
23 | shape = [8]*4
24 |
25 | t1 = tn.rand(shape, ranks_tt=[3, None, None], ranks_cp=[None, None, 2, 2], ranks_tucker=5)
26 | t2 = tn.rand(shape, ranks_tt=[None, 2, None], ranks_cp=[4, None, None, 3])
27 | check(t1, t2)
28 |
29 | t2 = t1*2
30 | check(t1, t2)
31 |
32 | for i in range(100):
33 | t1 = random_format(shape)
34 | t2 = random_format(shape)
35 | check(t1, t2)
36 |
37 |
38 | def test_broadcast():
39 |
40 | for i in range(10):
41 | shape1 = np.random.randint(1, 10, 4)
42 | shape2 = shape1.copy()
43 | shape2[np.random.choice(len(shape1), np.random.randint(0, len(shape1)+1))] = 1
44 | t1 = random_format(shape1)
45 | t2 = random_format(shape2)
46 | check(t1, t2)
47 |
48 |
49 | def test_dot():
50 |
51 | def check():
52 | x1 = t1.torch()
53 | x2 = t2.torch()
54 | gt = torch.dot(x1.flatten(), x2.flatten())
55 | assert tn.relative_error(tn.dot(t1, t2), gt) <= 1e-7
56 |
57 | t1 = tn.rand(np.random.randint(1, 8, np.random.randint(1, 6)), ranks_tt=2, ranks_tucker=None)
58 | t2 = tn.rand(t1.shape, ranks_tt=3, ranks_tucker=None)
59 | check()
60 |
61 | t1 = tn.rand(np.random.randint(1, 8, np.random.randint(1, 6)), ranks_tt=2, ranks_tucker=4)
62 | t2 = tn.rand(t1.shape, ranks_tt=3, ranks_tucker=None)
63 | check()
64 |
65 | t1 = tn.rand(np.random.randint(1, 8, np.random.randint(1, 6)), ranks_tt=2, ranks_tucker=None)
66 | t2 = tn.rand(t1.shape, ranks_tt=3, ranks_tucker=4)
67 | check()
68 |
69 | t1 = tn.rand(np.random.randint(1, 8, np.random.randint(1, 6)), ranks_tt=2, ranks_tucker=3)
70 | t2 = tn.rand(t1.shape, ranks_tt=3, ranks_tucker=4)
71 | check()
72 |
73 | t1 = tn.rand([32] * 4, ranks_tt=[3, None, None], ranks_cp=[None, None, 10, 10], ranks_tucker=5)
74 | t2 = tn.rand([32]*4, ranks_tt=[None, 2, None], ranks_cp=[4, None, None, 5])
75 | check()
76 |
77 | shape = [8]*4
78 | for i in range(100):
79 | t1 = random_format(shape)
80 | t2 = random_format(shape)
81 | check()
82 |
83 |
84 | def test_stats():
85 |
86 | def check():
87 | x = t.torch()
88 | assert tn.relative_error(tn.mean(t), torch.mean(x)) <= 1e-3
89 | assert tn.relative_error(tn.var(t), torch.var(x)) <= 1e-3
90 | assert tn.relative_error(tn.norm(t), torch.norm(x)) <= 1e-3
91 |
92 | shape = [8]*4
93 | for i in range(100):
94 | t = random_format(shape)
95 | check()
96 |
97 |
--------------------------------------------------------------------------------
/tests/test_round.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tntorch as tn
3 |
4 |
5 | def test_orthogonalization():
6 |
7 | for i in range(100):
8 | gt = tn.rand(np.random.randint(1, 8, np.random.randint(2, 6)))
9 | t = gt.clone()
10 | assert tn.relative_error(gt, t) <= 1e-7
11 | t.left_orthogonalize(0)
12 | assert tn.relative_error(gt, t) <= 1e-7
13 | t.right_orthogonalize(t.dim()-1)
14 | assert tn.relative_error(gt, t) <= 1e-7
15 | t.orthogonalize(np.random.randint(t.dim()))
16 | assert tn.relative_error(gt, t) <= 1e-7
17 |
18 |
19 | def test_round_tt_svd():
20 |
21 | for i in range(100):
22 | gt = tn.rand(np.random.randint(1, 8, np.random.randint(8, 10)), ranks_tt=np.random.randint(1, 10))
23 | gt.round_tt(1e-8, algorithm='svd')
24 | t = gt+gt
25 | t.round_tt(1e-8, algorithm='svd')
26 | assert tn.relative_error(gt, t/2) <= 1e-4
27 | assert max(gt.ranks_tt) == max(t.ranks_tt)
28 |
29 |
30 | def test_round_tt_eig():
31 |
32 | for i in range(100):
33 | gt = tn.rand(np.random.randint(1, 8, np.random.randint(8, 10)), ranks_tt=np.random.randint(1, 10))
34 | gt.round_tt(1e-8, algorithm='eig')
35 | t = gt+gt
36 | t.round_tt(1e-8, algorithm='eig')
37 | assert tn.relative_error(gt, t/2) <= 1e-7
38 |
39 |
40 | def test_round_tucker():
41 | for i in range(100):
42 | eps = np.random.rand()**2
43 | gt = tn.rand([32]*4, ranks_tt=8, ranks_tucker=8)
44 | t = gt.clone()
45 | t.round_tucker(eps=eps)
46 | assert tn.relative_error(gt, t) <= eps
47 |
--------------------------------------------------------------------------------
/tests/test_tools.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tntorch as tn
3 |
4 |
5 | def test_cat():
6 |
7 | for i in range(100):
8 | N = np.random.randint(1, 4)
9 | shape1 = np.random.randint(1, 10, N)
10 | mode = np.random.randint(N)
11 | shape2 = shape1.copy()
12 | shape2[mode] = np.random.randint(1, 10)
13 | t1 = tn.rand(shape1, ranks_tt=2, ranks_tucker=2)
14 | t2 = tn.rand(shape2, ranks_tt=2)
15 | gt = np.concatenate([t1.numpy(), t2.numpy()], mode)
16 | assert np.linalg.norm(gt - tn.cat([t1, t2], dim=mode).numpy()) <= 1e-7
17 |
18 |
19 | def test_cumsum():
20 |
21 | for i in range(100):
22 | N = np.random.randint(1, 4)
23 | howmany = 1
24 | modes = np.random.choice(N, howmany, replace=False)
25 | shape = np.random.randint(1, 10, N)
26 | t = tn.rand(shape, ranks_tt=2, ranks_tucker=2)
27 | assert np.linalg.norm(tn.cumsum(t, modes).numpy() - np.cumsum(t.numpy(), *modes)) <= 1e-7
28 |
--------------------------------------------------------------------------------
/tests/util.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tntorch as tn
3 |
4 |
5 | def random_format(shape):
6 | """
7 | Generate a random tensor of random format (often hybrid) with the given shape
8 |
9 | :param shape:
10 | :return: a tensor
11 | """
12 |
13 | N = len(shape)
14 | if np.random.randint(4) == 0:
15 | ranks_tucker = None
16 | else:
17 |         ranks_tucker = [None]*N
18 | for n in sorted(np.random.choice(N, np.random.randint(N+1), replace=False)):
19 | ranks_tucker[n] = np.random.randint(1, 5)
20 | if np.random.randint(4) == 0:
21 | ranks_tt = None
22 | ranks_cp = np.random.randint(1, 5)
23 | elif np.random.randint(4) == 0:
24 | ranks_cp = None
25 | ranks_tt = np.random.randint(1, 5, N-1)
26 | else:
27 | ranks_tt = list(np.random.randint(1, 5, N-1))
28 | ranks_cp = [None]*N
29 | for n in sorted(np.random.choice(N, np.random.randint(N+1), replace=False)):
30 | if n > 0 and ranks_cp[n-1] is not None:
31 | r = ranks_cp[n-1]
32 | else:
33 | r = np.random.randint(1, 5)
34 | ranks_cp[n] = r
35 | if n > 0:
36 | ranks_tt[n-1] = None
37 | if n < N-1:
38 | ranks_tt[n] = None
39 | return tn.randn(shape, ranks_tt=ranks_tt, ranks_cp=ranks_cp, ranks_tucker=ranks_tucker)
--------------------------------------------------------------------------------
/tntorch/__init__.py:
--------------------------------------------------------------------------------
1 | from .anova import *
2 | from .autodiff import *
3 | from .automata import *
4 | from .create import *
5 | from .cross import *
6 | from .derivatives import *
7 | from .logic import *
8 | from .metrics import *
9 | from .ops import *
10 | from .round import *
11 | from .tensor import *
12 | from .tools import *
13 |
--------------------------------------------------------------------------------
/tntorch/anova.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import numpy as np
3 | import torch
4 | import tntorch as tn
5 |
6 |
7 | def anova_decomposition(t, marginals=None):
8 | """
9 | Compute an extended tensor that contains all terms of the ANOVA decomposition for a given tensor.
10 |
11 | Reference: R. Ballester-Ripoll, E. G. Paredes, and R. Pajarola: `"Sobol Tensor Trains for Global Sensitivity Analysis" (2017) `_
12 |
13 | :param t: ND input tensor
14 | :param marginals: list of N vectors, each containing the PMF for each variable (use None for uniform distributions)
15 | :return: a :class:`Tensor`
16 | """
17 |
18 | marginals = copy.deepcopy(marginals)
19 | if marginals is None:
20 | marginals = [None] * t.dim()
21 | for n in range(t.dim()):
22 | if marginals[n] is None:
23 | marginals[n] = torch.ones([t.shape[n]]) / float(t.shape[n])
24 | cores = [c.clone() for c in t.cores]
25 | Us = []
26 | idxs = []
27 | for n in range(t.dim()):
28 | if t.Us[n] is None:
29 | U = torch.eye(t.shape[n])
30 | else:
31 | U = t.Us[n]
32 | expected = torch.sum(U * (marginals[n][:, None] / torch.sum(marginals[n])), dim=0, keepdim=True)
33 | Us.append(torch.cat((expected, U-expected), dim=0))
34 | idxs.append([0] + [1]*t.shape[n])
35 | return tn.Tensor(cores, Us, idxs=idxs)
36 |
37 |
38 | def undo_anova_decomposition(a):
39 | """
40 | Undo the transformation done by :func:`anova_decomposition()`.
41 |
42 | :param a: a :class:`Tensor` obtained with :func:`anova_decomposition()`
43 |
44 | :return: a :class:`Tensor` t that has `a` as its ANOVA tensor
45 | """
46 |
47 | cores = []
48 | Us = []
49 | for n in range(a.dim()):
50 | if a.Us[n] is None:
51 | cores.append(a.cores[n][..., 1:, :] + a.cores[n][..., 0:1, :])
52 | Us.append(None)
53 | else:
54 | cores.append(a.cores[n].clone())
55 | Us.append(a.Us[n][1:, :] + a.Us[n][0:1, :])
56 | return tn.Tensor(cores, Us=Us)
57 |
58 |
59 | def truncate_anova(t, mask, keepdim=False, marginals=None):
60 | """
61 | Given a tensor and a mask, return the function that results after deleting all ANOVA terms that do not satisfy the
62 | mask.
63 |
64 | :Example:
65 |
66 | >>> t = ... # an ND tensor
67 | >>> x = tn.symbols(t.dim())[0]
68 | >>> t2 = tn.truncate_anova(t, mask=tn.only(x), keepdim=False) # This tensor will depend on one variable only
69 |
70 | :param t:
71 | :param mask:
72 | :param keepdim: if True, all dummy dimensions will be preserved, otherwise they will disappear. Default is False
73 | :param marginals: see :func:`anova_decomposition()`
74 |
75 | :return: a :class:`Tensor`
76 | """
77 |
78 | t = tn.undo_anova_decomposition(tn.mask(tn.anova_decomposition(t, marginals=marginals), mask=mask))
79 | if not keepdim:
80 | N = t.dim()
81 | affecting = torch.sum(torch.Tensor(tn.accepted_inputs(mask).double()), dim=0)
82 | slices = [0 for n in range(N)]
83 | for i in np.where(affecting)[0]:
84 | slices[int(i)] = slice(None)
85 | t = t[slices]
86 | return t
87 |
88 |
89 | def sobol(t, mask, marginals=None, normalize=True):
90 | """
91 | Compute Sobol indices (as given by a certain mask) for a tensor and independently distributed input variables.
92 |
93 | Reference: R. Ballester-Ripoll, E. G. Paredes, and R. Pajarola: `"Sobol Tensor Trains for Global Sensitivity Analysis" (2017) `_
94 |
95 | :param t: an N-dimensional :class:`Tensor`
96 | :param mask: an N-dimensional mask
97 | :param marginals: a list of N vectors (will be normalized if not summing to 1). If None (default), uniform distributions are assumed for all variables
98 | :param normalize: whether to normalize indices by the total variance of the model (True by default)
99 |
100 | :return: a scalar >= 0
101 | """
102 |
103 | if marginals is None:
104 | marginals = [None] * t.dim()
105 |
106 | a = tn.anova_decomposition(t, marginals)
107 | a -= tn.Tensor([torch.cat((torch.ones(1, 1, 1),
108 | torch.zeros(1, sh-1, 1)), dim=1)
109 | for sh in a.shape])*a[(0,)*t.dim()] # Set empty tuple to 0
110 | am = a.clone()
111 | for n in range(t.dim()):
112 | if marginals[n] is None:
113 | m = torch.ones([t.shape[n]])
114 | else:
115 | m = marginals[n]
116 | m /= torch.sum(m) # Make sure each marginal sums to 1
117 | if am.Us[n] is None:
118 | if am.cores[n].dim() == 3:
119 | am.cores[n][:, 1:, :] *= m[None, :, None]
120 | else:
121 | am.cores[n][1:, :] *= m[:, None]
122 | else:
123 | am.Us[n][1:, :] *= m[:, None]
124 | am_masked = tn.mask(am, mask)
125 | if am_masked.cores[-1].shape[-1] > 1:
126 | am_masked.cores.append(torch.eye(am_masked.cores[-1].shape[-1])[:, :, None])
127 | am_masked.Us.append(None)
128 |
129 | if normalize:
130 | return tn.dot(a, am_masked) / tn.dot(a, am)
131 | else:
132 | return tn.dot(a, am_masked)
133 |
134 |
135 | def mean_dimension(t, mask=None, marginals=None):
136 | """
137 | Computes the mean dimension of a given tensor with given marginal distributions. This quantity measures how well the
138 | represented function can be expressed as a sum of low-parametric functions. For example, mean dimension 1 (the
139 | lowest possible value) means that it is a purely additive function: :math:`f(x_1, ..., x_N) = f_1(x_1) + ... + f_N(x_N)`.
140 |
141 | Assumption: the input variables :math:`x_n` are independently distributed.
142 |
143 | References:
144 |
145 | - R. E. Caflisch, W. J. Morokoff, and A. B. Owen: `"Valuation of Mortgage Backed Securities Using Brownian Bridges to Reduce Effective Dimension" (1997) `_
146 |
147 | - R. Ballester-Ripoll, E. G. Paredes, and R. Pajarola: `"Tensor Algorithms for Advanced Sensitivity Metrics" (2017) `_
148 |
149 | :param t: an N-dimensional :class:`Tensor`
150 | :param marginals: a list of N vectors (will be normalized if not summing to 1). If None (default), uniform distributions are assumed for all variables
151 |
152 | :return: a scalar >= 1
153 | """
154 |
155 | if mask is None:
156 | return tn.sobol(t, tn.weight(t.dim()), marginals=marginals)
157 | else:
158 | return tn.sobol(t, tn.mask(tn.weight(t.dim()), mask), marginals=marginals) / tn.sobol(t, mask, marginals=marginals)
159 |
160 |
161 | def dimension_distribution(t, mask=None, order=None, marginals=None):
162 | """
163 | Computes the dimension distribution of an ND tensor.
164 |
165 | :param t: ND input :class:`Tensor`
166 | :param mask: an optional mask :class:`Tensor` to restrict to
167 | :param order: int, compute only this many order contributions. By default, all N are returned
168 | :param marginals: PMFs for input variables. By default, uniform distributions
169 |
170 | :return: a PyTorch vector containing N elements
171 | """
172 |
173 | if order is None:
174 | order = t.dim()
175 | if mask is None:
176 | return tn.sobol(t, tn.weight_one_hot(t.dim(), order+1), marginals=marginals).torch()[1:]
177 | else:
178 | mask2 = tn.mask(tn.weight_one_hot(t.dim(), order+1), mask)
179 | return tn.sobol(t, mask2, marginals=marginals).torch()[1:] / tn.sobol(t, mask, marginals=marginals)
180 |
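A minimal usage sketch for the sensitivity-analysis routines above (the surrogate model and its ranks are illustrative):

import tntorch as tn

t = tn.rand([16] * 4, ranks_tt=3)        # Surrogate model over 4 input variables
x, y, z, w = tn.symbols(t.dim())         # One Boolean symbol per variable

print(tn.sobol(t, x))                    # Total Sobol index of x (all terms involving x)
print(tn.sobol(t, tn.only(x)))           # First-order index: terms involving x alone
print(tn.mean_dimension(t))              # >= 1; equals 1 for purely additive functions
print(tn.dimension_distribution(t))      # How variance splits across interaction orders
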
--------------------------------------------------------------------------------
/tntorch/autodiff.py:
--------------------------------------------------------------------------------
1 | import tntorch as tn
2 | import torch
3 | import numpy as np
4 | import time
5 | from functools import reduce
6 |
7 |
8 | def optimize(tensors, loss_function, optimizer=torch.optim.Adam, tol=1e-4, max_iter=1e4, print_freq=500, verbose=True):
9 | """
10 | High-level wrapper for iterative learning.
11 |
12 |     Default stopping criterion: the absolute (or relative) loss improvement must fall below `tol`.
13 |     In addition, the rate of loss improvement must be slowing down.
14 |
15 | :param tensors: one or several tensors; will be fed to `loss_function` and optimized in place
16 | :param loss_function: must take `tensors` and return a scalar (or tuple thereof)
17 | :param optimizer: one from https://pytorch.org/docs/stable/optim.html. Default is torch.optim.Adam
18 | :param tol: stopping criterion
19 | :param max_iter: default is 1e4
20 | :param print_freq: progress will be printed every this many iterations
21 | :param verbose:
22 | """
23 |
24 | if not isinstance(tensors, (list, tuple)):
25 | tensors = [tensors]
26 | parameters = []
27 | for t in tensors:
28 | if isinstance(t, tn.Tensor):
29 | parameters.extend([c for c in t.cores if c.requires_grad])
30 | parameters.extend([U for U in t.Us if U is not None and U.requires_grad])
31 | elif t.requires_grad:
32 | parameters.append(t)
33 | if len(parameters) == 0:
34 | raise ValueError("There are no parameters to optimize. Did you forget a requires_grad=True somewhere?")
35 |
36 | optimizer = optimizer(parameters)
37 | losses = []
38 | converged = False
39 | start = time.time()
40 | iter = 0
41 | while True:
42 | optimizer.zero_grad()
43 | loss = loss_function(*tensors)
44 | if not isinstance(loss, (tuple, list)):
45 | loss = [loss]
46 | losses.append(reduce(lambda x, y: x + y, loss))
47 | if len(losses) >= 2:
48 | delta_loss = (losses[-1] - losses[-2])
49 | else:
50 | delta_loss = float('-inf')
51 | if iter >= 2 and tol is not None and (losses[-1] <= tol or -delta_loss / losses[-1] <= tol) and losses[-2] - \
52 | losses[-1] < losses[-3] - losses[-2]:
53 | converged = True
54 | break
55 | if iter == max_iter:
56 | break
57 | if verbose and iter % print_freq == 0:
58 | print('iter: {: <{}} | loss: '.format(iter, len('{}'.format(max_iter))), end='')
59 | print(' + '.join(['{:10.6f}'.format(l.item()) for l in loss]), end='')
60 | if len(loss) > 1:
61 | print(' = {:10.4}'.format(losses[-1].item()), end='')
62 | print(' | total time: {:9.4f}'.format(time.time() - start))
63 | losses[-1].backward()
64 | optimizer.step()
65 | iter += 1
66 | if verbose:
67 | print('iter: {: <{}} | loss: '.format(iter, len('{}'.format(max_iter))), end='')
68 | print(' + '.join(['{:10.6f}'.format(l.item()) for l in loss]), end='')
69 | if len(loss) > 1:
70 | print(' = {:10.4}'.format(losses[-1].item()), end='')
71 | print(' | total time: {:9.4f}'.format(time.time() - start), end='')
72 | if converged:
73 | print(' <- converged (tol={})'.format(tol))
74 | else:
75 | print(' <- max_iter was reached: {}'.format(max_iter))
76 |
77 |
78 | def dof(t):
79 | """
80 | Compute the number of degrees of freedom of a tensor network.
81 |
82 | It is the sum of sizes of all its tensor nodes that have the requires_grad=True flag.
83 |
84 | :param t: input tensor
85 |
86 | :return: an integer
87 | """
88 |
89 | result = 0
90 | for n in range(t.dim()):
91 | if t.cores[n].requires_grad:
92 | result += t.cores[n].numel()
93 | if t.Us[n] is not None and t.Us[n].requires_grad:
94 | result += t.Us[n].numel()
95 | return result
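
A minimal sketch of driving optimize() with a differentiable loss; the target tensor, the rank, and the choice of tn.relative_error as the loss are illustrative:

import tntorch as tn
import torch

X = torch.randn(16, 16, 16)                          # Full tensor to approximate
t = tn.rand(X.shape, ranks_tt=5, requires_grad=True)

def loss(t):
    return tn.relative_error(X, t) ** 2              # Scalar, differentiable w.r.t. the cores

tn.optimize(t, loss)                                  # Updates the cores of `t` in place
print(tn.relative_error(X, t).item())
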
--------------------------------------------------------------------------------
/tntorch/automata.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import tntorch as tn
3 |
4 |
5 | def weight_mask(N, weight, nsymbols=2):
6 | """
7 | Accepts a string iff its number of 1's equals (or is in) `weight`
8 |
9 | :param N: number of dimensions
10 | :param weight: an integer (or list thereof): recognized weight(s)
11 | :param nsymbols: slices per core (default is 2)
12 |
13 | :return: a mask tensor
14 | """
15 |
16 | if not hasattr(weight, '__len__'):
17 | weight = [weight]
18 | weight = torch.Tensor(weight).long()
19 | assert weight[0] >= 0
20 | t = tn.weight_one_hot(N, int(max(weight) + 1), nsymbols)
21 | t.cores[-1] = torch.sum(t.cores[-1][:, :, weight], dim=2, keepdim=True)
22 | return t
23 |
24 |
25 | def weight_one_hot(N, r=None, nsymbols=2):
26 | """
27 | Given a string with :math:`k` 1's, it produces a vector that represents :math:`k` in `one hot encoding `_
28 |
29 | :param N: number of dimensions
30 |     :param r: length of the one-hot encoding (default is N+1)
31 |     :param nsymbols: slices per core (default is 2)
32 |
33 | :return: a vector of N zeros, except its :math:`k`-th element which is a 1
34 | """
35 |
36 | if not hasattr(nsymbols, '__len__'):
37 | nsymbols = [nsymbols]*N
38 | assert len(nsymbols) == N
39 | if r is None:
40 | r = N+1
41 |
42 | cores = []
43 | for n in range(N):
44 | core = torch.zeros([r, nsymbols[n], r])
45 | core[:, 0, :] = torch.eye(r)
46 | for s in range(1, nsymbols[n]):
47 | core[:, s, s:] = torch.eye(r)[:, :-s]
48 | cores.append(core)
49 | cores[0] = cores[0][0:1, :, :]
50 | return tn.Tensor(cores)
51 |
52 |
53 | def weight(N, nsymbols=2):
54 | """
55 | For any string, counts how many 1's it has
56 |
57 | :param N: number of dimensions
58 | :param nsymbols: slices per core (default is 2)
59 |
60 | :return: a mask tensor
61 | """
62 |
63 | cores = []
64 | for n in range(N):
65 | core = torch.eye(2)[:, None, :].repeat(1, nsymbols, 1)
66 | core[1, :, 0] = torch.arange(nsymbols)
67 | cores.append(core)
68 | cores[0] = cores[0][1:2, :, :]
69 | cores[-1] = cores[-1][:, :, 0:1]
70 | return tn.Tensor(cores)
71 |
72 |
73 | def length(N): # TODO
74 | """
75 | :todo:
76 |
77 | :param N:
78 | :return:
79 | """
80 | raise NotImplementedError
81 |
82 |
83 | def accepted_inputs(t):
84 | """
85 | Returns all strings accepted by an automaton, in alphabetical order.
86 |
87 | Note: each string s will appear as many times as the value t[s]
88 |
89 | :param t: a :class:`Tensor`
90 |
91 | :return Xs: a Torch matrix, each row is one string
92 | """
93 |
94 | def recursion(Xs, left, rights, bound, mu):
95 | if mu == t.dim():
96 | return
97 | fiber = torch.einsum('ijk,k->ij', (t.cores[mu], rights[mu + 1]))
98 | per_point = torch.matmul(left, fiber).round()
99 | c = torch.cat((torch.Tensor([0]), per_point.cumsum(dim=0))).long()
100 | for i, p in enumerate(per_point):
101 | if c[i] == c[i+1]: # Improductive prefix, don't go further
102 | continue
103 | Xs[bound+c[i]:bound+c[i+1], mu] = i
104 | recursion(Xs, torch.matmul(left, t.cores[mu][:, i, :]), rights, bound + c[i], mu+1)
105 |
106 | Xs = torch.zeros([round(tn.sum(t).item()), t.dim()], dtype=torch.long)
107 | rights = [torch.ones(1)] # Precomputed right-product chains
108 | for core in t.cores[::-1]:
109 | rights.append(torch.matmul(torch.sum(core, dim=1), rights[-1]))
110 | rights = rights[::-1]
111 | recursion(Xs, torch.ones(1), rights, 0, 0)
112 | return Xs
113 |
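A small example of the weight-counting automata above (N = 4 is arbitrary):

import tntorch as tn

m = tn.weight_mask(4, weight=2)       # Accepts 4-bit strings with exactly two 1's
print(tn.sum(m))                       # 6 accepted strings, i.e. C(4, 2)
print(tn.accepted_inputs(m))           # A 6 x 4 integer matrix, one string per row
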
--------------------------------------------------------------------------------
/tntorch/create.py:
--------------------------------------------------------------------------------
1 | import tntorch as tn
2 | import torch
3 | import numpy as np
4 |
5 |
6 | def eye(n, m=None, device=None, requires_grad=None):
7 | """
8 | Generates identity matrix like PyTorch's `eye()`.
9 |
10 | :param n: number of rows
11 | :param m: number of columns (default is n)
12 |
13 | :return: a 2D :class:`Tensor`
14 | """
15 |
16 | c1 = torch.eye(n, m)
17 | c2 = torch.eye(m, m)
18 | return tn.Tensor([c1[None, :, :], c2[:, :, None]], device=device, requires_grad=requires_grad)
19 |
20 |
21 | def rand(*shape, **kwargs):
22 | """
23 | Generate a :class:`Tensor` with random cores (and optionally factors), whose entries are uniform in :math:`[0, 1]`.
24 |
25 | :Example:
26 |
27 | >>> tn.rand([10, 10], ranks_tt=3) # Rank-3 TT tensor of shape 10x10
28 |
29 | :param shape: N ints (or a list of ints)
30 | :param ranks_tt: an integer or list of N-1 ints
31 | :param ranks_cp: an int or list. If a list, will be interleaved with ranks_tt
32 | :param ranks_tucker: an int or list
33 | :param requires_grad: default is False
34 | :param device:
35 |
36 | :return: a random tensor
37 | """
38 |
39 | return _create(torch.rand, *shape, **kwargs)
40 |
41 |
42 | def rand_like(t, **kwargs):
43 | """
44 | Calls :meth:`rand()` with the shape of a given tensor.
45 |
46 | :param t: a tensor
47 | :param kwargs:
48 |
49 | :return: a :class:`Tensor`
50 | """
51 |
52 | return _create(torch.rand, t.shape, **kwargs)
53 |
54 |
55 | def randn(*shape, **kwargs):
56 | """
57 | Like :meth:`rand()`, but entries are normally distributed with :math:`\\mu=0, \\sigma=1`.
58 | """
59 |
60 | return _create(torch.randn, *shape, **kwargs)
61 |
62 |
63 | def randn_like(t, **kwargs):
64 | """
65 | Calls :meth:`randn()` with the shape of a given tensor.
66 |
67 | :param t: a tensor
68 | :param kwargs:
69 |
70 | :return: a :class:`Tensor`
71 | """
72 |
73 | return _create(torch.randn, t.shape, **kwargs)
74 |
75 |
76 | def ones(*shape, **kwargs):
77 | """
78 | Generate a :class:`Tensor` filled with ones.
79 |
80 | :Example:
81 |
82 | >>> tn.ones(10) # Vector of ones
83 |
84 | :param shape: N ints (or a list of ints)
85 | :param requires_grad:
86 | :param device:
87 |
88 | :return: a TT :class:`Tensor` of rank 1
89 | """
90 |
91 | return _create(torch.ones, *shape, ranks_tt=1, **kwargs)
92 |
93 |
94 | def ones_like(t, **kwargs):
95 | """
96 | Calls :meth:`ones()` with the shape of a given tensor.
97 |
98 | :param t: a tensor
99 | :param kwargs:
100 |
101 | :return: a :class:`Tensor`
102 | """
103 |
104 | return ones(t.shape, **kwargs)
105 |
106 |
107 | def full(shape, fill_value, **kwargs):
108 | """
109 | Generate a :class:`Tensor` filled with a constant.
110 |
111 | :param shape: list of ints
112 | :param fill_value: constant to fill the tensor with
113 | :param requires_grad:
114 | :param device:
115 |
116 | :return: a TT :class:`Tensor` of rank 1
117 | """
118 |
119 | return fill_value*tn.ones(*shape, **kwargs)
120 |
121 |
122 | def full_like(t, fill_value, **kwargs):
123 | """
124 | Calls :meth:`full()` with the shape of a given tensor.
125 |
126 | :param t: a tensor
127 | :param kwargs:
128 |
129 | :return: a :class:`Tensor`
130 | """
131 |
132 | return tn.full(t.shape, fill_value=fill_value, **kwargs)
133 |
134 |
135 | def zeros(*shape, **kwargs):
136 | """
137 | Generate a :class:`Tensor` filled with zeros.
138 |
139 | :param shape: N ints (or a list of ints)
140 | :param requires_grad:
141 | :param device:
142 |
143 | :return: a TT :class:`Tensor` of rank 1
144 | """
145 |
146 | return _create(torch.zeros, *shape, ranks_tt=1, **kwargs)
147 |
148 |
149 | def zeros_like(t, **kwargs):
150 | """
151 | Calls :meth:`zeros()` with the shape of a given tensor.
152 |
153 | :param t: a tensor
154 | :param kwargs:
155 |
156 | :return: a :class:`Tensor`
157 | """
158 |
159 | return zeros(t.shape, **kwargs)
160 |
161 |
162 | def gaussian(shape, sigma_factor=0.2):
163 | """
164 | Create a multivariate Gaussian that is axis-aligned (i.e. with diagonal covariance matrix).
165 |
166 | :param shape: list of ints
167 | :param sigma_factor: a real (or list of reals) encoding the ratio sigma / shape. Default is 0.2, i.e. one fifth along each dimension
168 |
169 | :return: a :class:`Tensor` that sums to 1
170 | """
171 |
172 | if hasattr(shape[0], '__len__'):
173 | shape = shape[0]
174 | N = len(shape)
175 | if not hasattr(sigma_factor, '__len__'):
176 | sigma_factor = [sigma_factor]*N
177 |
178 | cores = [torch.ones(1, 1, 1) for n in range(N)]
179 | Us = []
180 | for n in range(N):
181 | sigma = sigma_factor[n] * shape[n]
182 | if shape[n] == 1:
183 | x = torch.Tensor([0])
184 | else:
185 | x = torch.linspace(-shape[n] / 2, shape[n] / 2, shape[n])
186 | U = torch.exp(-x**2 / (2*sigma**2))
187 | U = U[:, None] / torch.sum(U)
188 | Us.append(U)
189 | return tn.Tensor(cores, Us)
190 |
191 |
192 | def gaussian_like(tensor, **kwargs):
193 | """
194 | Calls :meth:`gaussian()` with the shape of a given tensor.
195 |
196 |     :param tensor: a tensor
197 | :param kwargs:
198 |
199 | :return: a :class:`Tensor`
200 | """
201 |
202 | return gaussian(tensor.shape, **kwargs)
203 |
204 |
205 | def _create(function, *shape, ranks_tt=None, ranks_cp=None, ranks_tucker=None, requires_grad=False, device=None):
206 | if hasattr(shape[0], '__len__'):
207 | shape = shape[0]
208 | N = len(shape)
209 | if not hasattr(ranks_tucker, "__len__"):
210 | ranks_tucker = [ranks_tucker for n in range(len(shape))]
211 | corespatials = []
212 | for n in range(len(shape)):
213 | if ranks_tucker[n] is None:
214 | corespatials.append(shape[n])
215 | else:
216 | corespatials.append(ranks_tucker[n])
217 | if ranks_tt is None and ranks_cp is None:
218 | if ranks_tucker is None:
219 |             raise ValueError('Specify at least one of: ranks_tt, ranks_cp, ranks_tucker')
220 | # We imitate a Tucker decomposition: we set full TT-ranks
221 | datashape = [corespatials[0], np.prod(corespatials) // corespatials[0]]
222 | ranks_tt = []
223 | for n in range(1, N):
224 | ranks_tt.append(min(datashape))
225 | datashape = [datashape[0] * corespatials[n], datashape[1] // corespatials[n]]
226 | if not hasattr(ranks_tt, "__len__"):
227 | ranks_tt = [ranks_tt]*(N-1)
228 | ranks_tt = [None] + list(ranks_tt) + [None]
229 | if not hasattr(ranks_cp, '__len__'):
230 | ranks_cp = [ranks_cp]*N
231 | coreranks = [r for r in ranks_tt]
232 | for n in range(N):
233 | if ranks_cp[n] is not None:
234 | if ranks_tt[n] is not None or ranks_tt[n+1] is not None:
235 | raise ValueError('The ranks_tt and ranks_cp provided are incompatible')
236 | coreranks[n] = ranks_cp[n]
237 | coreranks[n+1] = ranks_cp[n]
238 | assert len(coreranks) == N+1
239 | if coreranks[0] is None:
240 | coreranks[0] = 1
241 | if coreranks[-1] is None:
242 | coreranks[-1] = 1
243 | if coreranks.count(None) > 0:
244 | raise ValueError('One or more TT/CP ranks were not specified')
245 | assert len(ranks_tucker) == N
246 |
247 | cores = []
248 | Us = []
249 | for n in range(len(shape)):
250 | if ranks_tucker[n] is None:
251 | Us.append(None)
252 | else:
253 | Us.append(function([shape[n], ranks_tucker[n]], requires_grad=requires_grad, device=device))
254 | if ranks_cp[n] is None:
255 | cores.append(function([coreranks[n], corespatials[n], coreranks[n+1]], requires_grad=requires_grad, device=device))
256 | else:
257 | cores.append(function([corespatials[n], ranks_cp[n]], requires_grad=requires_grad, device=device))
258 | return tn.Tensor(cores, Us=Us)
259 |
260 |
261 | def arange(*args, **kwargs):
262 | """
263 | Creates a 1D :class:`Tensor` (see PyTorch's `arange`).
264 |
265 | :param args:
266 | :param kwargs:
267 |
268 | :return: a 1D :class:`Tensor`
269 | """
270 |
271 | return tn.Tensor([torch.arange(*args, dtype=torch.get_default_dtype(), **kwargs)[None, :, None]])
272 |
273 |
274 | def linspace(*args, **kwargs):
275 | """
276 | Creates a 1D :class:`Tensor` with evenly spaced values (see PyTorch's `linspace`).
277 |
278 | :param args:
279 | :param kwargs:
280 |
281 | :return: a 1D :class:`Tensor`
282 | """
283 |
284 | return tn.Tensor([torch.linspace(*args, **kwargs)[None, :, None]])
285 |
286 |
287 | def logspace(*args, **kwargs):
288 | """
289 | Creates a 1D :class:`Tensor` with logarithmically spaced values (see PyTorch's `logspace`).
290 |
291 | :param args:
292 | :param kwargs:
293 |
294 | :return: a 1D :class:`Tensor`
295 | """
296 |
297 | return tn.Tensor([torch.logspace(*args, **kwargs)[None, :, None]])
298 |
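A few illustrative constructions with the factory functions above (shapes and ranks are arbitrary):

import tntorch as tn

a = tn.rand([10, 10, 10], ranks_tt=3)                   # TT tensor, all TT-ranks 3
b = tn.randn([10, 10, 10], ranks_cp=4)                  # CP tensor with 4 terms
c = tn.rand([10, 10, 10], ranks_tt=3, ranks_tucker=2)   # TT-Tucker hybrid
d = tn.full([10, 10], fill_value=3.14)                  # Constant rank-1 tensor
e = tn.linspace(0, 1, 50)                               # 1D tensor, 50 equispaced values
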
--------------------------------------------------------------------------------
/tntorch/cross.py:
--------------------------------------------------------------------------------
1 | import tntorch as tn
2 | import torch
3 | import sys
4 | import time
5 | import numpy as np
6 | import maxvolpy.maxvol
7 | import logging
8 |
9 |
10 | def cross(function, domain=None, tensors=None, function_arg='vectors', ranks_tt=None, kickrank=3, rmax=100, eps=1e-6, max_iter=25, val_size=1000, verbose=True, return_info=False):
11 | """
12 | Cross-approximation routine that samples a black-box function and returns an N-dimensional tensor train approximating it. It accepts either:
13 |
14 | - A domain (tensor product of :math:`N` given arrays) and a function :math:`\\mathbb{R}^N \\to \\mathbb{R}`
15 | - A list of :math:`K` tensors of dimension :math:`N` and equal shape and a function :math:`\\mathbb{R}^K \\to \\mathbb{R}`
16 |
17 | :Examples:
18 |
19 |     >>> tn.cross(function=lambda x: x**2, tensors=[t])  # Compute the element-wise square of `t` via cross-approximation
20 |
21 | >>> domain = [torch.linspace(-1, 1, 32)]*5
22 | >>> tn.cross(function=lambda x, y, z, t, w: x**2 + y*z + torch.cos(t + w), domain=domain) # Approximate a function over the rectangle :math:`[-1, 1]^5`
23 |
24 | >>> tn.cross(function=lambda x: torch.sum(x**2, dim=1), domain=domain, function_arg='matrix') # An example where the function accepts a matrix
25 |
26 | References:
27 |
28 | - I. Oseledets, E. Tyrtyshnikov: `"TT-cross Approximation for Multidimensional Arrays" (2009) `_
29 | - D. Savostyanov, I. Oseledets: `"Fast Adaptive Interpolation of Multi-dimensional Arrays in Tensor Train Format" (2011) `_
30 | - S. Dolgov, R. Scheichl: `"A Hybrid Alternating Least Squares - TT Cross Algorithm for Parametric PDEs" (2018) `_
31 | - A. Mikhalev's `maxvolpy package `_
32 | - I. Oseledets (and others)'s `ttpy package `_
33 |
34 | :param function: should produce a vector of :math:`P` elements. Accepts either :math:`N` comma-separated vectors, or a matrix (see `function_arg`)
35 | :param domain: a list of :math:`N` vectors (incompatible with `tensors`)
36 | :param tensors: a :class:`Tensor` or list thereof (incompatible with `domain`)
37 | :param function_arg: if 'vectors', `function` accepts :math:`N` vectors of length :math:`P` each. If 'matrix', a matrix of shape :math:`P \\times N`.
38 | :param ranks_tt: int or list of :math:`N-1` ints. If None, will be determined adaptively
39 | :param kickrank: when adaptively found, ranks will be increased by this amount after every iteration (full sweep left-to-right and right-to-left)
40 | :param rmax: this rank will not be surpassed
41 |     :param eps: the procedure will stop once the validation error falls below this threshold (measured after each iteration)
42 | :param max_iter: int
43 | :param val_size: size of the validation set
44 | :param verbose: default is True
45 | :param return_info: if True, will also return a dictionary with informative metrics about the algorithm's outcome
46 |
47 | :return: an N-dimensional TT :class:`Tensor` (if `return_info`=True, also a dictionary)
48 | """
49 |
50 | assert domain is not None or tensors is not None
51 | assert function_arg in ('vectors', 'matrix')
52 | if function_arg == 'matrix':
53 | def f(*args):
54 | return function(torch.cat([arg[:, None] for arg in args], dim=1))
55 | else:
56 | f = function
57 | if tensors is None:
58 | tensors = tn.meshgrid(domain)
59 | if not hasattr(tensors, '__len__'):
60 | tensors = [tensors]
61 | tensors = [t.decompress_tucker_factors(_clone=False) for t in tensors]
62 | Is = list(tensors[0].shape)
63 | N = len(Is)
64 |
65 | # Process ranks and cap them, if needed
66 | if ranks_tt is None:
67 | ranks_tt = 1
68 | else:
69 | kickrank = None
70 | if not hasattr(ranks_tt, '__len__'):
71 | ranks_tt = [ranks_tt]*(N-1)
72 | ranks_tt = [1] + list(ranks_tt) + [1]
73 | Rs = np.array(ranks_tt)
74 | for n in list(range(1, N)) + list(range(N-1, -1, -1)):
75 | Rs[n] = min(Rs[n-1]*Is[n-1], Rs[n], Is[n]*Rs[n+1])
76 |
77 | # Initialize cores at random
78 | cores = [torch.randn(Rs[n], Is[n], Rs[n+1]) for n in range(N)]
79 |
80 | # Prepare left and right sets
81 | lsets = [np.array([[0]])] + [None]*(N-1)
82 | randint = np.hstack([np.random.randint(0, Is[n+1], [max(Rs), 1]) for n in range(N-1)] + [np.zeros([max(Rs), 1])])
83 | rsets = [randint[:Rs[n+1], n:] for n in range(N-1)] + [np.array([[0]])]
84 |
85 | # Initialize left and right interfaces for `tensors`
86 | def init_interfaces():
87 | t_linterfaces = []
88 | t_rinterfaces = []
89 | for t in tensors:
90 | linterfaces = [torch.ones(1, t.ranks_tt[0])] + [None]*(N-1)
91 | rinterfaces = [None]*(N-1) + [torch.ones(t.ranks_tt[t.dim()], 1)]
92 | for j in range(N-1):
93 | M = torch.ones(t.cores[-1].shape[-1], len(rsets[j]))
94 | for n in range(N-1, j, -1):
95 | if t.cores[n].dim() == 3: # TT core
96 | M = torch.einsum('iaj,ja->ia', (t.cores[n][:, rsets[j][:, n-1-j], :], M))
97 | else: # CP factor
98 | M = torch.einsum('ai,ia->ia', (t.cores[n][rsets[j][:, n-1-j], :], M))
99 | rinterfaces[j] = M
100 | t_linterfaces.append(linterfaces)
101 | t_rinterfaces.append(rinterfaces)
102 | return t_linterfaces, t_rinterfaces
103 | t_linterfaces, t_rinterfaces = init_interfaces()
104 |
105 | # Create a validation set
106 | Xs_val = [torch.as_tensor(np.random.choice(I, val_size)) for I in Is]
107 | ys_val = f(*[t[Xs_val].torch() for t in tensors])
108 | if ys_val.dim() > 1:
109 | assert ys_val.dim() == 2
110 | assert ys_val.shape[1] == 1
111 | ys_val = ys_val[:, 0]
112 | assert len(ys_val) == val_size
113 | norm_ys_val = torch.norm(ys_val)
114 |
115 | if verbose:
116 | print('Cross-approximation over a {}D domain containing {:g} grid points:'.format(N, tensors[0].numel()))
117 | start = time.time()
118 | converged = False
119 |
120 | info = {
121 | 'nsamples': 0,
122 | 'eval_time': 0,
123 | 'val_epss': []
124 | }
125 |
126 | def evaluate_function(j): # Evaluate function over Rs[j] x Rs[j+1] fibers, each of size I[j]
127 | Xs = []
128 | for k, t in enumerate(tensors):
129 | if tensors[k].cores[j].dim() == 3: # TT core
130 | V = torch.einsum('ai,ibj,jc->abc', (t_linterfaces[k][j], tensors[k].cores[j], t_rinterfaces[k][j]))
131 | else: # CP factor
132 | V = torch.einsum('ai,bi,ic->abc', (t_linterfaces[k][j], tensors[k].cores[j], t_rinterfaces[k][j]))
133 | Xs.append(V.flatten())
134 |
135 | eval_start = time.time()
136 | evaluation = f(*Xs)
137 | info['eval_time'] += time.time() - eval_start
138 |
139 | # Check for nan/inf values
140 | if evaluation.dim() == 2:
141 | evaluation = evaluation[:, 0]
142 | invalid = (torch.isnan(evaluation) | torch.isinf(evaluation)).nonzero()
143 | if len(invalid) > 0:
144 | invalid = invalid[0].item()
145 | raise ValueError('Invalid return value for function {}: f({}) = {}'.format(function, ', '.join('{:g}'.format(x[invalid].numpy()) for x in Xs),
146 | f(*[x[invalid:invalid+1][:, None] for x in Xs]).item()))
147 |
148 | V = torch.reshape(evaluation, [Rs[j], Is[j], Rs[j + 1]])
149 | info['nsamples'] += V.numel()
150 | return V
151 |
152 | # Sweeps
153 | for i in range(max_iter):
154 |
155 | if verbose:
156 | print('iter: {: <{}}'.format(i, len('{}'.format(max_iter))+1), end='')
157 | sys.stdout.flush()
158 |
159 | left_locals = []
160 |
161 | # Left-to-right
162 | for j in range(0, N-1):
163 |
164 | # Update tensors for current indices
165 | V = evaluate_function(j)
166 |
167 | # QR + maxvol towards the right
168 | V = torch.reshape(V, [-1, V.shape[2]]) # Left unfolding
169 | Q, R = torch.qr(V)
170 | local, _ = maxvolpy.maxvol.maxvol(Q.detach().numpy())
171 | V = torch.gels(Q.t(), Q[local, :].t())[0].t()
172 | cores[j] = torch.reshape(V, [Rs[j], Is[j], Rs[j+1]])
173 | left_locals.append(local)
174 |
175 | # Map local indices to global ones
176 | local_r, local_i = np.unravel_index(local, [Rs[j], Is[j]])
177 | lsets[j+1] = np.c_[lsets[j][local_r, :], local_i]
178 | for k, t in enumerate(tensors):
179 | if t.cores[j].dim() == 3: # TT core
180 | t_linterfaces[k][j+1] = torch.einsum('ai,iaj->aj', (t_linterfaces[k][j][local_r, :], t.cores[j][:, local_i, :]))
181 | else: # CP factor
182 | t_linterfaces[k][j+1] = torch.einsum('ai,ai->ai', (t_linterfaces[k][j][local_r, :], t.cores[j][local_i, :]))
183 |
184 | # Right-to-left sweep
185 | for j in range(N-1, 0, -1):
186 |
187 | # Update tensors for current indices
188 | V = evaluate_function(j)
189 |
190 | # QR + maxvol towards the left
191 | V = torch.reshape(V, [Rs[j], -1]) # Right unfolding
192 | Q, R = torch.qr(V.t())
193 | local, _ = maxvolpy.maxvol.maxvol(Q.detach().numpy())
194 | V = torch.gels(Q.t(), Q[local, :].t())[0]
195 | cores[j] = torch.reshape(torch.as_tensor(V), [Rs[j], Is[j], Rs[j+1]])
196 |
197 | # Map local indices to global ones
198 | local_i, local_r = np.unravel_index(local, [Is[j], Rs[j+1]])
199 | rsets[j-1] = np.c_[local_i, rsets[j][local_r, :]]
200 | for k, t in enumerate(tensors):
201 | if t.cores[j].dim() == 3: # TT core
202 | t_rinterfaces[k][j-1] = torch.einsum('iaj,ja->ia', (t.cores[j][:, local_i, :], t_rinterfaces[k][j][:, local_r]))
203 | else: # CP factor
204 | t_rinterfaces[k][j-1] = torch.einsum('ai,ia->ia', (t.cores[j][local_i, :], t_rinterfaces[k][j][:, local_r]))
205 |
206 | # Leave the first core ready
207 | V = evaluate_function(0)
208 | cores[0] = V
209 |
210 | # Evaluate validation error
211 | val_eps = torch.norm(ys_val - tn.Tensor(cores)[Xs_val].torch()) / norm_ys_val
212 | info['val_epss'].append(val_eps)
213 | if val_eps < eps:
214 | converged = True
215 |
216 | if verbose: # Print status
217 | print('| eps: {:.3e}'.format(val_eps), end='')
218 | print(' | total time: {:8.4f} | largest rank: {:3d}'.format(time.time() - start, max(Rs)), end='')
219 | if converged:
220 | print(' <- converged: eps < {}'.format(eps))
221 | elif i == max_iter-1:
222 | print(' <- max_iter was reached: {}'.format(max_iter))
223 | else:
224 | print()
225 | if converged:
226 | break
227 | elif i < max_iter-1 and kickrank is not None: # Augment ranks
228 | newRs = Rs.copy()
229 | newRs[1:-1] = np.minimum(rmax, newRs[1:-1]+kickrank)
230 | for n in list(range(1, N)) + list(range(N-1, 0, -1)):
231 | newRs[n] = min(newRs[n-1]*Is[n-1], newRs[n], Is[n]*newRs[n+1])
232 | extra = np.hstack([np.random.randint(0, Is[n+1], [max(newRs), 1]) for n in range(N-1)] + [np.zeros([max(newRs), 1])])
233 | for n in range(N-1):
234 | if newRs[n+1] > Rs[n+1]:
235 | rsets[n] = np.vstack([rsets[n], extra[:newRs[n+1]-Rs[n+1], n:]])
236 | Rs = newRs
237 | t_linterfaces, t_rinterfaces = init_interfaces() # Recompute interfaces
238 |
239 | if val_eps > eps:
240 | logging.warning('eps={:g} (larger than {}) when cross-approximating {}'.format(val_eps, eps, function))
241 |
242 | if verbose:
243 | print('Did {} function evaluations, which took {:.4g}s ({:.4g} evals/s)'.format(info['nsamples'], info['eval_time'], info['nsamples'] / info['eval_time']))
244 | print()
245 |
246 | if return_info:
247 | info['lsets'] = lsets
248 | info['rsets'] = rsets
249 | info['left_locals'] = left_locals
250 | info['total_time'] = time.time()-start
251 | info['val_eps'] = val_eps
252 | return tn.Tensor([torch.Tensor(c) for c in cores]), info
253 | else:
254 | return tn.Tensor([torch.Tensor(c) for c in cores])
255 |
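A self-contained sketch of a typical cross() call over a gridded domain (the black-box function, grid size, and tolerance are arbitrary):

import tntorch as tn
import torch

domain = [torch.linspace(-1, 1, 32)] * 5          # A 32^5 grid over [-1, 1]^5
t, info = tn.cross(function=lambda x, y, z, v, w: x**2 + y*z + torch.cos(v + w),
                   domain=domain, eps=1e-6, verbose=False, return_info=True)
print(t)                                           # A 5D TT approximation
print(info['val_eps'], info['nsamples'])           # Final validation error and #samples drawn
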
--------------------------------------------------------------------------------
/tntorch/derivatives.py:
--------------------------------------------------------------------------------
1 | import tntorch as tn
2 | import torch
3 |
4 |
5 | def partialset(t, order=1, mask=None, bounds=None):
6 | """
7 | Given a tensor, compute another one that contains all partial derivatives of certain order(s) and according to some optional mask.
8 |
9 | :Examples:
10 |
11 | >>> t = tn.rand([10, 10, 10]) # A 3D tensor
12 | >>> x, y, z = tn.symbols(3)
13 | >>> partialset(t, 1, x) # x
14 | >>> partialset(t, 2, x) # xx, xy, xz
15 | >>> partialset(t, 2, tn.only(y | z)) # yy, yz, zz
16 |
17 | :param t: a :class:`Tensor`
18 | :param order: an int or list of ints. Default is 1
19 | :param mask: an optional mask to select only a subset of partials
20 | :param bounds: a list of pairs [lower bound, upper bound] specifying parameter ranges (used to compute derivative steps). If None (default), all steps will be 1
21 |
22 | :return: a :class:`Tensor`
23 | """
24 |
25 | if bounds is None:
26 | bounds = [[0, sh-1] for sh in t.shape]
27 | if not hasattr(order, '__len__'):
28 | order = [order]
29 |
30 | max_order = max(order)
31 | def diff(core, n):
32 | if core.dim() == 3:
33 | pad = torch.zeros(core.shape[0], 1, core.shape[2])
34 | else:
35 | pad = torch.zeros(1, core.shape[1])
36 | if core.shape[1] == 1:
37 | return pad
38 | step = (bounds[n][1] - bounds[n][0]) / (core.shape[-2] - 1)
39 | return torch.cat(((core[..., 1:, :] - core[..., :-1, :]) / step, pad), dim=-2)
40 | cores = []
41 | idxs = []
42 | for n in range(t.dim()):
43 | if t.Us[n] is None:
44 | stack = [t.cores[n]]
45 | else:
46 | stack = [torch.einsum('ijk,aj->iak', (t.cores[n], t.Us[n]))]
47 | idx = torch.zeros([t.shape[n]])
48 | for o in range(1, max_order+1):
49 | stack.append(diff(stack[-1], n))
50 | idx = torch.cat((idx, torch.ones(stack[-1].shape[-2])*o))
51 | if o == max_order:
52 | break
53 | cores.append(torch.cat(stack, dim=-2))
54 | idxs.append(idx)
55 | d = tn.Tensor(cores, idxs=idxs)
56 | wm = tn.automata.weight_mask(t.dim(), order, nsymbols=max_order+1)
57 | if mask is not None:
58 | wm = tn.mask(wm, mask)
59 | result = tn.mask(d, wm)
60 | result.idxs = idxs
61 | return result
62 |
63 |
64 | def partial(t, dim, order=1, bounds=None, periodic=False, pad='top'):
65 | """
66 | Compute a single partial derivative.
67 |
68 | :param t: a :class:`Tensor`
69 | :param dim: int or list of ints
70 | :param order: how many times to derive. Default is 1
71 | :param bounds: variable(s) range bounds (to compute the derivative step). If None (default), step 1 will be assumed
72 | :param periodic: int or list of ints (same as `dim`), mark dimensions with periodicity
73 | :param pad: string or list of strings indicating dimension zero-padding after differentiation. If 'top' (default) or 'bottom', the tensor will retain the same shape after the derivative. If 'none' it will lose one slice
74 |
75 | :return: a :class:`Tensor`
76 | """
77 |
78 | if not hasattr(dim, '__len__'):
79 | dim = [dim]
80 | if bounds is None:
81 | bounds = [[0, t.shape[n]-1] for n in range(t.dim())]
82 | if not hasattr(bounds[0], '__len__'):
83 | bounds = [bounds]
84 | if not hasattr(periodic, '__len__'):
85 | periodic = [periodic]*len(dim)
86 | if not isinstance(pad, list):
87 | pad = [pad]*len(dim)
88 |
89 | t2 = t.clone()
90 | for i, d in enumerate(dim):
91 | for o in range(1, order+1):
92 | if periodic[i]:
93 | step = (bounds[i][1] - bounds[i][0]) / t.shape[d]
94 | if t2.Us[d] is None:
95 |                     t2.cores[d] = (t2.cores[d][:, list(range(1, t2.cores[d].shape[1]))+[0], :] - t2.cores[d]) / step
96 | else:
97 | t2.Us[d] = (t2.Us[d][list(range(1, t2.Us[d].shape[0]))+[0], :] - t2.Us[d]) / step
98 | else:
99 | step = (bounds[i][1] - bounds[i][0]) / (t.shape[d]-1)
100 | if t2.Us[d] is None:
101 | t2.cores[d] = (t2.cores[d][..., 1:, :] - t2.cores[d][..., :-1, :]) / step
102 | if t2.cores[d].dim() == 3:
103 | pad_slice = torch.zeros(t2.cores[d].shape[0], 1, t2.cores[d].shape[2])
104 | else:
105 | pad_slice = torch.zeros(1, t2.cores[d].shape[1])
106 | if pad[i] == 'top':
107 | t2.cores[d] = torch.cat((t2.cores[d], pad_slice), dim=-2)
108 | if pad[i] == 'bottom':
109 | t2.cores[d] = torch.cat((pad_slice, t2.cores[d]), dim=-2)
110 | else:
111 | t2.Us[d] = (t2.Us[d][1:, :] - t2.Us[d][:-1, :]) / step
112 | if pad[i] == 'top':
113 | t2.Us[d] = torch.cat((t2.Us[d], torch.zeros(1, t2.cores[d].shape[-2])), dim=0)
114 | if pad[i] == 'bottom':
115 | t2.Us[d] = torch.cat((torch.zeros(1, t2.cores[d].shape[-2]), t2.Us[d]), dim=0)
116 | return t2
117 |
118 |
119 | def gradient(t, dim='all', bounds=None):
120 | """
121 | Compute the gradient of a tensor.
122 |
123 | :param t: a :class:`Tensor`
124 | :param dim: an integer (or list of integers). Default is all
125 | :param bounds: a pair (or list of pairs) of reals, or None. The bounds for each variable
126 |
127 | :return: a :class:`Tensor` (or a list thereof)
128 | """
129 |
130 | if dim == 'all':
131 | dim = range(t.dim())
132 | if bounds is None:
133 | bounds = [[0, t.shape[d]-1] for d in dim]
134 | if not hasattr(bounds, '__len__'):
135 | bounds = [bounds]*len(dim)
136 |
137 | if not hasattr(dim, '__len__'):
138 |         return partial(t, dim, bounds=bounds)
139 | else:
140 | return [partial(t, d, order=1, bounds=b) for d, b in zip(dim, bounds)]
141 |
142 |
143 | def active_subspace(t):
144 | """
145 | Compute the main variational directions of a tensor.
146 |
147 | Reference: P. Constantine et al. `"Discovering an Active Subspace in a Single-Diode Solar Cell Model" (2017) `_
148 |
149 | See also P. Constantine's `data set repository `_.
150 |
151 | :param t: input tensor
152 | :return: (eigvals, eigvecs): an array and a matrix, encoding the eigenpairs in descending order
153 | """
154 |
155 | grad = tn.gradient(t, dim='all')
156 |
157 | M = torch.zeros(t.dim(), t.dim())
158 | for i in range(t.dim()):
159 | for j in range(i, t.dim()):
160 | M[i, j] = tn.dot(grad[i], grad[j]) / t.size
161 | M[j, i] = M[i, j]
162 |
163 | w, v = torch.symeig(M, eigenvectors=True)
164 | idx = range(t.dim()-1, -1, -1)
165 | w = w[idx]
166 | v = v[:, idx]
167 | return w, v
168 |
169 |
170 | def divergence(ts, bounds=None):
171 | """
172 | Computes the divergence (scalar field) out of a vector field encoded in a tensor.
173 |
174 | :param ts: an ND vector field, encoded as a list of N ND tensors
175 | :param bounds:
176 |
177 | :return: a scalar field
178 | """
179 |
180 | assert ts[0].dim() == len(ts)
181 | assert all([t.shape == ts[0].shape for t in ts[1:]])
182 | if bounds is None:
183 | bounds = [None]*len(ts)
184 | elif not hasattr(bounds[0], '__len__'):
185 | bounds = [bounds for n in range(len(ts))]
186 | assert len(bounds) == len(ts)
187 |
188 | return sum([tn.partial(ts[n], n, order=1, bounds=bounds[n]) for n in range(len(ts))])
189 |
190 |
191 | def curl(ts, bounds=None):
192 | """
193 | Compute the curl of a 3D vector field.
194 |
195 | :param ts: three 3D tensors encoding the :math:`x, y, z` vector coordinates respectively
196 | :param bounds:
197 |
198 | :return: three tensors of the same shape
199 | """
200 |
201 |     assert all([t.dim() == 3 for t in ts])
202 | assert len(ts) == 3
203 | if bounds is None:
204 | bounds = [None for n in range(3)]
205 | elif not hasattr(bounds[0], '__len__'):
206 | bounds = [bounds for n in range(3)]
207 | assert len(bounds) == 3
208 |
209 | return [tn.partial(ts[2], 1, bounds=bounds[1]) - tn.partial(ts[1], 2, bounds=bounds[2]),
210 | tn.partial(ts[0], 2, bounds=bounds[2]) - tn.partial(ts[2], 0, bounds=bounds[0]),
211 | tn.partial(ts[1], 0, bounds=bounds[0]) - tn.partial(ts[0], 1, bounds=bounds[1])]
212 |
213 |
214 | def laplacian(t, bounds=None):
215 | """
216 | Computes the Laplacian of a scalar field.
217 |
218 | :param t: a :class:`Tensor`
219 | :param bounds:
220 |
221 | :return: a :class:`Tensor`
222 | """
223 |
224 | if bounds is None:
225 | bounds = [None]*t.dim()
226 | elif not hasattr(bounds[0], '__len__'):
227 | bounds = [bounds for n in range(t.dim())]
228 | assert len(bounds) == t.dim()
229 |
230 | return sum([tn.partial(t, n, order=2, bounds=bounds[n]) for n in range(t.dim())])
231 |
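A minimal sketch combining the operators above; it relies on laplacian(t) matching divergence(gradient(t)), which should hold for these forward differences with matching padding (the field and ranks are arbitrary):

import tntorch as tn

t = tn.rand([32] * 3, ranks_tt=4)       # A 3D scalar field
grad = tn.gradient(t)                   # List of 3 first-order partials
lap = tn.laplacian(t)                   # Sum of second-order partials
div = tn.divergence(grad)               # Divergence of the gradient field
print(tn.relative_error(lap, div))      # Expected to be ~0 (up to round-off)
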
--------------------------------------------------------------------------------
/tntorch/logic.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import tntorch as tn
3 | import numpy as np
4 |
5 |
6 | def true(N):
7 | """
8 | Create a formula (N-dimensional tensor) that is always true.
9 |
10 | :param N: an integer
11 |
12 | :return: a :math:`2^N` :class:`Tensor`
13 | """
14 |
15 | return tn.Tensor([torch.ones([1, 2, 1]) for n in range(N)])
16 |
17 |
18 | def false(N):
19 | """
20 | Create a formula (N-dimensional tensor) that is always false.
21 |
22 | :param N: an integer
23 |
24 | :return: a :math:`2^N` :class:`Tensor`
25 | """
26 |
27 | return tn.Tensor([torch.zeros([1, 2, 1]) for n in range(N)])
28 |
29 |
30 | def all(N, which=None):
31 | """
32 | Create a formula (N-dimensional tensor) that is satisfied iff all symbols are true.
33 |
34 | :param N: an integer
35 | :param which: list of integers to consider (default: all)
36 |
37 | :return: a :math:`2^N` :class:`Tensor`
38 | """
39 |
40 | if which is None:
41 | which = list(range(N))
42 |
43 | cores = []
44 | for n in range(N):
45 | if n in which:
46 | cores.append(torch.cat([torch.zeros(1, 1, 1), torch.ones(1, 1, 1)], dim=1))
47 | else:
48 | cores.append(torch.ones(1, 2, 1))
49 | return tn.Tensor(cores)
50 |
51 |
52 | def none(N, which=None):
53 | """
54 | Create a formula (N-dimensional tensor) that is satisfied iff all symbols are false.
55 |
56 | :param N: an integer
57 | :param which: list of integers to consider (default: all)
58 |
59 | :return: a :math:`2^N` :class:`Tensor`
60 | """
61 |
62 | if which is None:
63 | which = list(range(N))
64 |
65 | cores = []
66 | for n in range(N):
67 | if n in which:
68 | cores.append(torch.cat([torch.ones(1, 1, 1), torch.zeros(1, 1, 1)], dim=1))
69 | else:
70 | cores.append(torch.ones(1, 2, 1))
71 | return tn.Tensor(cores)
72 |
73 |
74 | def any(N, which=None):
75 | """
76 | Create a formula (N-dimensional tensor) that is satisfied iff at least one symbol is true.
77 |
78 | :param N: an integer
79 | :param which: list of integers to consider (default: all)
80 |
81 | :return: a :math:`2^N` :class:`Tensor`
82 | """
83 |
84 | return ~none(N, which)
85 |
86 |
87 | def one(N, which=None):
88 | """
89 | Create a formula (N-dimensional tensor) that is satisfied iff one and only one input is true.
90 |
91 | Also known as "n-ary exclusive or".
92 |
93 | :param N: an integer
94 | :param which: list of integers to consider (default: all)
95 |
96 | :return: a :math:`2^N` :class:`Tensor`
97 | """
98 |
99 | if which is None:
100 | return tn.automata.weight_mask(N, 1)
101 | else:
102 | return tn.automata.weight_mask(N, 1) & tn.any(N, which)
103 |
104 |
105 | def symbols(N):
106 | """
107 | Generate N Boolean symbols (each represented as an N-dimensional tensor).
108 |
109 | :param N: an integer
110 |
111 | :return: a list of N :math:`2^N` :class:`Tensor`
112 | """
113 |
114 | return [presence(N, n) for n in range(N)]
115 |
116 |
117 | def relevant_symbols(t):
118 | """
119 | Finds all variables whose values affect the formula's output in at least one case.
120 |
121 | :param t: a :math:`2^N` :class:`Tensor`
122 |
123 | :return: a list of integers
124 | """
125 |
126 | cores = [torch.cat((c[:, 1:2, :]-c[:, 0:1, :], c), dim=1) for c in t.cores]
127 | t2 = tn.Tensor(cores)
128 | return [n for n in range(t.dim()) if tn.norm(t2[[slice(1, 3)]*n + [0] + [slice(1, 3)]*(t.dim()-n-1)]) > 1e-10]
129 |
130 |
131 | def irrelevant_symbols(t):
132 | """
133 | Finds all variables whose values never affect the formula's output.
134 |
135 | :param t: a :math:`2^N` :class:`Tensor`
136 |
137 | :return: a list of integers
138 | """
139 |
140 | rel = relevant_symbols(t)
141 | return [n for n in range(t.dim()) if n not in rel]
142 |
143 |
144 | def only(t):
145 | """
146 | Forces all irrelevant symbols to be zero.
147 |
148 | :Example:
149 |
150 | >>> x, y = tn.symbols(2)
151 | >>> tn.sum(x) # Result: 2 (x = True, y = False, and x = True, y = True)
152 | >>> tn.sum(tn.only(x)) # Result: 1 (x = True, y = False)
153 |
154 | :param: a :math:`2^N` :class:`Tensor`
155 |
156 | :return: a masked :class:`Tensor`
157 | """
158 |
159 | return tn.mask(t, absence(t.dim(), irrelevant_symbols(t)))
160 |
161 |
162 | def presence(N, which):
163 | """
164 | True iff all symbols in `which` are present.
165 |
166 | :param N: int
167 | :param which: a list of ints
168 |
169 | :return: a masked :class:`Tensor`
170 | """
171 |
172 | which = np.atleast_1d(which)
173 | cores = [torch.ones([1, 2, 1]) for n in range(N)]
174 | for w in which:
175 | cores[w][0, 0, 0] = 0
176 | return tn.Tensor(cores)
177 |
178 |
179 | def absence(N, which):
180 | """
181 | True iff all symbols in `which` are absent.
182 |
183 | :param N: int
184 | :param which: a list of ints
185 |
186 | :return: a masked :class:`Tensor`
187 | """
188 |
189 | which = np.atleast_1d(which)
190 | cores = [torch.ones([1, 2, 1]) for n in range(N)]
191 | for w in which:
192 | cores[w][0, 1, 0] = 0
193 | return tn.Tensor(cores)
194 |
195 |
196 | def is_tautology(t):
197 | """
198 | Checks if a formula is always satisfied.
199 |
200 | :param t: a :math:`2^N` :class:`Tensor`
201 |
202 | :return: True if `t` is a tautology; False otherwise
203 | """
204 |
205 | return bool(tn.norm(~t) <= 1e-6)
206 |
207 |
208 | def is_contradiction(t):
209 | """
210 | Checks if a formula is never satisfied.
211 |
212 | :param t: a :math:`2^N` tensor
213 |
214 | :return: True if `t` is a contradiction; False otherwise
215 | """
216 |
217 | return bool(tn.norm(t) <= 1e-6)
218 |
219 |
220 | def is_satisfiable(t):
221 | """
222 | Checks if a formula can be satisfied.
223 |
224 | :param t: a :math:`2^N` :class:`Tensor`
225 |
226 | :return: True if `t` is satisfiable; False otherwise
227 | """
228 |
229 | return bool(tn.sum(t) >= 1e-6)
230 |
231 |
232 | def implies(t1, t2):
233 | """
234 | Checks if a formula implies another one (i.e. is a sufficient condition).
235 |
236 | :param t1: a :math:`2^N` :class:`Tensor`
237 | :param t2: a :math:`2^N` :class:`Tensor`
238 |
239 | :return: True if `t1` implies `t2`; False otherwise
240 | """
241 |
242 | return bool(is_contradiction(t1 & ~t2))
243 |
244 |
245 | def equiv(t1, t2):
246 | """
247 | Checks if two formulas are logically equivalent.
248 |
249 | :param t1: a :math:`2^N` :class:`Tensor`
250 | :param t2: a :math:`2^N` :class:`Tensor`
251 |
252 | :return: True if `t1` implies `t2` and vice versa; False otherwise
253 | """
254 |
255 | return implies(t1, t2) and implies(t2, t1)
256 |
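# Illustrative sketch added for documentation (not part of the original module): a few
# sanity checks that exercise the helpers above. It only relies on functions defined in
# this file and on the Boolean `&` / `~` operators that they themselves use.
def _example_logic_sketch():
    x, y, z = symbols(3)
    assert implies(x & y, x)               # a conjunction is a sufficient condition for each operand
    assert equiv(x & y, y & x)             # conjunction is commutative
    assert is_contradiction(x & ~x)        # "x and not x" can never hold...
    assert is_tautology(~(x & ~x))         # ...so its negation always holds
    assert 2 in irrelevant_symbols(x & y)  # the third symbol (z) plays no role in this formula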
--------------------------------------------------------------------------------
/tntorch/metrics.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | import tntorch as tn
4 |
5 |
6 | def _process(gt, approx):
7 | """
8 | If *only one* of the arguments is a compressed tensor, we decompress it
9 | """
10 |
11 | # assert np.array_equal(gt.shape, approx.shape)
12 | is1 = isinstance(gt, tn.Tensor)
13 | is2 = isinstance(approx, tn.Tensor)
14 | if is1 and is2:
15 | return gt, approx
16 | if is1:
17 | gt = gt.torch()
18 | if is2:
19 | approx = approx.torch()
20 | return gt, approx
21 |
22 |
23 | def dot(t1, t2, k=None):
24 | """
25 | Generalized tensor dot product: contracts the k leading dimensions of two tensors of dimension N1 and N2.
26 |
27 | - If k is None:
28 | - If N1 == N2, returns a scalar (dot product between the two tensors)
29 | - If N1 < N2, the result will have dimension N2 - N1
30 | - If N2 < N1, the result will have dimension N1 - N2
31 |
32 | Example: suppose t1 has shape 3 x 4 and t2 has shape 3 x 4 x 5 x 6. Then, tn.dot(t1, t2) will have shape
33 | 5 x 6.
34 |
35 | - If k is given:
36 | The result will keep the trailing (N1-k) dimensions of the 1st tensor in reversed order, followed by the
37 | trailing (N2-k) dimensions of the 2nd tensor.
38 |
39 | Example: suppose t1 has shape 3 x 4 x 5 x 6 and t2 has shape 3 x 4 x 10 x 11. Then, tn.dot(t1, t2, k=2) will
40 | have shape 6 x 5 x 10 x 11.
41 |
42 | :param t1: a :class:`Tensor` (or a PyTorch tensor)
43 | :param t2: a :class:`Tensor` (or a PyTorch tensor)
44 | :param k: an int (default: None)
45 |
46 | :return: a scalar (if all dimensions of both tensors are contracted), a :class:`Tensor` otherwise
47 | """
48 |
49 | def _project_spatial(core, M):
50 | if core.dim() == 3:
51 | return torch.einsum('iak,aj->ijk', (core, M))
52 | else:
53 | return torch.einsum('ak,aj->jk', (core, M))
54 |
55 | def _project_left(core, M):
56 | if core.dim() == 3:
57 | return torch.einsum('sr,rai->sai', (M, core))
58 | else:
59 | return torch.einsum('sr,ar->sar', (M, core))
60 |
61 | t1, t2 = _process(t1, t2)
62 | if isinstance(t1, torch.Tensor) and isinstance(t2, torch.Tensor):
63 | return t1.flatten().dot(t2.flatten())
64 | Lprod = torch.ones([t2.ranks_tt[0], t1.ranks_tt[0]], device=t1.cores[0].device)
65 | if k is None:
66 | k = min(t1.dim(), t2.dim())
67 | assert k <= t1.dim() and k <= t2.dim()
68 | if not np.array_equal(t1.shape[:k], t2.shape[:k]):
69 | raise ValueError('Dot product requires leading dimensions to be equal, but they are {} and {}'.format(t1.shape[:k], t2.shape[:k]))
70 |
71 | # Crunch first k dimensions of both tensors
72 | for mu in range(k):
73 | core1 = t1.cores[mu]
74 | core2 = t2.cores[mu]
75 | # First part: absorb Tucker factors
76 | if t1.Us[mu] is None:
77 | if t2.Us[mu] is not None:
78 | core1 = _project_spatial(core1, t2.Us[mu])
79 | elif t2.Us[mu] is None:
80 | core2 = _project_spatial(core2, t1.Us[mu])
81 | else: # Both have Tucker factors
82 | core2 = _project_spatial(core2, torch.matmul(t2.Us[mu].t(), t1.Us[mu]))
83 | # Second part: advance running factor `Lprod`
84 | Ucore = _project_left(core1, Lprod)
85 | Vcore = core2
86 | if Vcore.dim() == 3:
87 | Lprod = torch.matmul(tn.left_unfolding(Vcore).t(), tn.left_unfolding(Ucore))
88 | else:
89 | Lprod = torch.einsum('as,sar->sr', (Vcore, Ucore))
90 |
91 | # Deal with unprocessed dimensions, if any
92 | if k < t1.dim():
93 | t1trail = tn.Tensor(t1.cores[k:], t1.Us[k:]).clone()
94 | t1trail.cores[0] = _project_left(t1trail.cores[0], Lprod)
95 | if k == t2.dim():
96 | return t1trail
97 | else:
98 | t2trail = tn.Tensor(t2.cores[k:], t2.Us[k:]).clone()
99 | t1trail = tn.transpose(t1trail)
100 | return tn.Tensor(t1trail.cores + t2trail.cores, Us=t1trail.Us + t2trail.Us)
101 | else:
102 | if k == t2.dim():
103 | return torch.sum(Lprod)
104 | else:
105 | t2trail = tn.Tensor(t2.cores[k:], t2.Us[k:])#.clone()
106 | t2trail.cores[0] = _project_left(t2trail.cores[0], Lprod.t())
107 | return t2trail
108 |
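# Illustrative sketch added for documentation (not part of the original module): reproduces
# the shape behaviour described in the docstring of `dot()` above. The TT tensors are built
# directly from lists of cores, as done elsewhere in this package.
def _example_dot_shapes():
    t1 = tn.Tensor([torch.rand(1, 3, 2), torch.rand(2, 4, 1)])                                             # shape 3 x 4
    t2 = tn.Tensor([torch.rand(1, 3, 2), torch.rand(2, 4, 3), torch.rand(3, 5, 2), torch.rand(2, 6, 1)])   # shape 3 x 4 x 5 x 6
    assert list(dot(t1, t2).shape) == [5, 6]                 # the 2 leading dimensions are contracted
    t3 = tn.Tensor([torch.rand(1, 3, 2), torch.rand(2, 4, 2), torch.rand(2, 10, 2), torch.rand(2, 11, 1)])  # shape 3 x 4 x 10 x 11
    assert list(dot(t2, t3, k=2).shape) == [6, 5, 10, 11]    # trailing dims of t2 reversed, then trailing dims of t3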
109 |
110 | def dist(t1, t2):
111 | """
112 | Computes the Euclidean distance between two tensors. Generally faster than `tn.norm(t1-t2)`.
113 |
114 | :param t1: a :class:`Tensor` (or a PyTorch tensor)
115 | :param t2: a :class:`Tensor` (or a PyTorch tensor)
116 |
117 | :return: a scalar :math:`\ge 0`
118 | """
119 |
120 | t1, t2 = _process(t1, t2)
121 | if isinstance(t1, torch.Tensor) and isinstance(t2, torch.Tensor):
122 | return torch.dist(t1, t2)
123 | return torch.sqrt((tn.dot(t1, t1) + tn.dot(t2, t2) - 2 * tn.dot(t1, t2)).clamp(0))  # clamp the whole sum, which may dip slightly below 0 due to rounding
124 |
125 |
126 | def relative_error(gt, approx):
127 | """
128 | Computes the relative error between two tensors (torch or tntorch).
129 |
130 | :param gt: a torch or tntorch tensor
131 | :param approx: a torch or tntorch tensor
132 |
133 | :return: a scalar :math:`\ge 0`
134 | """
135 |
136 | gt, approx = _process(gt, approx)
137 | if isinstance(gt, torch.Tensor) and isinstance(approx, torch.Tensor):
138 | return torch.dist(gt, approx) / torch.norm(gt)
139 | dotgt = tn.dot(gt, gt)
140 | return torch.sqrt((dotgt + tn.dot(approx, approx) - 2*tn.dot(gt, approx)).clamp(0)) / torch.sqrt(dotgt.clamp(0))
141 |
142 |
143 | def rmse(gt, approx):
144 | """
145 | Computes the RMSE between two tensors (torch or tntorch).
146 |
147 | :param gt: a torch or tntorch tensor
148 | :param approx: a torch or tntorch tensor
149 |
150 | :return: a scalar :math:`\ge 0`
151 | """
152 |
153 | gt, approx = _process(gt, approx)
154 | if isinstance(gt, torch.Tensor) and isinstance(approx, torch.Tensor):
155 | return torch.dist(gt, approx) / np.sqrt(gt.numel())
156 | return tn.dist(gt, approx) / np.sqrt(gt.numel())
157 |
158 |
159 | def r_squared(gt, approx):
160 | """
161 | Computes the :math:`R^2` score between two tensors (torch or tntorch).
162 |
163 | :param gt: a torch or tntorch tensor
164 | :param approx: a torch or tntorch tensor
165 |
166 | :return: a scalar :math:`\le 1`
167 | """
168 |
169 | gt, approx = _process(gt, approx)
170 | if isinstance(gt, torch.Tensor) and isinstance(approx, torch.Tensor):
171 | return 1 - torch.dist(gt, approx)**2 / torch.dist(gt, torch.mean(gt))**2
172 | return 1 - tn.dist(gt, approx)**2 / tn.normsq(gt-tn.mean(gt))
173 |
174 |
175 | def sum(t, dim=None, keepdim=False, _normalize=False):
176 | """
177 | Computes the sum of a tensor along all (or some) of its dimensions.
178 |
179 | :param t: input :class:`Tensor`
180 | :param dim: an int or list of ints. By default, all dims will be summed
181 | :param keepdim: if True, summed dimensions will be kept as singletons. Default is False
182 |
183 | :return: a scalar (if keepdim is False and all dims were chosen) or :class:`Tensor` otherwise
184 | """
185 |
186 | if dim is None:
187 | dim = np.arange(t.dim())
188 | if not hasattr(dim, '__len__'):
189 | dim = [dim]
190 | device = t.cores[0].device
191 | if _normalize:
192 | us = [(1./t.shape[d])*torch.ones(t.shape[d]).to(device) for d in dim]
193 | else:
194 | us = [torch.ones(t.shape[d]).to(device) for d in dim]
195 | result = tn.ttm(t, us, dim)
196 | if keepdim:
197 | return result
198 | else:
199 | return tn.squeeze(result, dim)
200 |
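# Illustrative sketch added for documentation (not part of the original module): compares
# `sum()` on a small TT tensor against the equivalent dense computation (the contraction
# with vectors of ones is exact, so no approximation error is involved).
def _example_sum_sketch():
    t = tn.Tensor([torch.rand(1, 4, 2), torch.rand(2, 5, 2), torch.rand(2, 6, 1)])   # shape 4 x 5 x 6
    assert abs(tn.sum(t) - torch.sum(t.torch())) < 1e-3
    assert list(tn.sum(t, dim=1).shape) == [4, 6]                   # dim 1 is summed out
    assert list(tn.sum(t, dim=1, keepdim=True).shape) == [4, 1, 6]  # ...or kept as a singleton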
201 |
202 | def mean(t, dim=None, keepdim=False):
203 | """
204 | Computes the mean of a :class:`Tensor` along all or some of its dimensions.
205 |
206 | :param t: a :class:`Tensor`
207 | :param dim: an int or list of ints (default: all)
208 | :param keepdim: whether to keep the same number of dimensions
209 |
210 | :return: a scalar (or a :class:`Tensor`, if some dimensions are not reduced)
211 | """
212 |
213 | return tn.sum(t, dim, keepdim, _normalize=True)
214 |
215 |
216 | def var(t):
217 | """
218 | Computes the variance of a :class:`Tensor`.
219 |
220 | :param t: a :class:`Tensor`
221 |
222 | :return: a scalar :math:`\ge 0`
223 | """
224 |
225 | return tn.normsq(t-tn.mean(t)) / t.numel()
226 |
227 |
228 | def std(t):
229 | """
230 | Computes the standard deviation of a :class:`Tensor`.
231 |
232 | :param t: a :class:`Tensor`
233 |
234 | :return: a scalar :math:`\ge 0`
235 | """
236 |
237 | return torch.sqrt(tn.var(t))
238 |
239 |
240 | def skew(t):
241 | """
242 | Computes the skewness of a :class:`Tensor`. Note: this function uses cross-approximation (:func:`tntorch.cross()`).
243 |
244 | :param t: a :class:`Tensor`
245 |
246 | :return: a scalar
247 | """
248 |
249 | return tn.mean(((t-tn.mean(t))/tn.std(t))**3)
250 |
251 |
252 | def kurtosis(t, fisher=True):
253 | """
254 | Computes the kurtosis of a :class:`Tensor`. Note: this function uses cross-approximation (:func:`tntorch.cross()`).
255 |
256 | :param t: a :class:`Tensor`
257 | :param fisher: if True (default), Fisher's definition (excess kurtosis) is used; otherwise Pearson's
258 |
259 | :return: a scalar
260 | """
261 |
262 | return tn.mean(((t-tn.mean(t))/tn.std(t))**4) - fisher*3
263 |
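# Illustrative sketch added for documentation (not part of the original module): the first
# two moments can be checked against their dense counterparts. Note that `var()` divides by N,
# so the comparison uses torch's population variance (unbiased=False). `skew()` and `kurtosis()`
# rely on cross-approximation and are therefore only approximate.
def _example_moments_sketch():
    t = tn.Tensor([torch.rand(1, 8, 3), torch.rand(3, 8, 3), torch.rand(3, 8, 1)])   # shape 8 x 8 x 8
    full = t.torch()
    assert abs(tn.mean(t) - torch.mean(full)) < 1e-3
    assert abs(tn.var(t) - torch.var(full, unbiased=False)) < 1e-3
    assert abs(tn.std(t) - torch.std(full, unbiased=False)) < 1e-3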
264 |
265 | def normsq(t):
266 | """
267 | Computes the squared norm of a :class:`Tensor`.
268 |
269 | :param t: a :class:`Tensor`
270 |
271 | :return: a scalar :math:`\ge 0`
272 | """
273 |
274 | return tn.dot(t, t)
275 |
276 |
277 | def norm(t):
278 | """
279 | Computes the :math:`L^2` (Frobenius) norm of a tensor.
280 |
281 | :param t: a :class:`Tensor`
282 |
283 | :return: a scalar :math:`\ge 0`
284 | """
285 |
286 | return torch.sqrt(torch.clamp(tn.normsq(t), min=0))
287 |
--------------------------------------------------------------------------------
/tntorch/ops.py:
--------------------------------------------------------------------------------
1 | import tntorch as tn
2 | import torch
3 |
4 |
5 | def cumsum(t, dim=None):
6 | """
7 | Computes the cumulative sum of a tensor along one or several dims, similarly to PyTorch's `cumsum()`.
8 |
9 | :param t: input :class:`Tensor`
10 | :param dim: an int or list of ints (default: all)
11 |
12 | :return: a :class:`Tensor` of the same shape
13 | """
14 |
15 | if dim is None:
16 | dim = range(t.dim())
17 | if not hasattr(dim, '__len__'):
18 | dim = [dim]
19 |
20 | t = t.clone()
21 | for n in dim:
22 | if t.Us[n] is None:
23 | t.cores[n] = torch.cumsum(t.cores[n], dim=-2)
24 | else:
25 | t.Us[n] = torch.cumsum(t.Us[n], dim=0)
26 | return t
27 |
28 |
29 | def cumprod(t, dim=None):
30 | """
31 | Computes the cumulative product of a tensor along one or several dims, similarly to PyTorch's `cumprod()`.
32 |
33 | Note: this function is approximate and uses cross-approximation (:func:`tntorch.cross()`)
34 |
35 | :param t: input :class:`Tensor`
36 | :param dim: an int or list of ints (default: all)
37 |
38 | :return: a :class:`Tensor` of the same shape
39 | """
40 |
41 | return tn.exp(tn.cumsum(tn.log(t), dim=dim))
42 |
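# Illustrative sketch added for documentation (not part of the original module): `cumsum()`
# operates core by core and is exact, so it can be checked against the dense result;
# `cumprod()` goes through exp(cumsum(log(t))) with cross-approximation and is only approximate.
def _example_cumsum_sketch():
    t = tn.Tensor([torch.rand(1, 4, 2), torch.rand(2, 5, 2), torch.rand(2, 6, 1)])   # shape 4 x 5 x 6
    dense = torch.cumsum(t.torch(), dim=0)
    assert torch.dist(tn.cumsum(t, dim=0).torch(), dense) < 1e-3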
43 |
44 | """
45 | Unary operations (using cross-approximation)
46 | """
47 |
48 |
49 | def abs(t):
50 | """
51 | Element-wise absolute value computed using cross-approximation; see PyTorch's `abs()`.
52 |
53 | :param t: input :class:`Tensor`
54 |
55 | :return: a :class:`Tensor`
56 | """
57 |
58 | return tn.cross(lambda x: torch.abs(x), tensors=t, verbose=False)
59 |
60 |
61 | def acos(t):
62 | """
63 | Element-wise arccosine computed using cross-approximation; see PyTorch's `acos()`.
64 |
65 | :param t: input :class:`Tensor`
66 |
67 | :return: a :class:`Tensor`
68 | """
69 |
70 | return tn.cross(lambda x: torch.acos(x), tensors=t, verbose=False)
71 |
72 |
73 | def asin(t):
74 | """
75 | Element-wise arcsine computed using cross-approximation; see PyTorch's `asin()`.
76 |
77 | :param t: input :class:`Tensor`
78 |
79 | :return: a :class:`Tensor`
80 | """
81 |
82 | return tn.cross(lambda x: torch.asin(x), tensors=t, verbose=False)
83 |
84 |
85 | def cos(t):
86 | """
87 | Element-wise cosine computed using cross-approximation; see PyTorch's `cos()`.
88 |
89 | :param t: input :class:`Tensor`
90 |
91 | :return: a :class:`Tensor`
92 | """
93 |
94 | return tn.cross(lambda x: torch.cos(x), tensors=t, verbose=False)
95 |
96 |
97 | def cosh(t):
98 | """
99 | Element-wise hyperbolic cosine computed using cross-approximation; see PyTorch's `cosh()`.
100 |
101 | :param t: input :class:`Tensor`
102 |
103 | :return: a :class:`Tensor`
104 | """
105 |
106 | return tn.cross(lambda x: torch.cosh(x), tensors=t, verbose=False)
107 |
108 |
109 | def erf(t):
110 | """
111 | Element-wise error function computed using cross-approximation; see PyTorch's `erf()`.
112 |
113 | :param t: input :class:`Tensor`
114 |
115 | :return: a :class:`Tensor`
116 | """
117 |
118 | return tn.cross(lambda x: torch.erf(x), tensors=t, verbose=False)
119 |
120 |
121 | def erfinv(t):
122 | """
123 | Element-wise inverse error function computed using cross-approximation; see PyTorch's `erfinv()`.
124 |
125 | :param t: input :class:`Tensor`
126 |
127 | :return: a :class:`Tensor`
128 | """
129 |
130 | return tn.cross(lambda x: torch.erfinv(x), tensors=t, verbose=False)
131 |
132 |
133 | def exp(t):
134 | """
135 | Element-wise exponentiation computed using cross-approximation; see PyTorch's `exp()`.
136 |
137 | :param t: input :class:`Tensor`
138 |
139 | :return: a :class:`Tensor`
140 | """
141 |
142 | return tn.cross(lambda x: torch.exp(x), tensors=t, verbose=False)
143 |
144 |
145 | def log(t):
146 | """
147 | Element-wise natural logarithm computed using cross-approximation; see PyTorch's `log()`.
148 |
149 | :param t: input :class:`Tensor`
150 |
151 | :return: a :class:`Tensor`
152 | """
153 |
154 | return tn.cross(lambda x: torch.log(x), tensors=t, verbose=False)
155 |
156 |
157 | def log10(t):
158 | """
159 | Element-wise base-10 logarithm computed using cross-approximation; see PyTorch's `log10()`.
160 |
161 | :param t: input :class:`Tensor`
162 |
163 | :return: a :class:`Tensor`
164 | """
165 |
166 | return tn.cross(lambda x: torch.log10(x), tensors=t, verbose=False)
167 |
168 |
169 | def log2(t):
170 | """
171 | Element-wise base-2 logarithm computed using cross-approximation; see PyTorch's `log2()`.
172 |
173 | :param t: input :class:`Tensor`
174 |
175 | :return: a :class:`Tensor`
176 | """
177 |
178 | return tn.cross(lambda x: torch.log2(x), tensors=t, verbose=False)
179 |
180 |
181 | def reciprocal(t):
182 | """
183 | Element-wise reciprocal computed using cross-approximation; see PyTorch's `reciprocal()`.
184 |
185 | :param t: input :class:`Tensor`
186 |
187 | :return: a :class:`Tensor`
188 | """
189 |
190 | return tn.cross(lambda x: torch.reciprocal(x), tensors=t, verbose=False)
191 |
192 |
193 | def rsqrt(t):
194 | """
195 | Element-wise reciprocal of the square root computed using cross-approximation; see PyTorch's `rsqrt()`.
196 |
197 | :param t: input :class:`Tensor`
198 |
199 | :return: a :class:`Tensor`
200 | """
201 |
202 | return tn.cross(lambda x: torch.rsqrt(x), tensors=t, verbose=False)
203 |
204 |
205 | def sigmoid(t):
206 | """
207 | Element-wise sigmoid computed using cross-approximation; see PyTorch's `sigmoid()`.
208 |
209 | :param t: input :class:`Tensor`
210 |
211 | :return: a :class:`Tensor`
212 | """
213 |
214 | return tn.cross(lambda x: torch.sigmoid(x), tensors=t, verbose=False)
215 |
216 |
217 | def sin(t):
218 | """
219 | Element-wise sine computed using cross-approximation; see PyTorch's `sin()`.
220 |
221 | :param t: input :class:`Tensor`
222 |
223 | :return: a :class:`Tensor`
224 | """
225 |
226 | return tn.cross(lambda x: torch.sin(x), tensors=t, verbose=False)
227 |
228 |
229 | def sinh(t):
230 | """
231 | Element-wise hyperbolic sine computed using cross-approximation; see PyTorch's `sinh()`.
232 |
233 | :param t: input :class:`Tensor`
234 |
235 | :return: a :class:`Tensor`
236 | """
237 |
238 | return tn.cross(lambda x: torch.sinh(x), tensors=t, verbose=False)
239 |
240 |
241 | def sqrt(t):
242 | """
243 | Element-wise square root computed using cross-approximation; see PyTorch's `sqrt()`.
244 |
245 | :param t: input :class:`Tensor`
246 |
247 | :return: a :class:`Tensor`
248 | """
249 |
250 | return tn.cross(lambda x: torch.sqrt(x), tensors=t, verbose=False)
251 |
252 |
253 | def tan(t):
254 | """
255 | Element-wise tangent computed using cross-approximation; see PyTorch's `tan()`.
256 |
257 | :param t: input :class:`Tensor`
258 |
259 | :return: a :class:`Tensor`
260 | """
261 |
262 | return tn.cross(lambda x: torch.tan(x), tensors=t, verbose=False)
263 |
264 |
265 | def tanh(t):
266 | """
267 | Element-wise hyperbolic tangent computed using cross-approximation; see PyTorch's `tanh()`.
268 |
269 | :param t: input :class:`Tensor`
270 |
271 | :return: a :class:`Tensor`
272 | """
273 |
274 | return tn.cross(lambda x: torch.tanh(x), tensors=t, verbose=False)
275 |
276 |
277 | """
278 | Binary operations (using cross-approximation)
279 | """
280 |
281 |
282 | def add(t1, t2):
283 | """
284 | Element-wise addition computed using cross-approximation; see PyTorch's `add()`.
285 |
286 | :param t1: input :class:`Tensor`
287 | :param t2: input :class:`Tensor`
288 |
289 | :return: a :class:`Tensor`
290 | """
291 |
292 | return tn.cross(lambda x, y: torch.add(x, y), tensors=[t1, t2], verbose=False)
293 |
294 |
295 | def atan2(t1, t2):
296 | """
297 | Element-wise two-argument arctangent computed using cross-approximation; see PyTorch's `atan2()`.
298 |
299 | :param t1: input :class:`Tensor`
300 | :param t2: input :class:`Tensor`
301 |
302 | :return: a :class:`Tensor`
303 | """
304 |
305 | return tn.cross(lambda x, y: torch.atan2(x, y), tensors=[t1, t2], verbose=False)
306 |
307 |
308 | def div(t1, t2):
309 | """
310 | Element-wise division computed using cross-approximation; see PyTorch's `div()`.
311 |
312 | :param t1: input :class:`Tensor`
313 | :param t2: input :class:`Tensor`
314 |
315 | :return: a :class:`Tensor`
316 | """
317 |
318 | return t1/t2
319 |
320 |
321 | def mul(t1, t2):
322 | """
323 | Element-wise product computed using cross-approximation; see PyTorch's `mul()`.
324 |
325 | :param t1: input :class:`Tensor`
326 | :param t2: input :class:`Tensor`
327 |
328 | :return: a :class:`Tensor`
329 | """
330 |
331 | return tn.cross(lambda x, y: torch.mul(x, y), tensors=[t1, t2], verbose=False)
332 |
333 |
334 | def pow(t1, t2):
335 | """
336 | Element-wise power operation computed using cross-approximation; see PyTorch's `pow()`.
337 |
338 | :param t1: input :class:`Tensor`
339 | :param t2: input :class:`Tensor`
340 |
341 | :return: a :class:`Tensor`
342 | """
343 |
344 | return t1**t2
345 |
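# Illustrative sketch added for documentation (not part of the original module): all the
# wrappers above delegate to tn.cross(), so their results are approximations. This compares
# one of them against the dense computation; the error is typically small but not exactly zero.
def _example_cross_ops_sketch():
    t = tn.Tensor([torch.rand(1, 8, 3), torch.rand(3, 8, 3), torch.rand(3, 8, 1)])   # shape 8 x 8 x 8
    approx = tn.sin(t)                                   # element-wise sine via cross-approximation
    return tn.relative_error(torch.sin(t.torch()), approx)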
--------------------------------------------------------------------------------
/tntorch/round.py:
--------------------------------------------------------------------------------
1 | import tntorch as tn
2 | import torch
3 | import numpy as np
4 | import time
5 |
6 |
7 | def round_tt(t, **kwargs):
8 | """
9 | Copies and rounds a tensor (see :meth:`tensor.Tensor.round_tt()`).
10 |
11 | :param t: input :class:`Tensor`
12 | :param kwargs:
13 |
14 | :return: a rounded copy of `t`
15 | """
16 |
17 | t2 = t.clone()
18 | t2.round_tt(**kwargs)
19 | return t2
20 |
21 |
22 | def round_tucker(t, **kwargs):
23 | """
24 | Copies and rounds a tensor (see :meth:`tensor.Tensor.round_tucker()`).
25 |
26 | :param t: input :class:`Tensor`
27 | :param kwargs:
28 |
29 | :return: a rounded copy of `t`
30 | """
31 |
32 | t2 = t.clone()
33 | t2.round_tucker(**kwargs)
34 | return t2
35 |
36 |
37 | def round(t, **kwargs):
38 | """
39 | Copies and rounds a tensor (see :meth:`tensor.Tensor.round()`).
40 |
41 | :param t: input :class:`Tensor`
42 | :param kwargs:
43 |
44 | :return: a rounded copy of `t`
45 | """
46 |
47 | t2 = t.clone()
48 | t2.round(**kwargs)
49 | return t2
50 |
51 |
52 | def truncated_svd(M, delta=None, eps=None, rmax=None, left_ortho=True, algorithm='svd', verbose=False):
53 | """
54 | Decomposes a matrix M (of size m x n) into two factors U and V (of sizes m x r and r x n) with bounded error (or given rank r).
55 |
56 | :param M: a matrix
57 | :param delta: if provided, maximum error norm
58 | :param eps: if provided, maximum relative error
59 | :param rmax: optionally, maximum r
60 | :param left_ortho: if True (default), U will be orthonormal; if False, V will be
61 | :param algorithm: 'svd' (default) or 'eig'. The latter is often faster, but less accurate
62 | :param verbose: Boolean
63 |
64 | :return: U, V
65 | """
66 |
67 | if delta is not None and eps is not None:
68 | raise ValueError('Provide either `delta` or `eps`')
69 | if delta is None and eps is not None:
70 | delta = eps*torch.norm(M).item()
71 | if delta is None and eps is None:
72 | delta = 0
73 | if rmax is None:
74 | rmax = np.iinfo(np.int32).max
75 | assert rmax >= 1
76 | assert algorithm in ('svd', 'eig')
77 |
78 | if algorithm == 'svd':
79 | start = time.time()
80 | svd = torch.svd(M)[:2]
81 | singular_vectors = 'left'
82 | if verbose:
83 | print('Time (SVD):', time.time() - start)
84 | else:
85 | start = time.time()
86 | if M.shape[0] <= M.shape[1]:
87 | gram = torch.mm(M, M.permute(1, 0))
88 | singular_vectors = 'left'
89 | else:
90 | gram = torch.mm(M.permute(1, 0), M)
91 | singular_vectors = 'right'
92 | if verbose:
93 | print('Time (gram):', time.time() - start)
94 | start = time.time()
95 | w, v = torch.symeig(gram, eigenvectors=True)
96 | if verbose:
97 | print('Time (symmetric EIG):', time.time() - start)
98 | w[w < 0] = 0
99 | w = torch.sqrt(w)
100 | svd = [v, w]
101 | # Sort eigenvalues and eigenvectors in decreasing importance
102 | reverse = np.arange(len(svd[1])-1, -1, -1)
103 | idx = np.argsort(svd[1])[reverse]
104 | svd[0] = svd[0][:, idx]
105 | svd[1] = svd[1][idx]
106 |
107 | if svd[1][0] < 1e-13: # Special case: M = zero -> rank is 1
108 | return torch.zeros([M.shape[0], 1]), torch.zeros([1, M.shape[1]])
109 |
110 | S = svd[1]**2
111 | reverse = np.arange(len(S)-1, -1, -1)
112 | where = np.where((torch.cumsum(S[reverse], dim=0) <= delta**2).to('cpu'))[0]
113 | if len(where) == 0:
114 | rank = max(1, int(min(rmax, len(S))))
115 | else:
116 | rank = max(1, int(min(rmax, len(S) - 1 - where[-1])))
117 | left = svd[0]
118 | left = left[:, :rank]
119 |
120 | start = time.time()
121 | if singular_vectors == 'left':
122 | if left_ortho:
123 | M2 = torch.mm(left.permute(1, 0), M)
124 | else:
125 | M2 = torch.mm((1. / svd[1][:rank])[:, None]*left.permute(1, 0), M)
126 | left = left*svd[1][:rank]
127 | else:
128 | if left_ortho:
129 | M2 = torch.mm(M, (left * (1. / svd[1][:rank])[None, :]))
130 | left, M2 = M2, torch.mm(left, (torch.diag(svd[1][:rank]))).permute(1, 0)
131 | else:
132 | M2 = torch.mm(M, left)
133 | left, M2 = M2, left.permute(1, 0)
134 | if verbose:
135 | print('Time (product):', time.time() - start)
136 |
137 | return left, M2
138 |
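# Illustrative sketch added for documentation (not part of the original module): factorizes
# a matrix of known low rank and checks that `truncated_svd()` detects that rank, reconstructs
# the matrix accurately, and (with left_ortho=True, the default) returns an orthonormal U.
def _example_truncated_svd_sketch():
    torch.manual_seed(0)
    M = torch.rand(100, 5).mm(torch.rand(5, 60))        # a 100 x 60 matrix of rank 5
    U, V = truncated_svd(M, eps=1e-4)
    assert U.shape[1] == 5                               # the numerical rank is detected
    assert torch.dist(U.mm(V), M) / torch.norm(M) < 1e-4   # accurate reconstruction
    assert torch.dist(U.t().mm(U), torch.eye(U.shape[1])) < 1e-4   # U is orthonormal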
--------------------------------------------------------------------------------
/tutorials:
--------------------------------------------------------------------------------
1 | docs/tutorials/
--------------------------------------------------------------------------------