├── .gitignore ├── 2111_Como-intro-to-deeplearning ├── 1-start.ipynb ├── 1c-autoreg.ipynb ├── 2a-mnist.ipynb ├── 2b-generative.ipynb ├── 3a-policy-gradient.ipynb ├── 3b-policy-gradient-harder.ipynb ├── 3c-cartpole.ipynb ├── 4-nqs-ising.ipynb ├── README.md ├── images │ ├── autoreg_deep.png │ ├── lake_init.jpg │ └── mnist.png └── vae_utils_2.py ├── 2202_NetKet ├── 01_intro.ipynb ├── 02_netket_orig.ipynb ├── 02_unitary_dynamics.ipynb └── README.md ├── 2204_Toulouse-jax-netket ├── 1-start.ipynb ├── 2-nqs-ising.ipynb └── 3-nqs-dynamics.ipynb ├── 2206_Julia ├── 1_projects │ ├── Manifest.toml │ ├── Project.toml │ ├── environments.ipynb │ └── sharedproject │ │ ├── Manifest.toml │ │ ├── Project.toml │ │ ├── code.ipynb │ │ └── code.jl └── 2_linear_algebra │ ├── 1_linalg.ipynb │ └── 2_ed_quantum_ising.ipynb ├── 2209_Munich ├── Dynamics.ipynb ├── images │ ├── cqsl.jpg │ ├── epfl.png │ ├── epfl.svg │ ├── netket_web.png │ ├── nk_authors.png │ └── nk_commits.png ├── poetry.lock └── pyproject.toml ├── 2301_Pisa ├── 1-jax.ipynb ├── 2-nqs-ising.ipynb ├── 3-dynamics.ipynb └── images │ ├── cqsl.jpg │ ├── epfl.png │ ├── epfl.svg │ ├── netket_web.png │ ├── nk_authors.png │ └── nk_commits.png ├── 2304_Roscoff └── variational_intro.ipynb ├── 2307_Trento ├── 1_building.ipynb ├── 1_building_sol.ipynb └── 2_qgt_dynamics.ipynb ├── 2404_ICTP ├── 1-vmc-from-scratch-nosolution.ipynb └── 2_qgt_dynamics.ipynb ├── 2406_LesHouches └── 1-tutorial_vmc.ipynb ├── LICENSE └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | netket.egg-info/ 6 | pip-wheel-metadata 7 | .ipynb_checkpoints 8 | .pytest_cache 9 | .mypy_cache 10 | .virtual_documents 11 | oldest_requirements.txt 12 | 13 | .mpack 14 | .DS_STORE 15 | 16 | # C extensions 17 | *.so 18 | 19 | 20 | 21 | # Distribution / packaging 22 | .Python 23 | build/ 24 | develop-eggs/ 25 | dist/ 26 | downloads/ 27 | eggs/ 28 | .eggs/ 29 | lib/ 30 | lib64/ 31 | parts/ 32 | sdist/ 33 | var/ 34 | wheels/ 35 | pip-wheel-metadata/ 36 | share/python-wheels/ 37 | *.egg-info/ 38 | .installed.cfg 39 | *.egg 40 | MANIFEST 41 | 42 | # PyInstaller 43 | # Usually these files are written by a python script from a template 44 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 45 | *.manifest 46 | *.spec 47 | 48 | # Installer logs 49 | pip-log.txt 50 | pip-delete-this-directory.txt 51 | 52 | # Unit test / coverage reports 53 | htmlcov/ 54 | .tox/ 55 | .nox/ 56 | .coverage 57 | .coverage.* 58 | .cache 59 | nosetests.xml 60 | coverage.xml 61 | *.cover 62 | *.py,cover 63 | .hypothesis/ 64 | .pytest_cache/ 65 | 66 | # Translations 67 | *.mo 68 | *.pot 69 | 70 | # Django stuff: 71 | *.log 72 | local_settings.py 73 | db.sqlite3 74 | db.sqlite3-journal 75 | 76 | # Flask stuff: 77 | instance/ 78 | .webassets-cache 79 | 80 | # Scrapy stuff: 81 | .scrapy 82 | 83 | # Sphinx documentation 84 | docs/_build/ 85 | 86 | # PyBuilder 87 | target/ 88 | 89 | # Jupyter Notebook 90 | .ipynb_checkpoints 91 | 92 | # IPython 93 | profile_default/ 94 | ipython_config.py 95 | 96 | # pyenv 97 | .python-version 98 | 99 | # pipenv 100 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
101 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 102 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 103 | # install all needed dependencies. 104 | #Pipfile.lock 105 | 106 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 107 | __pypackages__/ 108 | 109 | # Celery stuff 110 | celerybeat-schedule 111 | celerybeat.pid 112 | 113 | # SageMath parsed files 114 | *.sage.py 115 | 116 | # Environments 117 | .env 118 | .venv 119 | env/ 120 | venv/ 121 | ENV/ 122 | env.bak/ 123 | venv.bak/ 124 | 125 | # Spyder project settings 126 | .spyderproject 127 | .spyproject 128 | 129 | # Rope project settings 130 | .ropeproject 131 | 132 | # mkdocs documentation 133 | /site 134 | 135 | # mypy 136 | .mypy_cache/ 137 | .dmypy.json 138 | dmypy.json 139 | 140 | # Pyre type checker 141 | .pyre/ 142 | -------------------------------------------------------------------------------- /2111_Como-intro-to-deeplearning/1c-autoreg.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "6387de74-7f19-446c-9c10-3d201a270a87", 6 | "metadata": {}, 7 | "source": [ 8 | "# Introduction to Deep Learning\n", 9 | "\n", 10 | "### Hands-on 2a: MNIST\n", 11 | "Filippo Vicentini and Giuseppe Carleo\n", 12 | "\n", 13 | "The objective of this hands-on is to write and optimise an image-classifier that identifies handwritten digits.\n", 14 | "\n", 15 | "We will use for this the MNIST dataset\n", 16 | "\n", 17 | "![title](images/mnist.png)" 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": 79, 23 | "id": "d7895a56-6b30-49bc-b07e-1fe5a6a00b09", 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "# Requirements\n", 28 | "#!pip install tensorflow_datasets flax jax optax" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 2, 34 | "id": "561535f1-4d9a-4908-bbb7-e63c94d22d47", 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "# Utility functions (don't worry. 
you don't need to understand this one)\n", 39 | "\n", 40 | "from matplotlib import pyplot as plt\n", 41 | "\n", 42 | "def show_img(img, ax=None, title=None):\n", 43 | " \"\"\"Shows a single image.\"\"\"\n", 44 | " if ax is None:\n", 45 | " ax = plt.gca()\n", 46 | " ax.imshow(img[..., 0], cmap='gray')\n", 47 | " ax.set_xticks([])\n", 48 | " ax.set_yticks([])\n", 49 | " if title:\n", 50 | " ax.set_title(title)\n", 51 | "\n", 52 | "def show_img_grid(imgs, titles):\n", 53 | " \"\"\"Shows a grid of images.\"\"\"\n", 54 | " n = int(np.ceil(len(imgs)**.5))\n", 55 | " _, axs = plt.subplots(n, n, figsize=(3 * n, 3 * n))\n", 56 | " for i, (img, title) in enumerate(zip(imgs, titles)):\n", 57 | " show_img(img, axs[i // n][i % n], title)" 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": 3, 63 | "id": "241cfd9e-6a5e-4257-b3b5-2e7bad914a5e", 64 | "metadata": {}, 65 | "outputs": [], 66 | "source": [ 67 | "# Import \n", 68 | "import jax\n", 69 | "import jax.numpy as jnp\n", 70 | "\n", 71 | "import numpy as np" 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": null, 77 | "id": "2b4e0f68-c1d0-4ce1-9ad1-65b6e47684d0", 78 | "metadata": {}, 79 | "outputs": [], 80 | "source": [] 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "id": "55c5988d-c3c3-43d0-abc8-6ceb1c09054d", 85 | "metadata": {}, 86 | "source": [ 87 | "## 1 - Setting up the dataset\n", 88 | "First of all, we need to download the dataset.\n", 89 | "\n", 90 | "The MNIST dataset is a standard dataset composed of several 28x28 black/white images representing numbers, and a label corresponding to the number that is represented there." 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": 4, 96 | "id": "90e05be1-0fd8-4562-8251-f8caa0576d3e", 97 | "metadata": {}, 98 | "outputs": [], 99 | "source": [ 100 | "# We use Tensorflow datasets to download and import data in a simple numpy-tensor format\n", 101 | "# It's just handy. 
You could use anything else.\n", 102 | "\n", 103 | "import tensorflow_datasets as tfds\n", 104 | "\n", 105 | "# Specify the dataset we are interested in\n", 106 | "ds_builder = tfds.builder('mnist')\n", 107 | "# Download the data\n", 108 | "ds_builder.download_and_prepare()\n", 109 | "# Get the whole dataset's train set\n", 110 | "train_ds = tfds.as_numpy(ds_builder.as_dataset(split='train', batch_size=-1))\n", 111 | "test_ds = tfds.as_numpy(ds_builder.as_dataset(split='test', batch_size=-1))\n", 112 | "\n", 113 | "train_ds['image'] = jnp.float32(train_ds['image']) / 255.\n", 114 | "test_ds['image'] = jnp.float32(test_ds['image']) / 255.\n", 115 | "\n", 116 | "# convert to bool\n", 117 | "train_ds['image'] = train_ds['image']>=0.5\n", 118 | "test_ds['image'] = test_ds['image']>=0.5" 119 | ] 120 | }, 121 | { 122 | "cell_type": "markdown", 123 | "id": "0a7b5869-071c-4431-b830-fd837948577d", 124 | "metadata": {}, 125 | "source": [ 126 | "The dataset is split into two sub-sets: the training dataset that we will use to 'train' our model, and the 'test' dataset, which the model *never sees* during training, but that we use to check that the model performs well.\n", 127 | "\n", 128 | "This is to verify that the model does not simply learn _by heart_ the images in the training dataset, but that it actually _learns_ to generalize and works correctly with images that he did not see before.\n", 129 | "\n", 130 | "We can inspect the shape of the training dataset:" 131 | ] 132 | }, 133 | { 134 | "cell_type": "code", 135 | "execution_count": 5, 136 | "id": "dc6521ad-6be7-46ab-9cf4-28d24da2d5d9", 137 | "metadata": {}, 138 | "outputs": [ 139 | { 140 | "name": "stdout", 141 | "output_type": "stream", 142 | "text": [ 143 | "dataset keys: dict_keys(['image', 'label'])\n", 144 | "The training dataset has shape: (60000, 28, 28, 1) and dtype bool\n", 145 | "The test dataset has shape: (10000, 28, 28, 1) and dtype bool\n", 146 | "\n", 147 | "The training labels have shape: (60000,) and dtype int64\n", 148 | "The test labels have shape: (10000,) and dtype int64\n" 149 | ] 150 | } 151 | ], 152 | "source": [ 153 | "print(\"dataset keys:\", train_ds.keys())\n", 154 | "print(f\"The training dataset has shape: {train_ds['image'].shape} and dtype {train_ds['image'].dtype}\")\n", 155 | "print(f\"The test dataset has shape: {test_ds['image'].shape} and dtype {train_ds['image'].dtype}\")\n", 156 | "print(\"\")\n", 157 | "print(f\"The training labels have shape: {train_ds['label'].shape} and dtype {train_ds['label'].dtype}\")\n", 158 | "print(f\"The test labels have shape: {test_ds['label'].shape} and dtype {test_ds['label'].dtype}\")" 159 | ] 160 | }, 161 | { 162 | "cell_type": "markdown", 163 | "id": "15665ab5-cb0c-4478-a48e-e0515cbe4fef", 164 | "metadata": {}, 165 | "source": [ 166 | "We can visualize it to understand it a bit more, using an utility function" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": 6, 172 | "id": "e6146cb7-be9d-4206-8b60-ae27699f0444", 173 | "metadata": {}, 174 | "outputs": [ 175 | { 176 | "data": { 177 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAgQAAAILCAYAAACXVIRDAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/MnkTPAAAACXBIWXMAAAsTAAALEwEAmpwYAAAVv0lEQVR4nO3df6isdZ0H8PfnoqZbaW4JrWLXQEMQKiKE6Acb3gV1WcygWGqpuC0rSWy1q2D+t7C4WPRH1JJLFFJ/pBSKlIlB+IegULRbZNSy4d6LrhndVdNbVuR+94+Zi9O5c2zmPHNmnmee1wsemDl35jnfOX7O8X2/9z3PqdZaAIBxO7DpBQAAmycQAAACAQAgEAAAEQgAgAgEAEBGGAiq6khVHVrgca2qLtzj59jzc2ER5pihM8P9M7pA0HdVdVpV/biqHt30WmBZVfX2qrqvqn5ZVUc2vR5YVk3cXFX/Oz1urqra9LrWQSDon+uT/GLTi4A9+lWSL2YyxzBEf5fkHUlel+S1Sf4qyTWbXNC6jDYQVNWlVfVgVT1VVT+rqs9W1Wk7HnZlVT1cVceq6pNVdWDm+Yenf5N/sqruraqDK1jTq5P8TZJ/6XouxqFvc9xa+05r7ctJHu5yHsajbzOc5P1JPtVae7S19j9JPpXkAx3POQijDQRJnkvysSSvSPKmJJcluXbHY65O8sYkb0hyVZLDSVJVVyW5Mck7k5yT5P4kX5n3Sarqhumgzz12PPwz0/M+u4LXxzj0cY5hGX2b4UuS/GDm/g+mH9t+rbVRHUmOJDk05+MfTXLnzP2W5PKZ+9cm+fb09j1JPjjzZweS/DrJwZnnXrjkuq5Ocs/09p8neXTTXytHf4++zvHMuQ4lObLpr5Ojv0dfZziTgHLxzP2LpuepTX/N9vsY7Q5BVb2mqr5RVY9X1dNJbsokoc56ZOb20STnTm8fTPLpmWT5RJJKct4e1/LiJJ9I8vd7eT7j1ac5hr3o4QwfT3LmzP0zkxxv03SwzUYbCJJ8LslPklzUWjszk22nnU3S82duvyrJY9PbjyS5prX2spnjjNbaAzs/SVXdWFXHdzumD7soyQVJ7q+qx5PckeTPpt8gF6zqBbOV+jTHsBd9m+EfZVIoPOF1049tvTEHgpcmeTrJ8aq6OMmH5jzm+qo6u6rOT/KRJLdPP35Lko9X1SVJUlVnVdW75n2S1tpNrbWX7HZMH/ZQJgP/+unxt0l+Pr39yJzTwgl9muNU1YGqOj3JqZO7dfqcghjM6tUMJ/lSkn+oqvOq6twk/5jk1pW80p4bcyC4Lsl7kjyT5PN5fsBm3ZXke0m+n+TuJF9IktbanUluTnLbdIvroSRX7HUhrbXft9YeP3Fksu31f9P7z+31vIxCb+Z46m2ZlGK/mcnf5J5N8q2O52S79W2G/y3J15P8cHq+u6cf23o1gn8WAQD+iDHvEAAAUwIBACAQAAACAQAQgQAASHLKMg+uKm9JoItjrbVzNrkAM0xHZpih23WG7RCwTkc3vQDoyAwzdLvOsEAAAAgEAIBAAABEIAAAIhAAABEIAIAIBABABAIAIAIBABCBAACIQAAARCAAACIQAABZ8tcfA8A2aK3bb5GuqhWtpD/sEAAAAgEAIBAAABEIAIAIBABAvMsAAJY2710KQ3/ngR0CAEAgAAAEAgAgAgEAEKXC3tvG4grbaZlLwZph1qnrZYrHwg4BACAQAAACAQAQgQAAiFJhbyi9MCTmlb7a5GwOvQRuhwAAEAgAAIEAAIhAAABEqRCAgVJuXS07BACAQAAACAQAQAQCACBKhRuhCMOQmFfGYrerCo7le8AOAQAgEAAAAgEAEIEAAMiIS4W7lUT69qsq+7YeWJYZZhVWXewzlyezQwAACAQAgEAAAEQgAAAy4lIhAP20TQXCea+lr4VGOwQAgEAAAAgEAEAEAgAgAgEAEO8yAGaM5fe+0w/7MW99bfAPgR0CAEAgAAAEAgAgAgEAEKXCfaekxVgoc/FCxvqzcEjfF3YIAACBAAAQCACACAQAQEZSKhxCmWVIxROAPvBzc7XsEAAAAgEAIBAAABEIAICMpFS4jCEUEAH6zK81HiY7BACAQAAACAQAQAQCACBKhRuhHEMfdCl+mWHYPnYIAACBAAAQCACACAQAQLawVNj1ClldylKucgiMzTZdlXDsP8PtEAAAAgEAIBAAABEIAIBsYanQFdTgZK5KyCqMvXS37ewQAAACAQAgEAAAEQgAgAgEAEC28F0G67Jo21ZDG9gW836eDfWdB9t0yeVVsUMAAAgEAIBAAABEIAAAolQIwADMKwFuU8mxD+wQAAACAQAgEAAAEQgAgCgVwtbpUqoa+pXW2A6LzvC6CoRj+b6wQwAACAQAgEAAAEQgAACiVLgQV74C2H5jKQ/uxg4BACAQAAACAQAQgQAAiFIhAB0sWsRzVcH+s0MAAAgEAIBAAABEIAAAolS4UsosDIl5ZZ3MW//ZIQAABAIAQCAAACIQAAARCACACAQAQAQCACACAQAQgQAAiEAAAMSlixfikpsMiXkF9sIOAQAgEAAAAgEAEIEAAIhAAABEIAAAIhAAABEIAIAIBABABAIAIAIBABCBAACIQAAARCAAALL8rz8+luTofiyEUTi46QXEDNONGWbodp3haq2tcyEAQA/5JwMAQCAAAAQCACACAQAQgQAAiEAAAEQgAAAiEAAAEQgAgAgEAEAEAgAgIwwEVXWkqg4t8LhWVRfu8XPs+bmwCHPM0Jnh/hldIOirqrqnqo7PHL+rqh9uel2wjKp6UVXdUlU/r6onqurrVXXeptcFi6qqt1fVfVX1y6o6sun1rJNA0BOttStaay85cSR5IMlXN70uWNJHkrwpyWuTnJvkySSf2eiKYDm/SvLFJNdveiHrNtpAUFWXVtWDVfVUVf2sqj5bVafteNiVVfVwVR2rqk9W1YGZ5x+uqh9X1ZNVdW9Vrez3pFfVBUnemuRLqzon26mHc/zqJPe21n7eWvtNktuTXNLxnGyxvs1wa+07rbUvJ3m4y3mGaLSBIMlzST6W5BWZ/I3msiTX7njM1UnemOQNSa5KcjhJquqqJDcmeWeSc5Lcn+Qr8z5JVd0wHfS5xy5re1+S+1trRzq8Psahb3P8hSRvrqpzq+pPkrw3yT2realsqb7N8Hi11kZ1JDmS5NCcj380yZ0z91uSy2fuX5vk29Pb9yT54MyfHUjy6yQHZ557YYc1/jTJBzb9tXL09+jrHCc5K8lt0+f+Psl/JPnTTX+9HP07+jrDM+c6lOTIpr9O6zxGu0NQVa+pqm9U1eNV9XSSmzJJqLMembl9NJN/E02Sg0k+PZMsn0hSSTqXp6rqLUlemeRrXc/F9uvhHP9rkhcleXmSFye5I3YIeAE9nOHRGm0gSPK5JD9JclFr7cxMtp1qx2POn7n9qiSPTW8/kuSa1trLZo4zWmsP7PwkVXXjjncP/MExZ13vT3JHa23en8FOfZvj1ye5tbX2RGvtt5kUCi+tqp0/4OGEvs3waI05ELw0ydNJjlfVxUk+NOcx11
fV2VV1fibt6dunH78lycer6pIkqaqzqupd8z5Ja+2mNvPugZ3H7GOr6owk705y60peIWPQtzn+bpL3Tc91aibbu4+11o6t5uWyhXo1w1V1oKpOT3Lq5G6dPqfkuJXGHAiuS/KeJM8k+XyeH7BZdyX5XpLvJ7k7k8JUWmt3Jrk5yW3TLa6HklyxgjW9I8lTSe5bwbkYh77N8XVJfpPkv5L8IsmVmRTCYDd9m+G3JXk2yTcz2Y14Nsm3Op5zEGpangAARmzMOwQAwJRAAAAIBACAQAAARCAAAJKcssyDq8pbEujiWGvtnE0uwAzTkRlm6HadYTsErNPRTS8AOjLDDN2uMywQAAACAQAgEAAAEQgAgAgEAEAEAgAgAgEAEIEAAIhAAABEIAAAIhAAABEIAIAIBABABAIAIAIBABCBAACIQAAARCAAACIQAABJTtn0AoDxaa0t9Liq2ueVMFaLzmAynjm0QwAACAQAgEAAAEQgAACiVAijtUypCsZs3vfKNhYN7RAAAAIBACAQAAARCACAjKRU2LU8tY3lEYZPKRAW43tlMXYIAACBAAAQCACACAQAQEZSKuxqLFepoh82WYBadK6VtGD72CEAAAQCAEAgAAAiEAAA2cJS4brKTkMoVSk+9t9+zJH/7rBaY/meskMAAAgEAIBAAABEIAAAMvBS4RCKfZvkCov90mVeh/Dfbbc1Lvq6h/Aa6b9t/z7bT3YIAACBAAAQCACACAQAQAZUKlx1gXDI5RFlyu02hNkcwhqB5dghAAAEAgBAIAAAIhAAABEIAIAM6F0GXWhEs25jfCfIGF8zm2XmVssOAQAgEAAAAgEAEIEAAMhISoWwbvOKrIsWoNZVlNpk2VbRl00zgyezQwAACAQAgEAAAEQgAAAyoFLhWAsgrsS1PboUDfeD2QJm2SEAAAQCAEAgAAAiEAAAGVCpcNvtR8FrrEXMIdmP/0ZDKAsuukYzzAlDmOuhs0MAAAgEAIBAAABEIAAAolS4Ecox7KcuRbwus7nb5+1yznnPVTRkWWZmMXYIAACBAAAQCACACAQAQJQKt4bSDC9k1UXWZeZt0ccq28Jm2SEAAAQCAEAgAAAiEAAAUSpcqXWVohQIeSGbLBACw2WHAAAQCAAAgQAAiEAAAEQgAADiXQZ75h0FbNp+zOC65m3RtZv/cVr1bM87n9k6mR0CAEAgAAAEAgAgAgEAEKXChbgULPtFORXoCzsEAIBAAAAIBABABAIAIEqFJ1EgZL+MdbZclRCGwQ4BACAQAAACAQAQgQAAyIhLhUP+1bH03xgLhOu66iIsawjfP31ghwAAEAgAAIEAAIhAAABkxKVC2E/zSkxdSnfbVthT8oL+sUMAAAgEAIBAAABEIAAAolS4Z0pRLGvRmRlqgdD3BKvSpZRrDvfODgEAIBAAAAIBABCBAACIUuFClFRYJ/MGJ/N9sf/sEAAAAgEAIBAAABEIAIAIBABABAIAIAIBABCBAACIQAAARCAAADLiSxe7DCYAPM8OAQAgEAAAAgEAEIEAAIhAAABEIAAAIhAAABEIAIAIBABAlr9S4bEkR/djIYzCwU0vIGaYbswwQ7frDFdrbZ0LAQB6yD8ZAAACAQAgEAAAEQgAgAgEAEAEAgAgAgEAEIEAAIhAAABEIAAAIhAAABlhIKiqI1V1aIHHtaq6cI+fY8/PhUWYY4bODPfP6AJBX1XV9VX1UFU9U1X/XVXXb3pNsKyquqeqjs8cv6uqH256XbCoqnp7Vd1XVb+sqiObXs86CQT9UUnel+TsJJcn+XBV/fVmlwTLaa1d0Vp7yYkjyQNJvrrpdcESfpXki0lG95ey0QaCqrq0qh6sqqeq6mdV9dmqOm3Hw66sqoer6lhVfbKqDsw8/3BV/biqnqyqe6uq0+9Jb619orX2762137fW/jPJXUne3OWcbL++zfGOtV2Q5K1JvrSqc7J9+jbDrbXvtNa+nOThLucZotEGgiTPJflYklckeVOSy5Jcu+MxVyd5Y5I3JLkqyeEkqaqrktyY5J1Jzklyf5KvzPskVXXDdNDnHrs8pzL5Qfqjbi+REejtHGey43V/a+1Ih9fH9uvzDI9La21UR5IjSQ7N+fhHk9w5c78luXzm/rVJvj29fU+SD8782YEkv05ycOa5F3ZY4z8l+UGSF2366+Xo5zGQOf5pkg9s+mvl6OfR9xlOcijJkU1/ndZ5jHaHoKpeU1XfqKrHq+rpJDdlklBnPTJz+2iSc6e3Dyb59EyyfCKTDsB5K1jXhzP5m9VfttZ+2/V8bLcez/Fbkrwyyde6novt1tcZHqPRBoIkn0vykyQXtdbOzGTbqXY85vyZ269K8tj09iNJrmmtvWzmOKO19sDOT1JVN+5oXf/BseOxh5PckOSy1tqjK3qdbLfezfHU+5Pc0Vqb92cwq68zPDpjDgQvTfJ0kuNVdXGSD815zPVVdXZVnZ/kI0lun378liQfr6pLkqSqzqqqd837JK21m9pM63rnceJxVfXeTJLxX7TWRldmYc96NcfT85yR5N1Jbl3JK2Tb9WqGq+pAVZ2e5NTJ3Tp9TslxK405EFyX5D1Jnkny+Tw/YLPuSvK9JN9PcneSLyRJa+3OJDcnuW26xfVQkis6ruefk7w8yXdnUustHc/J9uvbHCfJO5I8leS+FZyL7de3GX5bkmeTfDOT3Yhnk3yr4zkHoablCQBgxMa8QwAATAkEAIBAAAAIBABAklOWeXBVaSDSxbHW2jmbXIAZpiMzzNDtOsN2CFino5teAHRkhhm6XWdYIAAABAIAQCAAACIQAAARCACACAQAQAQCACACAQAQgQAAiEAAAEQgAAAiEAAAEQgAgAgEAEAEAgAgAgEAEIEAAIhAAABEIAAAkpyy6QWsWmut0/OrakUrAWBbLfr/miH9P8UOAQAgEAAAAgEAEIEAAMjAS4VdC4TrOOeQCiUAjJcdAgBAIAAABAIAIAIBAJCBlwqHYF5JUdGQIdnGK7LBMvajwN5HdggAAIEAABAIAIAIBABABlQq7FLq2I+yU5f1KBrSB6suSu12PrPNUIylPLgbOwQAgEAAAAgEAEAEAgAgAyoV9q2YNG89Yy+ksDwzA5uxH997ffv/1LLsEAAAAgEAIBAAABEIAIAMqFQ4BF2Khq7yBjAc2/iz2Q4BACAQAAACAQAQgQAAiFIhbNQQikmupsjQmeHF2CEAAAQCAEAgAAAiEAAAEQgAgHiXwb7rcjljGJIhvGMC9mIss22HAAAQCAAAgQAAiEAAAESpsPfmFRDHUnBhWHYry5pX1qlLaXvss2qHAAAQCAAAgQAAiEAAAESpEFiRsReyWD9XfV0tOwQAgEAAAAgEAEAEAgAgSoW9p6jFOilpMRZ+tp7MDgEAIBAAAAIBABCBAADIwEuF+1GAWnXRREmLvlr1bC5zPoUuluXXGu8/OwQAgEAAAAgEAEAEAgAgAyoVrqucp7hCHymnMibmfTPsEAAAAgEAIBAAABEIAIAMqFQ4BIow7Jd5hdW+zZtSLX1gDvfODgEAIBAAAAIBABCBAACIQAAAZ
EDvMlh1y3qZJuom29x9a5LPo9W7GfvxdR/CvLE9zFu/2CEAAAQCAEAgAAAiEAAAGVCpcJ4uRUNllr1RIAT6ws+j1bJDAAAIBACAQAAARCAAADLwUuE8ffu98Zu8mpzCDdAXitz9Z4cAABAIAACBAACIQAAAZAtLhfNsW7lu214Pw2MGWTczt//sEAAAAgEAIBAAABEIAIAIBABABAIAIAIBABCBAACIQAAAZCRXKgRO5tfRArPsEAAAAgEAIBAAABEIAIAoFQKwBn59cf/ZIQAABAIAQCAAACIQAAARCACAeJcB8Edoh8M42CEAAAQCAEAgAAAiEAAAUSqE0VIWBGbZIQAABAIAQCAAACIQAAARCACACAQAQAQCACACAQAQgQAAyPJXKjyW5Oh+LIRROLjpBcQM040ZZuh2neFqra1zIQBAD/knAwBAIAAABAIAIAIBABCBAACIQAAARCAAACIQAAARCACAJP8Pv5eHlzYxbYUAAAAASUVORK5CYII=\n", 178 | "text/plain": [ 179 | "
" 180 | ] 181 | }, 182 | "metadata": {}, 183 | "output_type": "display_data" 184 | } 185 | ], 186 | "source": [ 187 | "show_img_grid(\n", 188 | " [train_ds['image'][idx] for idx in range(9)],\n", 189 | " [f'label={train_ds[\"label\"][idx]}' for idx in range(9)],\n", 190 | ")" 191 | ] 192 | }, 193 | { 194 | "cell_type": "markdown", 195 | "id": "73c9f1e8-b449-4589-a826-3dd5771db7ce", 196 | "metadata": {}, 197 | "source": [ 198 | "We have seen that the data is stored in uint8 (an *unsigned* 8-bit integer which can take values from 0 to 255 ).\n", 199 | "\n", 200 | "However it is often preferable when working with Neural Networks to work with floating-point values with values around 0 and variance approximately 1. The reasons are 2:\n", 201 | "\n", 202 | " - modern CPUs (and to an extent GPUs) are often faster at working with batches (blocks) of floating-point numbers rather than integers [caveats apply]\n", 203 | " - Many nonlinear functions used in machine-learning have the nonlinear crossover aroud ~0 or ~1/2, so we want our data to be spread around those values\n", 204 | " - Most research about how to initialize neural-network layers assumes that the input data has mean 0 and variance 1, so to exploit those results we have to rescale our data" 205 | ] 206 | }, 207 | { 208 | "cell_type": "markdown", 209 | "id": "0bf695fb-0fab-49f3-81fd-9a23fc7ea311", 210 | "metadata": {}, 211 | "source": [] 212 | }, 213 | { 214 | "cell_type": "markdown", 215 | "id": "98e56e14-2a4e-4fad-9d47-809ba4b0a354", 216 | "metadata": {}, 217 | "source": [ 218 | "## 2 - The model (Neural Network)\n", 219 | "\n", 220 | "We want now to define the Model.\n", 221 | "We will use Flax to do that.\n", 222 | "\n", 223 | "We want our network to return a probability distribution for the input to correspond to one of several output labels.\n", 224 | "\n", 225 | "e.g: if $x$ is an image, then $f : \\mathbb{R}^{28\\times 28}\\rightarrow \\mathbb{R}^{10}$ and $f^{(i)}(x)$ is the probability that the image $x$ represents a $i\\in[0,9]$\n", 226 | "\n", 227 | "To make the output of the network a probability distribution, we can use a softmax function, defined as\n", 228 | "\n", 229 | "$$\n", 230 | "\\sigma_i(x) = \\frac{e^{x_i}}{\\sum_i^K e^{x_i} } \\text{ for } i\\in [1,K] \\text{ and } x\\in\\mathbb{R}^K\n", 231 | "$$\n", 232 | "\n", 233 | "We want to use a Feedforward network with 2 Dense Layers, relu-nonlinearity and output softmax using Flax.\n" 234 | ] 235 | }, 236 | { 237 | "cell_type": "code", 238 | "execution_count": 234, 239 | "id": "8af5cf83-b3e2-4a1d-b204-73f383952134", 240 | "metadata": {}, 241 | "outputs": [], 242 | "source": [ 243 | "import jax\n", 244 | "import jax.numpy as jnp\n", 245 | "\n", 246 | "# We import flax.linen as nn\n", 247 | "# The reason is that flax.nn is old and deprecated and will be removed one day\n", 248 | "import flax.linen as nn" 249 | ] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "execution_count": 268, 254 | "id": "7bf8a1ee-844e-4342-8578-6a0df21cc422", 255 | "metadata": {}, 256 | "outputs": [], 257 | "source": [ 258 | "# A Flax model must be a class sub-classing nn.Module\n", 259 | "class Model(nn.Module):\n", 260 | " \n", 261 | " # We can have some attributes of the Model. 
\n", 262 | " # Those are considered compile-time constants and must be hashable\n", 263 | " # They are useful to define some variables that might be changed often\n", 264 | " hidden_width : int = 1024\n", 265 | " \"\"\"\n", 266 | " The width of the hidden dense layers in the neural network.\n", 267 | " \"\"\"\n", 268 | " n_outputs : int = 10\n", 269 | " \"\"\"\n", 270 | " Number of output classes for the classifier \n", 271 | " \"\"\"\n", 272 | " \n", 273 | " # The body of the model must be defined using the `@nn.compact` decorator.\n", 274 | " # Just think of it as boilerplate, and if you are curious, check out\n", 275 | " # Flax documentation\n", 276 | " @nn.compact\n", 277 | " def __call__(self, x):\n", 278 | " \"\"\"\n", 279 | " This function should evaluate the result of the model for an input image\n", 280 | " x or a batch of images x.\n", 281 | " \n", 282 | " x has shape (28,28,1) or (N, 28, 28, 1)\n", 283 | " \"\"\"\n", 284 | " # we first ensure a single image is a 4-tensor\n", 285 | " if x.ndim == 3:\n", 286 | " x = x.reshape((1, ) + x.shape)\n", 287 | " \n", 288 | " # We first \"vectorize\" the image\n", 289 | " x = x.reshape((x.shape[0], -1))\n", 290 | " \n", 291 | " # First dense layer\n", 292 | " x = nn.Dense(features=self.hidden_width)(x)\n", 293 | " # First nonlinear activation function\n", 294 | " x = nn.relu(x)\n", 295 | " #x = nn.Dense(features=self.hidden_width)(x)\n", 296 | " #x = nn.relu(x)\n", 297 | " x = nn.Dense(features=self.n_outputs)(x)\n", 298 | " x = nn.log_softmax(x)\n", 299 | " return x" 300 | ] 301 | }, 302 | { 303 | "cell_type": "markdown", 304 | "id": "0b1324af-5409-4a2c-9fc3-1ae9e09e009b", 305 | "metadata": {}, 306 | "source": [ 307 | "Let's initialize the model:\n", 308 | " \n", 309 | " - We need a seed for the RNG that generates the initial weights\n", 310 | " - We need a sample input" 311 | ] 312 | }, 313 | { 314 | "cell_type": "code", 315 | "execution_count": 269, 316 | "id": "add7d9bd-8028-43e0-b62f-d1c8f05b0de7", 317 | "metadata": {}, 318 | "outputs": [], 319 | "source": [ 320 | "seed = 123\n", 321 | "\n", 322 | "model = Model(hidden_width = 1024, n_outputs=10)\n", 323 | "\n", 324 | "key = jax.random.PRNGKey(seed)\n", 325 | "sample_input = jnp.ones([1, 28, 28, 1])\n", 326 | "\n", 327 | "pars = model.init(key, sample_input)" 328 | ] 329 | }, 330 | { 331 | "cell_type": "markdown", 332 | "id": "7604bf44-3b6e-4550-93bf-af5253a50364", 333 | "metadata": {}, 334 | "source": [ 335 | "we can inspect the parameters `pars`:" 336 | ] 337 | }, 338 | { 339 | "cell_type": "code", 340 | "execution_count": 270, 341 | "id": "a7d79701-b6c0-4207-aab6-d1c9473856f1", 342 | "metadata": {}, 343 | "outputs": [ 344 | { 345 | "data": { 346 | "text/plain": [ 347 | "FrozenDict({\n", 348 | " params: {\n", 349 | " Dense_0: {\n", 350 | " kernel: DeviceArray([[ 0.06272361, -0.04523007, 0.04455307, ..., 0.0401537 ,\n", 351 | " 0.04866179, -0.05893217],\n", 352 | " [ 0.03216646, -0.05071622, 0.01560513, ..., -0.00933223,\n", 353 | " -0.00218637, 0.01502309],\n", 354 | " [ 0.06411006, -0.0392851 , -0.02909053, ..., 0.01578827,\n", 355 | " -0.02766293, 0.0776424 ],\n", 356 | " ...,\n", 357 | " [ 0.01668577, 0.00019089, 0.02212064, ..., -0.01364938,\n", 358 | " -0.00778654, -0.01584569],\n", 359 | " [-0.01295255, 0.00911052, 0.00867082, ..., 0.02317737,\n", 360 | " -0.01509016, -0.01241465],\n", 361 | " [ 0.01655648, 0.04822065, 0.01358693, ..., -0.0194821 ,\n", 362 | " -0.00854595, 0.01909487]], dtype=float32),\n", 363 | " bias: DeviceArray([0., 0., 0., ..., 0., 0., 0.], 
dtype=float32),\n", 364 | " },\n", 365 | " Dense_1: {\n", 366 | " kernel: DeviceArray([[-0.0314371 , 0.00309207, -0.02992933, ..., -0.0403144 ,\n", 367 | " 0.034722 , -0.04977578],\n", 368 | " [ 0.04734345, -0.0380396 , -0.01267887, ..., -0.0117529 ,\n", 369 | " -0.0470979 , -0.03807784],\n", 370 | " [-0.01505996, 0.01678214, 0.01899202, ..., -0.0485573 ,\n", 371 | " 0.0563934 , 0.00642153],\n", 372 | " ...,\n", 373 | " [ 0.00970839, 0.03326492, -0.03198652, ..., -0.023723 ,\n", 374 | " -0.02472805, -0.02167386],\n", 375 | " [-0.00639112, -0.04331256, 0.04033599, ..., 0.03074743,\n", 376 | " -0.00257638, 0.06517929],\n", 377 | " [-0.00433151, -0.01294066, -0.01874469, ..., -0.00766201,\n", 378 | " -0.04973738, -0.00242333]], dtype=float32),\n", 379 | " bias: DeviceArray([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32),\n", 380 | " },\n", 381 | " },\n", 382 | "})" 383 | ] 384 | }, 385 | "execution_count": 270, 386 | "metadata": {}, 387 | "output_type": "execute_result" 388 | } 389 | ], 390 | "source": [ 391 | "pars" 392 | ] 393 | }, 394 | { 395 | "cell_type": "code", 396 | "execution_count": 271, 397 | "id": "b3e0b121-050c-443e-ac7f-9c639b2fb4e5", 398 | "metadata": {}, 399 | "outputs": [ 400 | { 401 | "data": { 402 | "text/plain": [ 403 | "DeviceArray([[0.07704171, 0.08986449, 0.07054991, 0.04636909, 0.30741608,\n", 404 | " 0.064256 , 0.10086796, 0.06952595, 0.10017835, 0.07393045]], dtype=float32)" 405 | ] 406 | }, 407 | "execution_count": 271, 408 | "metadata": {}, 409 | "output_type": "execute_result" 410 | } 411 | ], 412 | "source": [ 413 | "# sample application:\n", 414 | "jnp.exp(model.apply(pars, jnp.ones([1, 28, 28, 1])))" 415 | ] 416 | }, 417 | { 418 | "cell_type": "markdown", 419 | "id": "11ebdbb9-8e06-4620-89f8-572f752ff73e", 420 | "metadata": {}, 421 | "source": [ 422 | "## 3 - Writing the loss function\n", 423 | "\n", 424 | "We now want to take as a loss function the distance between the _predicted_ probability given by the model $q_W^{(i)}(x)$ and the actualy probabilith $p^{(i)}(x)$.\n", 425 | "\n", 426 | "The actual probability is a delta function: it is zero for every label except for the correct one, for which it is 1.\n", 427 | "\n", 428 | "To perform this, we can use one-hot encoding, which takes an integer value in $i\\in[0..K]$ and returns a vector in $R^K$ where only the i-th component is 1 and the other are zero: $v_j = \\delta_{i,j}$.\n", 429 | "\n", 430 | "See the examples below:" 431 | ] 432 | }, 433 | { 434 | "cell_type": "code", 435 | "execution_count": 272, 436 | "id": "d168c699-dbc5-4f84-9f1f-40511e35050f", 437 | "metadata": {}, 438 | "outputs": [ 439 | { 440 | "name": "stdout", 441 | "output_type": "stream", 442 | "text": [ 443 | "0 becomes: [1. 0. 0. 0. 0.]\n", 444 | "1 becomes: [0. 1. 0. 0. 0.]\n", 445 | "2 becomes: [0. 0. 1. 0. 0.]\n", 446 | "3 becomes: [0. 0. 0. 1. 0.]\n", 447 | "4 becomes: [0. 0. 0. 0. 
1.]\n" 448 | ] 449 | } 450 | ], 451 | "source": [ 452 | "for i in range(5):\n", 453 | " print(f\"{i} becomes: {jax.nn.one_hot(i, 5)}\")" 454 | ] 455 | }, 456 | { 457 | "cell_type": "markdown", 458 | "id": "1c72691d-9a4e-4826-a042-67b0aaa3238e", 459 | "metadata": {}, 460 | "source": [ 461 | "For the loss function, i'll draw from my vast knowledge of loss functions (aka: [here](https://optax.readthedocs.io/en/latest/api.html)) and choose `optax.softmax_cross_entropy`\n", 462 | "\n", 463 | "`?optax.softmax_cross_entropy`\n", 464 | "> Computes the softmax cross entropy between sets of logits and labels.\n", 465 | ">\n", 466 | ">Measures the probability error in discrete classification tasks in which\n", 467 | ">the classes are mutually exclusive (each entry is in exactly one class).\n", 468 | ">For example, each CIFAR-10 image is labeled with one and only one label:\n", 469 | ">an image can be a dog or a truck, but not both.\n", 470 | ">\n", 471 | ">References:\n", 472 | "> [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html)\n", 473 | ">\n", 474 | ">Args:\n", 475 | ">\n", 476 | "> logits: unnormalized log probabilities.\n", 477 | ">\n", 478 | "> labels: a valid probability distribution (non-negative, sum to 1), e.g a\n", 479 | "> one hot encoding of which class is the correct one for each input.\n" 480 | ] 481 | }, 482 | { 483 | "cell_type": "code", 484 | "execution_count": 273, 485 | "id": "e4112499-6b6f-445e-bd18-22fd5b0b2a78", 486 | "metadata": {}, 487 | "outputs": [], 488 | "source": [ 489 | "# The loss function that we will use\n", 490 | "def cross_entropy(*, logits, labels):\n", 491 | " one_hot_labels = jax.nn.one_hot(labels, num_classes=10)\n", 492 | " return -jnp.mean(jnp.sum(one_hot_labels * logits, axis=-1))" 493 | ] 494 | }, 495 | { 496 | "cell_type": "code", 497 | "execution_count": 274, 498 | "id": "e90668ed-0090-446a-81d3-4f6e1763b2e2", 499 | "metadata": {}, 500 | "outputs": [], 501 | "source": [ 502 | "# Our cost function that we will be optimising\n", 503 | "def loss_fn(params, images, labels):\n", 504 | " # compute the output of the model\n", 505 | " logits = model.apply({'params': params}, images)\n", 506 | " return cross_entropy(logits=logits, labels=labels)\n", 507 | "\n", 508 | "# An utility function to compute some metrics during the training\n", 509 | "def compute_metrics(*, logits, labels):\n", 510 | " loss = cross_entropy(logits=logits, labels=labels)\n", 511 | " accuracy = jnp.mean(jnp.argmax(logits, -1) == labels)\n", 512 | " metrics = {\n", 513 | " 'loss': loss,\n", 514 | " 'accuracy': accuracy,\n", 515 | " }\n", 516 | " return metrics" 517 | ] 518 | }, 519 | { 520 | "cell_type": "markdown", 521 | "id": "b5915221-3875-46a9-b6dd-1b32b17e38c0", 522 | "metadata": {}, 523 | "source": [ 524 | "We then need a function to keep the training state in memory" 525 | ] 526 | }, 527 | { 528 | "cell_type": "code", 529 | "execution_count": 275, 530 | "id": "50082ccb-9022-496a-8898-f2b645116347", 531 | "metadata": {}, 532 | "outputs": [], 533 | "source": [ 534 | "from flax.training import train_state # Useful dataclass to keep train state\n", 535 | "\n", 536 | "def create_train_state(rng, learning_rate, momentum):\n", 537 | " \"\"\"Creates initial `TrainState`.\"\"\"\n", 538 | " params = model.init(rng, jnp.ones([1, 28, 28, 1]))['params']\n", 539 | " tx = optax.sgd(learning_rate, momentum)\n", 540 | " return train_state.TrainState.create(\n", 541 | " apply_fn=model.apply, params=params, tx=tx)\n" 542 | ] 543 | }, 544 | { 545 | "cell_type": "code", 546 | 
"execution_count": null, 547 | "id": "038dfa1c-53cb-4b6a-a177-2a7233b87853", 548 | "metadata": {}, 549 | "outputs": [], 550 | "source": [] 551 | }, 552 | { 553 | "cell_type": "code", 554 | "execution_count": 276, 555 | "id": "5f6b6254-da8a-4bd5-ad56-15b13a3c2bd2", 556 | "metadata": {}, 557 | "outputs": [], 558 | "source": [ 559 | "from functools import partial\n", 560 | "\n", 561 | "@jax.jit\n", 562 | "def train_step(state, batch):\n", 563 | " \"\"\"Train for a single step.\"\"\"\n", 564 | " _loss_fn = partial(loss_fn, images=batch['image'], labels=batch['label'])\n", 565 | " \n", 566 | " grad_fn = jax.value_and_grad(_loss_fn)\n", 567 | " loss, grads = grad_fn(state.params)\n", 568 | " \n", 569 | " state = state.apply_gradients(grads=grads)\n", 570 | " \n", 571 | " logits = model.apply({'params': state.params}, batch['image'])\n", 572 | " metrics = compute_metrics(logits=logits, labels=batch['label'])\n", 573 | " \n", 574 | " return state, metrics\n" 575 | ] 576 | }, 577 | { 578 | "cell_type": "code", 579 | "execution_count": 277, 580 | "id": "4d0ec7de-f082-4276-96e6-86c59712ba69", 581 | "metadata": {}, 582 | "outputs": [], 583 | "source": [ 584 | "@jax.jit\n", 585 | "def eval_step(params, batch):\n", 586 | " logits = model.apply({'params': params}, batch['image'])\n", 587 | " return compute_metrics(logits=logits, labels=batch['label'])" 588 | ] 589 | }, 590 | { 591 | "cell_type": "code", 592 | "execution_count": 278, 593 | "id": "e2f17cf0-198d-4d57-88d9-de60e6e76492", 594 | "metadata": {}, 595 | "outputs": [], 596 | "source": [ 597 | "def train_epoch(state, train_ds, batch_size, epoch, rng, *, max_steps=None):\n", 598 | " \"\"\"Train for a single epoch.\"\"\"\n", 599 | " \n", 600 | " # total number of training images\n", 601 | " train_ds_size = len(train_ds['image'])\n", 602 | " \n", 603 | " steps_per_epoch = train_ds_size // batch_size\n", 604 | "\n", 605 | " # Truncate the number of steps (used to speed up training)\n", 606 | " if max_steps is not None:\n", 607 | " steps_per_epoch = min(steps_per_epoch, max_steps)\n", 608 | "\n", 609 | "\n", 610 | " # generate a random permutation of the indices to shuffle the training\n", 611 | " # dataset, and reshape it to a set of batches.\n", 612 | " perms = jax.random.permutation(rng, train_ds_size)\n", 613 | " perms = perms[:steps_per_epoch * batch_size] # skip incomplete batch\n", 614 | " perms = perms.reshape((steps_per_epoch, batch_size))\n", 615 | " \n", 616 | " # execute the training step for every mini-batch\n", 617 | " batch_metrics = []\n", 618 | " for perm in perms:\n", 619 | " batch = {k: v[perm, ...] 
for k, v in train_ds.items()}\n", 620 | " state, metrics = train_step(state, batch)\n", 621 | " batch_metrics.append(metrics)\n", 622 | "\n", 623 | " # compute mean of metrics across each batch in epoch.\n", 624 | " batch_metrics_np = jax.device_get(batch_metrics)\n", 625 | " epoch_metrics_np = {\n", 626 | " k: np.mean([metrics[k] for metrics in batch_metrics_np])\n", 627 | " for k in batch_metrics_np[0]}\n", 628 | "\n", 629 | " return state, epoch_metrics_np\n", 630 | "\n", 631 | "def eval_model(params, test_ds):\n", 632 | " \"\"\"\n", 633 | " evaluate the performance of the model on the test dataset\n", 634 | " \"\"\"\n", 635 | " metrics = eval_step(params, test_ds)\n", 636 | " metrics = jax.device_get(metrics)\n", 637 | " summary = jax.tree_map(lambda x: x.item(), metrics)\n", 638 | " return summary['loss'], summary['accuracy']" 639 | ] 640 | }, 641 | { 642 | "cell_type": "code", 643 | "execution_count": 279, 644 | "id": "6e7263bc-b1f0-4a71-8b2a-ff19daf8257b", 645 | "metadata": {}, 646 | "outputs": [], 647 | "source": [ 648 | "rng = jax.random.PRNGKey(0)\n", 649 | "rng, init_rng = jax.random.split(rng)" 650 | ] 651 | }, 652 | { 653 | "cell_type": "code", 654 | "execution_count": 280, 655 | "id": "7f0eee07-7262-49df-82cb-df181c9ed707", 656 | "metadata": {}, 657 | "outputs": [], 658 | "source": [ 659 | "learning_rate = 0.1\n", 660 | "momentum = 0.9" 661 | ] 662 | }, 663 | { 664 | "cell_type": "code", 665 | "execution_count": 281, 666 | "id": "7c1094b1-4937-4eb0-a51f-418f5d3dfc17", 667 | "metadata": {}, 668 | "outputs": [], 669 | "source": [ 670 | "state = create_train_state(init_rng, learning_rate, momentum)" 671 | ] 672 | }, 673 | { 674 | "cell_type": "code", 675 | "execution_count": 282, 676 | "id": "0b195b90-b838-4b75-96a1-28b5302cf6d6", 677 | "metadata": {}, 678 | "outputs": [], 679 | "source": [ 680 | "num_epochs = 10\n", 681 | "batch_size = 32" 682 | ] 683 | }, 684 | { 685 | "cell_type": "code", 686 | "execution_count": null, 687 | "id": "6a077215-b821-45cf-8747-cd7154f4351a", 688 | "metadata": {}, 689 | "outputs": [ 690 | { 691 | "name": "stderr", 692 | "output_type": "stream", 693 | "text": [ 694 | " 0%| | 0/10 [00:00 jax.numpy.DeviceArray 19 | 20 | If you want to know about this file in detail, please visit the original code: 21 | https://github.com/pytorch/vision/blob/master/torchvision/utils.py 22 | """ 23 | import math 24 | from PIL import Image 25 | 26 | import jax 27 | import jax.numpy as jnp 28 | 29 | 30 | def save_image(ndarray, fp, nrow=8, padding=2, pad_value=0.0, format=None): 31 | """Make a grid of images and Save it into an image file. 32 | Args: 33 | ndarray (array_like): 4D mini-batch images of shape (B x H x W x C) 34 | fp - A filename(string) or file object 35 | nrow (int, optional): Number of images displayed in each row of the grid. 36 | The final grid size is ``(B / nrow, nrow)``. Default: ``8``. 37 | padding (int, optional): amount of padding. Default: ``2``. 38 | scale_each (bool, optional): If ``True``, scale each image in the batch of 39 | images separately rather than the (min, max) over all images. Default: ``False``. 40 | pad_value (float, optional): Value for the padded pixels. Default: ``0``. 41 | format(Optional): If omitted, the format to use is determined from the filename extension. 42 | If a file object was used instead of a filename, this parameter should always be used. 
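    Example (hypothetical usage; assumes `imgs` is a (B, H, W, C) float array with values in [0, 1]):
        save_image(imgs, "samples.png", nrow=4)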
43 | """ 44 | if not (isinstance(ndarray, jnp.ndarray) or 45 | (isinstance(ndarray, list) and all(isinstance(t, jnp.ndarray) for t in ndarray))): 46 | raise TypeError('array_like of tensors expected, got {}'.format(type(ndarray))) 47 | 48 | ndarray = jnp.asarray(ndarray) 49 | 50 | if ndarray.ndim == 4 and ndarray.shape[-1] == 1: # single-channel images 51 | ndarray = jnp.concatenate((ndarray, ndarray, ndarray), -1) 52 | 53 | # make the mini-batch of images into a grid 54 | nmaps = ndarray.shape[0] 55 | xmaps = min(nrow, nmaps) 56 | ymaps = int(math.ceil(float(nmaps) / xmaps)) 57 | height, width = int(ndarray.shape[1] + padding), int(ndarray.shape[2] + padding) 58 | num_channels = ndarray.shape[3] 59 | grid = jnp.full((height * ymaps + padding, width * xmaps + padding, num_channels), pad_value).astype(jnp.float32) 60 | k = 0 61 | for y in range(ymaps): 62 | for x in range(xmaps): 63 | if k >= nmaps: 64 | break 65 | grid = jax.ops.index_update( 66 | grid, jax.ops.index[y * height + padding:(y + 1) * height, 67 | x * width + padding:(x + 1) * width], 68 | ndarray[k]) 69 | k = k + 1 70 | 71 | # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer 72 | ndarr = jnp.clip(grid * 255.0 + 0.5, 0, 255).astype(jnp.uint8) 73 | im = Image.fromarray(ndarr.copy()) 74 | im.save(fp, format=format) 75 | 76 | # Utility functions (don't worry. you don't need to understand this one) 77 | from matplotlib import pyplot as plt 78 | 79 | def show_img(img, ax=None, title=None): 80 | """Shows a single image.""" 81 | if ax is None: 82 | ax = plt.gca() 83 | ax.imshow(img[..., 0], cmap='gray') 84 | ax.set_xticks([]) 85 | ax.set_yticks([]) 86 | if title: 87 | ax.set_title(title) 88 | 89 | def show_img_grid(imgs, titles): 90 | """Shows a grid of images.""" 91 | n = int(np.ceil(len(imgs)**.5)) 92 | _, axs = plt.subplots(n, n, figsize=(3 * n, 3 * n)) 93 | for i, (img, title) in enumerate(zip(imgs, titles)): 94 | show_img(img, axs[i // n][i % n], title) 95 | -------------------------------------------------------------------------------- /2202_NetKet/README.md: -------------------------------------------------------------------------------- 1 | # NetKet-lectures 2 | 3 | A collection of lectures on NetKet 4 | 5 | - 1: An introduction to NetKet 3, Variational Monte Carlo and Flax 6 | - 2: How to use the Time-Dependent-Variational-Principle to study the unitary dynamics -------------------------------------------------------------------------------- /2204_Toulouse-jax-netket/3-nqs-dynamics.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "2cb283d3-2996-4cae-9253-22df9292c723", 6 | "metadata": { 7 | "id": "2cb283d3-2996-4cae-9253-22df9292c723" 8 | }, 9 | "source": [ 10 | "# Toulouse School on Machine Learning in Quantum Many-Body Physics\n", 11 | "\n", 12 | "## Tutorial: Dynamics with neural quantum states\n", 13 | "\n", 14 | "Damian Hofmann\n", 15 | "\n", 16 | "[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/PhilipVinc/Lectures/blob/main/2204_Toulouse-jax-netket/3-nqs-dynamics.ipynb) \n", 17 | "\n", 18 | "In this short tutorial session, we will follow up on the previous sessions and lectures on neural quantum states and demonstrate how to compute quantum dynamics using time-dependent variational Monte Carlo in NetKet." 
19 | ] 20 | }, 21 | { 22 | "cell_type": "markdown", 23 | "id": "176b97f1-2c29-4690-a238-6afc5bb40191", 24 | "metadata": { 25 | "id": "176b97f1-2c29-4690-a238-6afc5bb40191", 26 | "tags": [] 27 | }, 28 | "source": [ 29 | "## 0. Setup\n", 30 | "\n", 31 | "To run this notebook, please install the following packages:\n", 32 | "```\n", 33 | "jax==0.3.4\n", 34 | "jaxlib==0.3.2\n", 35 | "numpy==1.21.5\n", 36 | "netket==3.4.0\n", 37 | "matplotlib==3.5.1\n", 38 | "```\n", 39 | " \n", 40 | "You can run this notebook in Google Colab during the class using the link above.\n", 41 | "\n", 42 | "To install the packages in Colab you need to run the cell below. (If you are running in your own environment, you do not need to run that cell.)" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": null, 48 | "id": "9d8ac809-9148-4316-afb2-4bd884ae3f16", 49 | "metadata": { 50 | "id": "9d8ac809-9148-4316-afb2-4bd884ae3f16", 51 | "tags": [] 52 | }, 53 | "outputs": [], 54 | "source": [ 55 | "!pip install jax==0.3.4 jaxlib==0.3.2 numpy==1.21.5 netket==3.4.0 matplotlib==3.5.1" 56 | ] 57 | }, 58 | { 59 | "cell_type": "markdown", 60 | "id": "7214ffe3-e1bc-4ff9-8bc6-2d0b67b37ef6", 61 | "metadata": { 62 | "id": "7214ffe3-e1bc-4ff9-8bc6-2d0b67b37ef6" 63 | }, 64 | "source": [ 65 | "## 1. System\n", 66 | "\n", 67 | "As in the previous tutorial, we take the transverse-field Ising model as an example:\n", 68 | "$$\n", 69 | " \\hat H = \\sum_{ij} \\hat\\sigma^z_i\\hat\\sigma^z_j - h \\sum_i \\sigma^x_i.\n", 70 | "$$" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": null, 76 | "id": "288bdeb8-305f-4181-8fad-d7d944845be4", 77 | "metadata": { 78 | "colab": { 79 | "base_uri": "https://localhost:8080/" 80 | }, 81 | "id": "288bdeb8-305f-4181-8fad-d7d944845be4", 82 | "outputId": "5deff0b3-c937-4714-f8e3-45a25bab9928" 83 | }, 84 | "outputs": [], 85 | "source": [ 86 | "# import some modules\n", 87 | "import netket as nk\n", 88 | "import flax.linen as nn\n", 89 | "\n", 90 | "import jax\n", 91 | "import jax.numpy as jnp\n", 92 | "import numpy as np\n", 93 | "\n", 94 | "import matplotlib.pyplot as plt\n", 95 | "from tqdm import tqdm\n", 96 | "\n", 97 | "from functools import partial\n", 98 | "\n", 99 | "rng = nk.jax.PRNGSeq(123)" 100 | ] 101 | }, 102 | { 103 | "cell_type": "markdown", 104 | "id": "d4a746f0-c15f-4f87-b5de-e9ffff6a5542", 105 | "metadata": { 106 | "id": "d05cc62a-6cc4-43f9-b9d5-b7dbb65a5c8f" 107 | }, 108 | "source": [ 109 | "First, let's setup the model and a simple variational ansatz. 
For demonstration purposes, let's use a very small spin system with an Ising Hamiltonian:" 110 | ] 111 | }, 112 | { 113 | "cell_type": "code", 114 | "execution_count": null, 115 | "id": "5ec4b666-ba2f-485e-9312-d236f7636ea4", 116 | "metadata": {}, 117 | "outputs": [], 118 | "source": [ 119 | "L = 8\n", 120 | "hilbert = nk.hilbert.Spin(1/2, N=L)" 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": null, 126 | "id": "a693d896-22e2-4b0b-8fab-6fb297e455d7", 127 | "metadata": {}, 128 | "outputs": [], 129 | "source": [ 130 | "print(hilbert)" 131 | ] 132 | }, 133 | { 134 | "cell_type": "code", 135 | "execution_count": null, 136 | "id": "15471ff0-c997-4c6c-89f1-bcbba91fc85a", 137 | "metadata": {}, 138 | "outputs": [], 139 | "source": [ 140 | "lat = nk.graph.Chain(length=L, pbc=True)\n", 141 | "lat.draw()" 142 | ] 143 | }, 144 | { 145 | "cell_type": "markdown", 146 | "id": "29842de5-7524-4dc8-9773-7a8856c53abf", 147 | "metadata": {}, 148 | "source": [ 149 | "Define two Ising Hamiltonians (to perform quenches later) as well as an observable, the magnetization along x." 150 | ] 151 | }, 152 | { 153 | "cell_type": "code", 154 | "execution_count": null, 155 | "id": "7d63f23f-fcee-4bfd-8647-92aff8099e18", 156 | "metadata": {}, 157 | "outputs": [], 158 | "source": [ 159 | "ham = nk.operator.Ising(hilbert, lat, h=1.0)\n", 160 | "ham" 161 | ] 162 | }, 163 | { 164 | "cell_type": "code", 165 | "execution_count": null, 166 | "id": "8c76e339-3b83-49c4-a342-8fa1822840cb", 167 | "metadata": {}, 168 | "outputs": [], 169 | "source": [ 170 | "ham1 = nk.operator.Ising(hilbert, lat, h=0.5)\n", 171 | "ham1" 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": null, 177 | "id": "06a2d86f-2f68-4b1b-925e-a7c01f333c00", 178 | "metadata": {}, 179 | "outputs": [], 180 | "source": [ 181 | "mag_x = sum(nk.operator.spin.sigmax(hilbert, i) for i in range(lat.n_nodes))\n", 182 | "mag_x" 183 | ] 184 | }, 185 | { 186 | "cell_type": "markdown", 187 | "id": "9c0419f2-d674-4e83-af77-2b3f31e8a25e", 188 | "metadata": {}, 189 | "source": [ 190 | "We use the Jastrow ansatz from yesterday as a first example:\n", 191 | "$$ \\langle \\sigma^{z}_1,\\dots \\sigma^{z}_N| \\Psi_{\\mathrm{jas}} \\rangle = \\cdot \\exp \\left( \\sum_i J_1 \\sigma^{z}_i\\sigma^{z}_{i+1} + J_2 \\sigma^{z}_i\\sigma^{z}_{i+2} \\right).$$" 192 | ] 193 | }, 194 | { 195 | "cell_type": "code", 196 | "execution_count": null, 197 | "id": "1324202c-2cf2-4c3f-ac88-75ac5c2bf38a", 198 | "metadata": {}, 199 | "outputs": [], 200 | "source": [ 201 | "class JasShort(nn.Module):\n", 202 | " @nn.compact\n", 203 | " def __call__(self, x):\n", 204 | " \n", 205 | " # Define the two variational parameters J1 and J2\n", 206 | " j1 = self.param(\n", 207 | " \"j1\", nn.initializers.normal(), (1,), complex\n", 208 | " )\n", 209 | " j2 =self.param(\n", 210 | " \"j2\", nn.initializers.normal(), (1,), complex\n", 211 | " )\n", 212 | "\n", 213 | " # compute the nearest-neighbor correlations\n", 214 | " corr1=x*jnp.roll(x,-1,axis=-1)\n", 215 | " corr2=x*jnp.roll(x,-2,axis=-1)\n", 216 | "\n", 217 | " # sum the output\n", 218 | " return jnp.sum(j1*corr1+j2*corr2,axis=-1)" 219 | ] 220 | }, 221 | { 222 | "cell_type": "code", 223 | "execution_count": null, 224 | "id": "43fa68cf-cd54-496f-88d2-b85e841c9509", 225 | "metadata": {}, 226 | "outputs": [], 227 | "source": [ 228 | "# Create MC state from ansatz\n", 229 | "sampler = nk.sampler.MetropolisLocal(hilbert, n_chains=32)\n", 230 | "ansatz = JasShort()\n", 231 | "vstate = nk.vqs.MCState(sampler, 
ansatz,\n", 232 | " n_samples=16000,\n", 233 | " sampler_seed=rng.next(), seed=rng.next())" 234 | ] 235 | }, 236 | { 237 | "cell_type": "markdown", 238 | "id": "7b13237e-6bd4-4bbf-bafe-ad8f1370e397", 239 | "metadata": {}, 240 | "source": [ 241 | "## 2. Time dependent variational Monte Carlo" 242 | ] 243 | }, 244 | { 245 | "cell_type": "markdown", 246 | "id": "3a230131-2f65-4518-9d42-df38cda9d678", 247 | "metadata": {}, 248 | "source": [ 249 | "You have heard in the lecture this morning how to do time evolution on a variational ansatz.\n", 250 | "Otherwise, helpful references for the derivation of the TDVP equations of motion are, e.g, Yuan et al. (Quantum 3, 191, 2019), and Stokes et al. (arXiv:2203.14824).\n", 251 | "\n", 252 | "We assume to have complex parameters $\\theta$ with holomorphic mapping $\\theta \\mapsto \\psi_\\theta.$\n", 253 | "In order to evolve the variational ansatz $$(\\theta, s) \\mapsto \\psi_\\theta(s)$$, we can locally optimize the fidelity\n", 254 | "$$\n", 255 | " \\max_{\\delta\\theta} |\\langle \\mathrm{e}^{-\\gamma \\hat H \\delta t} \\psi_{\\theta} | \\psi_{\\theta + \\delta\\theta} \\rangle|^2.\n", 256 | "$$\n", 257 | "Taylor expanding this condition to second order in $\\delta\\theta$ and $\\delta t$ yields after some steps the equation of motion\n", 258 | "$$\n", 259 | "G(\\theta) \\, \\dot\\theta = -\\gamma F(\\theta, t)\n", 260 | "$$\n", 261 | "with the quantum geometric tensor\n", 262 | "$$\n", 263 | " G_{ij}(\\theta) = \\frac{\n", 264 | " \\langle\\partial_i\\psi_\\theta | \\partial_j\\psi_\\theta\\rangle\n", 265 | " }{\n", 266 | " \\langle \\psi_\\theta | \\psi_\\theta \\rangle\n", 267 | " } - \\frac{\n", 268 | " \\langle\\partial_i\\psi_\\theta | \\psi_\\theta \\rangle\\langle \\psi_\\theta | \\partial_j\\psi_\\theta\\rangle\n", 269 | " }{\n", 270 | " \\langle \\psi_\\theta | \\psi_\\theta \\rangle^2\n", 271 | " }\n", 272 | "$$and gradient $$ F_i(\\theta, t) = \\frac{\\partial\\langle \\hat H \\rangle}{\\partial\\theta_i^*}$$\n", 273 | "$\\gamma = 1$ results in imaginary time evolution, $\\gamma = \\mathrm i$ gives real time evolution instead." 
274 | ] 275 | }, 276 | { 277 | "cell_type": "markdown", 278 | "id": "c0d1c058-ce73-4ef3-b1ae-9935739fb633", 279 | "metadata": {}, 280 | "source": [ 281 | "$G$ and $F$ can be estimated using Monte Carlo sampling: Given $s \\sim |\\psi_\\theta(s)|^2$, we can estimate those quantities by\n", 282 | "$$\n", 283 | " G_{ij} = \\operatorname{cov}(o_i, o_j)\n", 284 | " \\qquad\n", 285 | " F_i = \\operatorname{cov}(o_i, h)\n", 286 | "$$\n", 287 | "whith the local energy\n", 288 | "$$\n", 289 | "h(s) = \\frac{\\langle s | \\hat H | \\psi_\\theta \\rangle}{\\langle s | \\psi_\\theta \\rangle}\n", 290 | " = \\sum_{s'} \\frac{\\psi_\\theta(s')}{\\psi_\\theta(s)} \\langle s | \\hat H | s' \\rangle\n", 291 | "$$\n", 292 | "and \"quantum score function\"\n", 293 | "$$\n", 294 | " o_j(s) = \\frac{\\partial\\ln\\psi_\\theta(s)}{\\partial\\theta_j}.\n", 295 | "$$" 296 | ] 297 | }, 298 | { 299 | "cell_type": "markdown", 300 | "id": "4e3b58c9-1c08-4b98-887c-90b99d32432a", 301 | "metadata": {}, 302 | "source": [ 303 | "In NetKet, the quantum geometric tensor $G(\\theta)$ is available from the variational state class:" 304 | ] 305 | }, 306 | { 307 | "cell_type": "code", 308 | "execution_count": null, 309 | "id": "49870150-b720-4ba7-af74-403dc4b7481d", 310 | "metadata": {}, 311 | "outputs": [], 312 | "source": [ 313 | "vstate.quantum_geometric_tensor()" 314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "execution_count": null, 319 | "id": "836d1805-e7f7-4e27-8b69-c8f838e67bf7", 320 | "metadata": {}, 321 | "outputs": [], 322 | "source": [ 323 | "vstate.quantum_geometric_tensor().to_dense()" 324 | ] 325 | }, 326 | { 327 | "cell_type": "markdown", 328 | "id": "9d59acac-9580-438c-b6fd-d22700267d40", 329 | "metadata": {}, 330 | "source": [ 331 | "### 2.1 DIY time stepping loop" 332 | ] 333 | }, 334 | { 335 | "cell_type": "markdown", 336 | "id": "d54a3d4b-ac92-4bf2-b27f-072334dc920f", 337 | "metadata": {}, 338 | "source": [ 339 | "Let us build a very simple ODE solver based on the Euler method where, at each time step, we update our state as\n", 340 | "$$\n", 341 | " \\theta(t + \\delta t) = \\theta(t) + \\dot\\theta \\delta t.\n", 342 | "$$\n", 343 | "As an interactive task, I will now give you 10-15 min of time to try and implement a solver loop that, given a Hamiltonian, vstate, initial time t0, (fixed) time step dt, end time t_end, and the factor gamma from above performs t-VMC time propagation.\n", 344 | "\n", 345 | "Some hints:\n", 346 | " * `vstate.expect_and_grad` gives you the expectation value and gradient of an operator.\n", 347 | " * We have seen `vstate.quantum_geomtric_tensor` above.\n", 348 | " * A standard method for solving a linear (least-squares) equation system is `jnp.linalg.lstsq`. (There are others, which you can also use.)\n", 349 | " * Remember to update `vstate.parameters` is a PyTree (and you need to update it in the end)." 
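,
    "\n",
    " * (Optional) If you want to assemble $G$ and $F$ by hand, the covariances above are ordinary sample covariances over the $N_s$ Monte Carlo samples, e.g. $F_i \\approx \\frac{1}{N_s}\\sum_k \\left(o_i(s_k) - \\bar o_i\\right)^* \\left(h(s_k) - \\bar h\\right)$ with $\\bar o_i$, $\\bar h$ the sample means; the reference code below simply lets NetKet compute them."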
350 | ] 351 | }, 352 | { 353 | "cell_type": "code", 354 | "execution_count": null, 355 | "id": "08ddbcdc-9f68-415b-b813-b30884f160f2", 356 | "metadata": {}, 357 | "outputs": [], 358 | "source": [ 359 | "# version 1, using netket.optimizer.solver.svd\n", 360 | "def time_propagation(hamiltonian, vstate, t0, dt, t_end, gamma=1.0j):\n", 361 | " t = t0\n", 362 | " while t < t_end:\n", 363 | " # get energy and gradient\n", 364 | " E, F = vstate.expect_and_grad(hamiltonian)\n", 365 | " # get the QGT object from the variational state\n", 366 | " G = vstate.quantum_geometric_tensor()\n", 367 | " # multiply F by the factor -gamma\n", 368 | " F = jax.tree_map(lambda f: -gamma * f, F)\n", 369 | " # use G.solve and the SVD solver nk.optimizer.solver.svd\n", 370 | " dtheta, _ = G.solve(nk.optimizer.solver.svd, F)\n", 371 | " # apply update theta += dt * dtheta\n", 372 | " vstate.parameters = jax.tree_map(\n", 373 | " lambda x, y: x + dt * y, vstate.parameters, dtheta\n", 374 | " )\n", 375 | " t = t + dt\n", 376 | " yield t, vstate.expect(hamiltonian), vstate.expect(mag_x)\n", 377 | " \n", 378 | "# version 2, using jnp.linalg.lstsq (which requires unpacking and repacking\n", 379 | "# the parameters into a pytree\n", 380 | "def time_propagation2(hamiltonian, vstate, t0, dt, t_end, gamma=1.0j):\n", 381 | " t = t0\n", 382 | " while t < t_end:\n", 383 | " # get energy and gradient\n", 384 | " E, F = vstate.expect_and_grad(hamiltonian)\n", 385 | " \n", 386 | " # convert G and F to arrays to pass to lstsq\n", 387 | " G = vstate.quantum_geometric_tensor().to_dense()\n", 388 | " # convert F to a vector; the second return value\n", 389 | " # is a function that can convert vectors back to the\n", 390 | " # pytree structure of F (which is the same structure as\n", 391 | " # the params\n", 392 | " F, unravel_params = nk.jax.tree_ravel(F)\n", 393 | " F *= -gamma\n", 394 | " \n", 395 | " # lstsq returns dtheta and some other stuff, which we ignore by\n", 396 | " # assigning them to `_*`\n", 397 | " # rcond cuts off very small singular values of G when solving the equation\n", 398 | " dtheta, *_ = jnp.linalg.lstsq(G, F, rcond=1e-14)\n", 399 | " # convert back to a pytree\n", 400 | " dtheta = unravel_params(dtheta)\n", 401 | " \n", 402 | " vstate.parameters = jax.tree_map(\n", 403 | " lambda x, y: x + dt * y, vstate.parameters, dtheta)\n", 404 | " t = t + dt\n", 405 | " yield t, vstate.expect(hamiltonian), vstate.expect(mag_x)" 406 | ] 407 | }, 408 | { 409 | "cell_type": "code", 410 | "execution_count": null, 411 | "id": "8e011be1-6df5-4797-9cfd-2d555c05e337", 412 | "metadata": {}, 413 | "outputs": [], 414 | "source": [] 415 | }, 416 | { 417 | "cell_type": "code", 418 | "execution_count": null, 419 | "id": "ee02efc2-b9c4-441f-a199-1d52deb9da30", 420 | "metadata": {}, 421 | "outputs": [], 422 | "source": [] 423 | }, 424 | { 425 | "cell_type": "code", 426 | "execution_count": null, 427 | "id": "e2058078-a0e4-4aaf-b335-15e56796cb7c", 428 | "metadata": {}, 429 | "outputs": [], 430 | "source": [ 431 | "# We want to test it on our example system like this (gamma=-1 gives us imaginary-time propagation):\n", 432 | "times = []\n", 433 | "energies = []\n", 434 | "mag = []\n", 435 | "with tqdm(time_propagation2(ham, vstate, t0=0, dt=0.01, t_end=4, gamma=1)) as progress:\n", 436 | " # we make use of time, energy, and magnetization being returned from the solver loop\n", 437 | " for t, E, mx in progress:\n", 438 | " times.append(t)\n", 439 | " energies.append(E)\n", 440 | " mag.append(mx)\n", 441 | " progress.set_postfix(t=t)" 442 | 
] 443 | }, 444 | { 445 | "cell_type": "code", 446 | "execution_count": null, 447 | "id": "9a718ef3-f49c-41bb-b96b-c8ca45d4387a", 448 | "metadata": {}, 449 | "outputs": [], 450 | "source": [ 451 | "# Plot the results" 452 | ] 453 | }, 454 | { 455 | "cell_type": "code", 456 | "execution_count": null, 457 | "id": "30bd8fe7-1cfe-478b-b548-d026461a7680", 458 | "metadata": {}, 459 | "outputs": [], 460 | "source": [ 461 | "plt.plot(times, [e.mean.real for e in energies])" 462 | ] 463 | }, 464 | { 465 | "cell_type": "code", 466 | "execution_count": null, 467 | "id": "2aa404ad-4148-4895-ace0-16b0083d6cb9", 468 | "metadata": {}, 469 | "outputs": [], 470 | "source": [ 471 | "plt.plot(times, [m.mean.real for m in mag])" 472 | ] 473 | }, 474 | { 475 | "cell_type": "markdown", 476 | "id": "6d48daee-247f-4b0b-a45f-1df23311e1d8", 477 | "metadata": {}, 478 | "source": [ 479 | "Save the optimal parameters we have found:" 480 | ] 481 | }, 482 | { 483 | "cell_type": "code", 484 | "execution_count": null, 485 | "id": "47b0c46d-3c35-40d3-b288-ac41b0bb7721", 486 | "metadata": {}, 487 | "outputs": [], 488 | "source": [ 489 | "params0 = jax.tree_map(np.copy, vstate.parameters)" 490 | ] 491 | }, 492 | { 493 | "cell_type": "markdown", 494 | "id": "b0820097-86c6-45bb-90f4-faf8c36a143b", 495 | "metadata": {}, 496 | "source": [ 497 | "Let's try some real-time propagation, starting from the approximate state we found just now:" 498 | ] 499 | }, 500 | { 501 | "cell_type": "code", 502 | "execution_count": null, 503 | "id": "574a88f7-af9b-4c7d-b0b3-df9e3eaa5688", 504 | "metadata": {}, 505 | "outputs": [], 506 | "source": [ 507 | "vstate.parameters = jax.tree_map(np.copy, params0)\n", 508 | "\n", 509 | "times = []\n", 510 | "energies = []\n", 511 | "mag = []\n", 512 | "with tqdm(time_propagation(ham1, vstate, t0=0, dt=0.001, t_end=1.0, gamma=1j)) as progress:\n", 513 | " for t, E, mx in progress:\n", 514 | " times.append(t)\n", 515 | " energies.append(E)\n", 516 | " mag.append(mx)\n", 517 | " \n", 518 | " progress.set_postfix(t=t, E=E)" 519 | ] 520 | }, 521 | { 522 | "cell_type": "code", 523 | "execution_count": null, 524 | "id": "8ff135f6-d963-495c-a80d-827d5a3704d2", 525 | "metadata": {}, 526 | "outputs": [], 527 | "source": [ 528 | "plt.plot(times, [e.mean.real for e in energies])" 529 | ] 530 | }, 531 | { 532 | "cell_type": "code", 533 | "execution_count": null, 534 | "id": "9d5cd270-1721-4deb-9e3b-88e40a23a8c7", 535 | "metadata": {}, 536 | "outputs": [], 537 | "source": [ 538 | "plt.plot(times, [m.mean.real for m in mag])" 539 | ] 540 | }, 541 | { 542 | "cell_type": "markdown", 543 | "id": "aa776dd2-d313-40fe-8bb7-ef14a4381af6", 544 | "metadata": {}, 545 | "source": [ 546 | "### 2.2 NetKet TDVP driver" 547 | ] 548 | }, 549 | { 550 | "cell_type": "markdown", 551 | "id": "70fcf415-35cb-455b-b4a1-8363f1186672", 552 | "metadata": {}, 553 | "source": [ 554 | "NetKet provides a `TDVP` driver that perform time propagation based on the same ideas we have used above, but includes a lot of feaures beyond that (in particular, Runge-Kutta adaptive and fixed step size integrators of various orders)." 
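,
    "\n",
    "For example, the fixed-step integrators used below can be swapped for an adaptive Runge-Kutta scheme along these lines (the exact class and argument names should be checked against the NetKet documentation for your version; `RK23` with `adaptive=True` is one plausible choice):\n",
    "```python\n",
    "integrator = nkx.dynamics.RK23(dt=0.01, adaptive=True, rtol=1e-3)\n",
    "```"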
555 | ] 556 | }, 557 | { 558 | "cell_type": "code", 559 | "execution_count": null, 560 | "id": "5c2131bc-5c09-424a-9058-57a6d01c49d1", 561 | "metadata": {}, 562 | "outputs": [], 563 | "source": [ 564 | "import netket.experimental as nkx" 565 | ] 566 | }, 567 | { 568 | "cell_type": "code", 569 | "execution_count": null, 570 | "id": "0a56a45e-e841-48b3-b4ae-df5bd70fd775", 571 | "metadata": { 572 | "tags": [] 573 | }, 574 | "outputs": [], 575 | "source": [ 576 | "vstate.parameters = jax.tree_map(np.copy, params0)\n", 577 | "\n", 578 | "integrator = nkx.dynamics.Euler(dt=0.001)\n", 579 | "\n", 580 | "driver = nkx.TDVP(\n", 581 | " ham1,\n", 582 | " vstate,\n", 583 | " integrator,\n", 584 | " linear_solver=nk.optimizer.solver.svd,\n", 585 | " qgt=nk.optimizer.qgt.QGTJacobianDense(holomorphic=True),\n", 586 | ")\n", 587 | "\n", 588 | "log = nk.logging.RuntimeLog()\n", 589 | "driver.run(T=1.0, obs={\"mx\": mag_x}, out=log)" 590 | ] 591 | }, 592 | { 593 | "cell_type": "code", 594 | "execution_count": null, 595 | "id": "76e16888-ac39-40ec-9319-59c6a9174078", 596 | "metadata": {}, 597 | "outputs": [], 598 | "source": [ 599 | "plt.plot(log[\"Generator\"][\"iters\"], log[\"Generator\"][\"Mean\"].real)" 600 | ] 601 | }, 602 | { 603 | "cell_type": "code", 604 | "execution_count": null, 605 | "id": "c41872ac-7370-4889-8f25-b623fd88f51f", 606 | "metadata": {}, 607 | "outputs": [], 608 | "source": [ 609 | "plt.plot(log[\"mx\"][\"iters\"], log[\"mx\"][\"Mean\"].real)" 610 | ] 611 | }, 612 | { 613 | "cell_type": "markdown", 614 | "id": "0cacd99e-7a99-4a09-81e8-8cb2ed4bc4ea", 615 | "metadata": {}, 616 | "source": [ 617 | "### 2.3 Check result for small system" 618 | ] 619 | }, 620 | { 621 | "cell_type": "code", 622 | "execution_count": null, 623 | "id": "8af55843-dd99-4cbe-91d8-47ca3867f3f8", 624 | "metadata": { 625 | "tags": [] 626 | }, 627 | "outputs": [], 628 | "source": [ 629 | "%pip install qutip" 630 | ] 631 | }, 632 | { 633 | "cell_type": "code", 634 | "execution_count": null, 635 | "id": "c3c70dbf-2290-411a-ac85-942122869b87", 636 | "metadata": {}, 637 | "outputs": [], 638 | "source": [ 639 | "import qutip" 640 | ] 641 | }, 642 | { 643 | "cell_type": "code", 644 | "execution_count": null, 645 | "id": "abc282a7-3954-4d5c-845f-8b3aaa70a2f0", 646 | "metadata": {}, 647 | "outputs": [], 648 | "source": [ 649 | "hamQ = ham1.to_qobj()\n", 650 | "mag_xQ = mag_x.to_qobj()\n", 651 | "\n", 652 | "vstate.parameters = jax.tree_map(np.copy, params0)\n", 653 | "psiQ = vstate.to_qobj()" 654 | ] 655 | }, 656 | { 657 | "cell_type": "code", 658 | "execution_count": null, 659 | "id": "1f614c4f-2b6f-46fb-b466-e9dad946cf8e", 660 | "metadata": {}, 661 | "outputs": [], 662 | "source": [ 663 | "result = qutip.sesolve(hamQ, psiQ, tlist=times, e_ops=[hamQ, mag_xQ])" 664 | ] 665 | }, 666 | { 667 | "cell_type": "code", 668 | "execution_count": null, 669 | "id": "53617d33-011a-4bca-a6b4-1f7b292b3f77", 670 | "metadata": {}, 671 | "outputs": [], 672 | "source": [ 673 | "plt.plot(log[\"mx\"][\"iters\"], log[\"mx\"][\"Mean\"].real)\n", 674 | "plt.plot(result.times, result.expect[1], \"k--\")" 675 | ] 676 | }, 677 | { 678 | "cell_type": "markdown", 679 | "id": "b4d11e2c-3b57-426d-b1e9-e30b4be18d17", 680 | "metadata": { 681 | "tags": [] 682 | }, 683 | "source": [ 684 | "It seems clear that the two-parameter Jastrow ansatz we have used is not up to the task of representing the Ising quench dynamics.\n", 685 | "\n", 686 | "So, let's use an actual neural quantum state:" 687 | ] 688 | }, 689 | { 690 | "cell_type": "code", 691 | 
"execution_count": null, 692 | "id": "77769830-fa33-4d0e-8f9b-10c068892778", 693 | "metadata": {}, 694 | "outputs": [], 695 | "source": [ 696 | "ansatz_nqs = nk.models.RBM(alpha=1, dtype=complex)\n", 697 | "sampler_nqs = nk.sampler.MetropolisLocal(hilbert, n_chains=32)\n", 698 | "vstate_nqs = nk.vqs.MCState(sampler_nqs, ansatz_nqs,\n", 699 | " n_samples=1024,\n", 700 | " sampler_seed=rng.next(), seed=rng.next())" 701 | ] 702 | }, 703 | { 704 | "cell_type": "code", 705 | "execution_count": null, 706 | "id": "91b41981-d0b9-4c02-908c-55262b110df8", 707 | "metadata": { 708 | "tags": [] 709 | }, 710 | "outputs": [], 711 | "source": [ 712 | "integrator = nkx.dynamics.Heun(dt=0.01)\n", 713 | "\n", 714 | "driver = nkx.TDVP(\n", 715 | " ham,\n", 716 | " vstate_nqs,\n", 717 | " integrator,\n", 718 | " linear_solver=nk.optimizer.solver.svd,\n", 719 | " qgt=nk.optimizer.qgt.QGTJacobianDense(holomorphic=True),\n", 720 | " propagation_type=\"imag\",\n", 721 | ")\n", 722 | "\n", 723 | "log = nk.logging.RuntimeLog()\n", 724 | "driver.run(T=5.0, obs={\"mx\": mag_x}, out=log)" 725 | ] 726 | }, 727 | { 728 | "cell_type": "markdown", 729 | "id": "a935bb1e-57a9-49e5-b103-b41538df2404", 730 | "metadata": {}, 731 | "source": [ 732 | "(Since this takes a moment, I have saved the ground state locally to have the option to load it. You don't need to do this.)" 733 | ] 734 | }, 735 | { 736 | "cell_type": "code", 737 | "execution_count": null, 738 | "id": "5f4be5e3-e5f5-445e-aa78-8cce10594fd4", 739 | "metadata": {}, 740 | "outputs": [], 741 | "source": [ 742 | "import flax" 743 | ] 744 | }, 745 | { 746 | "cell_type": "code", 747 | "execution_count": null, 748 | "id": "31d34cbe-e3a1-42d4-a498-c9eba2820616", 749 | "metadata": {}, 750 | "outputs": [], 751 | "source": [ 752 | "# with open(\"NQS_Dyn_RBM1.mpack\", \"wb\") as fp:\n", 753 | "# fp.write(flax.serialization.to_bytes(vstate_nqs.variables))" 754 | ] 755 | }, 756 | { 757 | "cell_type": "code", 758 | "execution_count": null, 759 | "id": "67e6289a-b8a9-482c-a9e2-5faf08f77942", 760 | "metadata": { 761 | "tags": [] 762 | }, 763 | "outputs": [], 764 | "source": [ 765 | "with open(\"NQS_Dyn_RBM1.mpack\", \"rb\") as fp:\n", 766 | " vstate_nqs.variables = flax.serialization.from_bytes(vstate_nqs.variables, fp.read())" 767 | ] 768 | }, 769 | { 770 | "cell_type": "code", 771 | "execution_count": null, 772 | "id": "61b4a64f-c025-422c-abc0-64871677ab83", 773 | "metadata": { 774 | "tags": [] 775 | }, 776 | "outputs": [], 777 | "source": [ 778 | "hamQ = ham1.to_qobj()\n", 779 | "mag_xQ = mag_x.to_qobj()\n", 780 | "psiQ = vstate_nqs.to_qobj()\n", 781 | "result = qutip.sesolve(hamQ, psiQ, tlist=np.linspace(0, 1.0, 100), e_ops=[hamQ, mag_xQ])" 782 | ] 783 | }, 784 | { 785 | "cell_type": "code", 786 | "execution_count": null, 787 | "id": "d0f80dbf-5aa7-4f20-8541-45b93ccfd9c1", 788 | "metadata": { 789 | "tags": [] 790 | }, 791 | "outputs": [], 792 | "source": [ 793 | "integrator = nkx.dynamics.Heun(dt=0.005)\n", 794 | "vstate_nqs.n_samples=16000\n", 795 | "driver = nkx.TDVP(\n", 796 | " ham1,\n", 797 | " vstate_nqs,\n", 798 | " integrator,\n", 799 | " linear_solver=nk.optimizer.solver.svd,\n", 800 | " qgt=nk.optimizer.qgt.QGTJacobianDense(holomorphic=True),\n", 801 | ")\n", 802 | "\n", 803 | "log = nk.logging.RuntimeLog()\n", 804 | "driver.run(T=1.0, obs={\"mx\": mag_x}, out=log)" 805 | ] 806 | }, 807 | { 808 | "cell_type": "code", 809 | "execution_count": null, 810 | "id": "5e125177-3707-46e1-8f73-9dc74dd7ae26", 811 | "metadata": {}, 812 | "outputs": [], 813 | "source": [] 814 | }, 
815 | { 816 | "cell_type": "code", 817 | "execution_count": null, 818 | "id": "29c9f363-ad8c-4b1b-b7f6-8224b3374e13", 819 | "metadata": {}, 820 | "outputs": [], 821 | "source": [ 822 | "plt.plot(log[\"mx\"][\"iters\"], log[\"mx\"][\"Mean\"].real)\n", 823 | "plt.plot(result.times, result.expect[1], \"k--\")" 824 | ] 825 | }, 826 | { 827 | "cell_type": "markdown", 828 | "id": "5994fadf-e078-41b7-979c-4d793f01eb9f", 829 | "metadata": {}, 830 | "source": [ 831 | "### 2.4 Quantum geometric tensor" 832 | ] 833 | }, 834 | { 835 | "cell_type": "markdown", 836 | "id": "5cd0b0c1-05db-479d-860e-7b5cdfa072bb", 837 | "metadata": { 838 | "tags": [] 839 | }, 840 | "source": [ 841 | "As its name suggests, the quantum geometric tensor $G(\\theta)$ has a geometric meaning: It imposes a curvature on space of variational parameters. This curvature accounts for the fact that different directions in parameter space affect the quantum state to different degrees." 842 | ] 843 | }, 844 | { 845 | "cell_type": "markdown", 846 | "id": "9f108056-6e1c-4673-9944-5183f1d94136", 847 | "metadata": {}, 848 | "source": [ 849 | "The most extreme case is a parameter corresponding to a pure gauge freedom:" 850 | ] 851 | }, 852 | { 853 | "cell_type": "code", 854 | "execution_count": null, 855 | "id": "a473cf00-5a41-4069-b44f-3a303e31dee5", 856 | "metadata": {}, 857 | "outputs": [], 858 | "source": [ 859 | "class JasShortExtra(nn.Module):\n", 860 | " @nn.compact\n", 861 | " def __call__(self, x):\n", 862 | " j1 = self.param(\n", 863 | " \"j1\", nn.initializers.normal(), (1,), complex\n", 864 | " )\n", 865 | " j2 = self.param(\n", 866 | " \"j2\", nn.initializers.normal(), (1,), complex\n", 867 | " )\n", 868 | " extra = self.param(\n", 869 | " \"extra\", nn.initializers.normal(), (1,), complex\n", 870 | " )\n", 871 | "\n", 872 | " # compute the nearest-neighbor correlations\n", 873 | " corr1=x*jnp.roll(x,-1,axis=-1)\n", 874 | " corr2=x*jnp.roll(x,-2,axis=-1)\n", 875 | "\n", 876 | " # sum the output\n", 877 | " return jnp.sum(j1*corr1+j2*corr2,axis=-1) + extra" 878 | ] 879 | }, 880 | { 881 | "cell_type": "markdown", 882 | "id": "bd0495bc-e019-410d-9ecb-9d72b78a323f", 883 | "metadata": {}, 884 | "source": [ 885 | "Our wave function is now\n", 886 | "$$ \\langle \\sigma^{z}_1,\\dots \\sigma^{z}_N| \\Psi_{\\mathrm{jas}} \\rangle = \\mathtt{extra} \\cdot \\exp \\left( \\sum_i J_1 \\sigma^{z}_i\\sigma^{z}_{i+1} + J_2 \\sigma^{z}_i\\sigma^{z}_{i+2} \\right),$$ which only changes norm and global phase of the quantum state.\n", 887 | "\n", 888 | "Let's see how this affects the QGT:" 889 | ] 890 | }, 891 | { 892 | "cell_type": "code", 893 | "execution_count": null, 894 | "id": "bd64180f-8903-446f-b94a-e0eaa84e56b0", 895 | "metadata": {}, 896 | "outputs": [], 897 | "source": [ 898 | "jastrow = JasShortExtra()\n", 899 | "vs = nk.vqs.MCState(sampler, jastrow)\n", 900 | "G = vs.quantum_geometric_tensor().to_dense()" 901 | ] 902 | }, 903 | { 904 | "cell_type": "code", 905 | "execution_count": null, 906 | "id": "107d336d-429f-465b-a879-2b56eab31370", 907 | "metadata": {}, 908 | "outputs": [], 909 | "source": [ 910 | "G" 911 | ] 912 | }, 913 | { 914 | "cell_type": "markdown", 915 | "id": "65d9d874-f597-4aef-b352-4020fe940f9a", 916 | "metadata": {}, 917 | "source": [ 918 | "The gauge freedom creates a subspace that is anihilated by the QGT. Since this gauge freedom is exactly along the direction of the parameter `extra`, it is immediately visible in the QGT matrix. 
Generally, such redundant directions can be seen in the QGT's spectrum:" 919 | ] 920 | }, 921 | { 922 | "cell_type": "code", 923 | "execution_count": null, 924 | "id": "bf34692f-b818-4769-b9c6-e78b91929d63", 925 | "metadata": {}, 926 | "outputs": [], 927 | "source": [ 928 | "jnp.linalg.eigvalsh(G)" 929 | ] 930 | }, 931 | { 932 | "cell_type": "markdown", 933 | "id": "6daafc13-1cc1-4424-b6a4-9847334cfa9b", 934 | "metadata": {}, 935 | "source": [ 936 | "For a neural quantum state, there is no single parameter that only changes gauge degrees of freedom. The QGT is fully dense:" 937 | ] 938 | }, 939 | { 940 | "cell_type": "code", 941 | "execution_count": null, 942 | "id": "ce8538e2-66cd-4dd1-8991-a45c800d823d", 943 | "metadata": {}, 944 | "outputs": [], 945 | "source": [ 946 | "vstate_nqs.init_parameters()\n", 947 | "G = vstate_nqs.quantum_geometric_tensor().to_dense()" 948 | ] 949 | }, 950 | { 951 | "cell_type": "code", 952 | "execution_count": null, 953 | "id": "3cbd2df1-79a3-4131-b15c-d35d24a85dc0", 954 | "metadata": { 955 | "tags": [] 956 | }, 957 | "outputs": [], 958 | "source": [ 959 | "G" 960 | ] 961 | }, 962 | { 963 | "cell_type": "code", 964 | "execution_count": null, 965 | "id": "b0cbe944-5357-4d98-9d97-a91f7a2642b5", 966 | "metadata": {}, 967 | "outputs": [], 968 | "source": [ 969 | "spectrum = jnp.linalg.eigvalsh(G + 0.01 * np.eye(G.shape[0]))" 970 | ] 971 | }, 972 | { 973 | "cell_type": "code", 974 | "execution_count": null, 975 | "id": "acf63ad4-f843-49c5-8074-67c99955da18", 976 | "metadata": {}, 977 | "outputs": [], 978 | "source": [ 979 | "plt.plot(spectrum)\n", 980 | "plt.semilogy()" 981 | ] 982 | }, 983 | { 984 | "cell_type": "markdown", 985 | "id": "88e7cf04-b0a0-49a1-bd85-31e8eab35c26", 986 | "metadata": {}, 987 | "source": [ 988 | "This is a typical example for an NQS (especially with one that has more hidden units than the shallow $\\alpha = 1$ RBM): Eigenvalues of the QGT span several orders of magnitude, making the solution of the t-VMC equation sensitive to noise.\n", 989 | "\n", 990 | "This can make it necessary to use regularization methods (diagonal shift, spectral cutoff for the QGT, or more advanced methods -- references have been given in Giuseppe's lecture.)" 991 | ] 992 | }, 993 | { 994 | "cell_type": "code", 995 | "execution_count": null, 996 | "id": "6cb0e14b-1a9b-4f56-92fd-b58e72e937f1", 997 | "metadata": {}, 998 | "outputs": [], 999 | "source": [] 1000 | } 1001 | ], 1002 | "metadata": { 1003 | "colab": { 1004 | "collapsed_sections": [], 1005 | "name": "1-start.ipynb", 1006 | "provenance": [] 1007 | }, 1008 | "kernelspec": { 1009 | "display_name": "Python 3", 1010 | "language": "python", 1011 | "name": "python3" 1012 | }, 1013 | "language_info": { 1014 | "codemirror_mode": { 1015 | "name": "ipython", 1016 | "version": 3 1017 | }, 1018 | "file_extension": ".py", 1019 | "mimetype": "text/x-python", 1020 | "name": "python", 1021 | "nbconvert_exporter": "python", 1022 | "pygments_lexer": "ipython3", 1023 | "version": "3.8.10" 1024 | } 1025 | }, 1026 | "nbformat": 4, 1027 | "nbformat_minor": 5 1028 | } 1029 | -------------------------------------------------------------------------------- /2206_Julia/1_projects/Manifest.toml: -------------------------------------------------------------------------------- 1 | # This file is machine-generated - editing it directly is not advised 2 | 3 | julia_version = "1.7.3" 4 | manifest_format = "2.0" 5 | 6 | [[deps.Adapt]] 7 | deps = ["LinearAlgebra"] 8 | git-tree-sha1 = "af92965fb30777147966f58acb05da51c5616b5f" 9 | uuid = 
"79e6a3ab-5dfb-504d-930d-738a2a938a0e" 10 | version = "3.3.3" 11 | 12 | [[deps.ArgTools]] 13 | uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" 14 | 15 | [[deps.Artifacts]] 16 | uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" 17 | 18 | [[deps.Base64]] 19 | uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" 20 | 21 | [[deps.Bzip2_jll]] 22 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 23 | git-tree-sha1 = "19a35467a82e236ff51bc17a3a44b69ef35185a2" 24 | uuid = "6e34b625-4abd-537c-b88f-471c36dfa7a0" 25 | version = "1.0.8+0" 26 | 27 | [[deps.Cairo_jll]] 28 | deps = ["Artifacts", "Bzip2_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"] 29 | git-tree-sha1 = "4b859a208b2397a7a623a03449e4636bdb17bcf2" 30 | uuid = "83423d85-b0ee-5818-9007-b63ccbeb887a" 31 | version = "1.16.1+1" 32 | 33 | [[deps.ChainRulesCore]] 34 | deps = ["Compat", "LinearAlgebra", "SparseArrays"] 35 | git-tree-sha1 = "9489214b993cd42d17f44c36e359bf6a7c919abf" 36 | uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" 37 | version = "1.15.0" 38 | 39 | [[deps.ChangesOfVariables]] 40 | deps = ["ChainRulesCore", "LinearAlgebra", "Test"] 41 | git-tree-sha1 = "1e315e3f4b0b7ce40feded39c73049692126cf53" 42 | uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0" 43 | version = "0.1.3" 44 | 45 | [[deps.ColorSchemes]] 46 | deps = ["ColorTypes", "ColorVectorSpace", "Colors", "FixedPointNumbers", "Random"] 47 | git-tree-sha1 = "7297381ccb5df764549818d9a7d57e45f1057d30" 48 | uuid = "35d6a980-a343-548e-a6ea-1d62b119f2f4" 49 | version = "3.18.0" 50 | 51 | [[deps.ColorTypes]] 52 | deps = ["FixedPointNumbers", "Random"] 53 | git-tree-sha1 = "0f4e115f6f34bbe43c19751c90a38b2f380637b9" 54 | uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f" 55 | version = "0.11.3" 56 | 57 | [[deps.ColorVectorSpace]] 58 | deps = ["ColorTypes", "FixedPointNumbers", "LinearAlgebra", "SpecialFunctions", "Statistics", "TensorCore"] 59 | git-tree-sha1 = "d08c20eef1f2cbc6e60fd3612ac4340b89fea322" 60 | uuid = "c3611d14-8923-5661-9e6a-0046d554d3a4" 61 | version = "0.9.9" 62 | 63 | [[deps.Colors]] 64 | deps = ["ColorTypes", "FixedPointNumbers", "Reexport"] 65 | git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40" 66 | uuid = "5ae59095-9a9b-59fe-a467-6f913c188581" 67 | version = "0.12.8" 68 | 69 | [[deps.Compat]] 70 | deps = ["Dates", "LinearAlgebra", "UUIDs"] 71 | git-tree-sha1 = "924cdca592bc16f14d2f7006754a621735280b74" 72 | uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" 73 | version = "4.1.0" 74 | 75 | [[deps.CompilerSupportLibraries_jll]] 76 | deps = ["Artifacts", "Libdl"] 77 | uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" 78 | 79 | [[deps.Contour]] 80 | deps = ["StaticArrays"] 81 | git-tree-sha1 = "9f02045d934dc030edad45944ea80dbd1f0ebea7" 82 | uuid = "d38c429a-6771-53c6-b99e-75d170b6e991" 83 | version = "0.5.7" 84 | 85 | [[deps.DataAPI]] 86 | git-tree-sha1 = "fb5f5316dd3fd4c5e7c30a24d50643b73e37cd40" 87 | uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a" 88 | version = "1.10.0" 89 | 90 | [[deps.DataStructures]] 91 | deps = ["Compat", "InteractiveUtils", "OrderedCollections"] 92 | git-tree-sha1 = "d1fff3a548102f48987a52a2e0d114fa97d730f0" 93 | uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" 94 | version = "0.18.13" 95 | 96 | [[deps.DataValueInterfaces]] 97 | git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6" 98 | uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464" 99 | version = "1.0.0" 100 | 101 | [[deps.Dates]] 102 | deps = ["Printf"] 103 | uuid = 
"ade2ca70-3891-5945-98fb-dc099432e06a" 104 | 105 | [[deps.DelimitedFiles]] 106 | deps = ["Mmap"] 107 | uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" 108 | 109 | [[deps.DocStringExtensions]] 110 | deps = ["LibGit2"] 111 | git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b" 112 | uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" 113 | version = "0.8.6" 114 | 115 | [[deps.Downloads]] 116 | deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] 117 | uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" 118 | 119 | [[deps.EarCut_jll]] 120 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 121 | git-tree-sha1 = "3f3a2501fa7236e9b911e0f7a588c657e822bb6d" 122 | uuid = "5ae413db-bbd1-5e63-b57d-d24a61df00f5" 123 | version = "2.2.3+0" 124 | 125 | [[deps.Expat_jll]] 126 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 127 | git-tree-sha1 = "bad72f730e9e91c08d9427d5e8db95478a3c323d" 128 | uuid = "2e619515-83b5-522b-bb60-26c02a35a201" 129 | version = "2.4.8+0" 130 | 131 | [[deps.FFMPEG]] 132 | deps = ["FFMPEG_jll"] 133 | git-tree-sha1 = "b57e3acbe22f8484b4b5ff66a7499717fe1a9cc8" 134 | uuid = "c87230d0-a227-11e9-1b43-d7ebe4e7570a" 135 | version = "0.4.1" 136 | 137 | [[deps.FFMPEG_jll]] 138 | deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "LAME_jll", "Libdl", "Ogg_jll", "OpenSSL_jll", "Opus_jll", "Pkg", "Zlib_jll", "libass_jll", "libfdk_aac_jll", "libvorbis_jll", "x264_jll", "x265_jll"] 139 | git-tree-sha1 = "d8a578692e3077ac998b50c0217dfd67f21d1e5f" 140 | uuid = "b22a6f82-2f65-5046-a5b2-351ab43fb4e5" 141 | version = "4.4.0+0" 142 | 143 | [[deps.FileWatching]] 144 | uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" 145 | 146 | [[deps.FixedPointNumbers]] 147 | deps = ["Statistics"] 148 | git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc" 149 | uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93" 150 | version = "0.8.4" 151 | 152 | [[deps.Fontconfig_jll]] 153 | deps = ["Artifacts", "Bzip2_jll", "Expat_jll", "FreeType2_jll", "JLLWrappers", "Libdl", "Libuuid_jll", "Pkg", "Zlib_jll"] 154 | git-tree-sha1 = "21efd19106a55620a188615da6d3d06cd7f6ee03" 155 | uuid = "a3f928ae-7b40-5064-980b-68af3947d34b" 156 | version = "2.13.93+0" 157 | 158 | [[deps.Formatting]] 159 | deps = ["Printf"] 160 | git-tree-sha1 = "8339d61043228fdd3eb658d86c926cb282ae72a8" 161 | uuid = "59287772-0a20-5a39-b81b-1366585eb4c0" 162 | version = "0.4.2" 163 | 164 | [[deps.FreeType2_jll]] 165 | deps = ["Artifacts", "Bzip2_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] 166 | git-tree-sha1 = "87eb71354d8ec1a96d4a7636bd57a7347dde3ef9" 167 | uuid = "d7e528f0-a631-5988-bf34-fe36492bcfd7" 168 | version = "2.10.4+0" 169 | 170 | [[deps.FriBidi_jll]] 171 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 172 | git-tree-sha1 = "aa31987c2ba8704e23c6c8ba8a4f769d5d7e4f91" 173 | uuid = "559328eb-81f9-559d-9380-de523a88c83c" 174 | version = "1.0.10+0" 175 | 176 | [[deps.GLFW_jll]] 177 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Libglvnd_jll", "Pkg", "Xorg_libXcursor_jll", "Xorg_libXi_jll", "Xorg_libXinerama_jll", "Xorg_libXrandr_jll"] 178 | git-tree-sha1 = "51d2dfe8e590fbd74e7a842cf6d13d8a2f45dc01" 179 | uuid = "0656b61e-2033-5cc2-a64a-77c0f6c09b89" 180 | version = "3.3.6+0" 181 | 182 | [[deps.GR]] 183 | deps = ["Base64", "DelimitedFiles", "GR_jll", "HTTP", "JSON", "Libdl", "LinearAlgebra", "Pkg", "Printf", "Random", "RelocatableFolders", "Serialization", "Sockets", "Test", "UUIDs"] 184 | git-tree-sha1 = "c98aea696662d09e215ef7cda5296024a9646c75" 185 | uuid = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71" 186 | 
version = "0.64.4" 187 | 188 | [[deps.GR_jll]] 189 | deps = ["Artifacts", "Bzip2_jll", "Cairo_jll", "FFMPEG_jll", "Fontconfig_jll", "GLFW_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pixman_jll", "Pkg", "Qt5Base_jll", "Zlib_jll", "libpng_jll"] 190 | git-tree-sha1 = "3a233eeeb2ca45842fe100e0413936834215abf5" 191 | uuid = "d2c73de3-f751-5644-a686-071e5b155ba9" 192 | version = "0.64.4+0" 193 | 194 | [[deps.GeometryBasics]] 195 | deps = ["EarCut_jll", "IterTools", "LinearAlgebra", "StaticArrays", "StructArrays", "Tables"] 196 | git-tree-sha1 = "83ea630384a13fc4f002b77690bc0afeb4255ac9" 197 | uuid = "5c1252a2-5f33-56bf-86c9-59e7332b4326" 198 | version = "0.4.2" 199 | 200 | [[deps.Gettext_jll]] 201 | deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "XML2_jll"] 202 | git-tree-sha1 = "9b02998aba7bf074d14de89f9d37ca24a1a0b046" 203 | uuid = "78b55507-aeef-58d4-861c-77aaff3498b1" 204 | version = "0.21.0+0" 205 | 206 | [[deps.Glib_jll]] 207 | deps = ["Artifacts", "Gettext_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Libiconv_jll", "Libmount_jll", "PCRE_jll", "Pkg", "Zlib_jll"] 208 | git-tree-sha1 = "a32d672ac2c967f3deb8a81d828afc739c838a06" 209 | uuid = "7746bdde-850d-59dc-9ae8-88ece973131d" 210 | version = "2.68.3+2" 211 | 212 | [[deps.Graphite2_jll]] 213 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 214 | git-tree-sha1 = "344bf40dcab1073aca04aa0df4fb092f920e4011" 215 | uuid = "3b182d85-2403-5c21-9c21-1e1f0cc25472" 216 | version = "1.3.14+0" 217 | 218 | [[deps.Grisu]] 219 | git-tree-sha1 = "53bb909d1151e57e2484c3d1b53e19552b887fb2" 220 | uuid = "42e2da0e-8278-4e71-bc24-59509adca0fe" 221 | version = "1.0.2" 222 | 223 | [[deps.HTTP]] 224 | deps = ["Base64", "Dates", "IniFile", "Logging", "MbedTLS", "NetworkOptions", "Sockets", "URIs"] 225 | git-tree-sha1 = "0fa77022fe4b511826b39c894c90daf5fce3334a" 226 | uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3" 227 | version = "0.9.17" 228 | 229 | [[deps.HarfBuzz_jll]] 230 | deps = ["Artifacts", "Cairo_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "Graphite2_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg"] 231 | git-tree-sha1 = "129acf094d168394e80ee1dc4bc06ec835e510a3" 232 | uuid = "2e76f6c2-a576-52d4-95c1-20adfe4de566" 233 | version = "2.8.1+1" 234 | 235 | [[deps.IniFile]] 236 | git-tree-sha1 = "f550e6e32074c939295eb5ea6de31849ac2c9625" 237 | uuid = "83e8ac13-25f8-5344-8a64-a9f2b223428f" 238 | version = "0.5.1" 239 | 240 | [[deps.InteractiveUtils]] 241 | deps = ["Markdown"] 242 | uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" 243 | 244 | [[deps.InverseFunctions]] 245 | deps = ["Test"] 246 | git-tree-sha1 = "b3364212fb5d870f724876ffcd34dd8ec6d98918" 247 | uuid = "3587e190-3f89-42d0-90ee-14403ec27112" 248 | version = "0.1.7" 249 | 250 | [[deps.IrrationalConstants]] 251 | git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151" 252 | uuid = "92d709cd-6900-40b7-9082-c6be49f344b6" 253 | version = "0.1.1" 254 | 255 | [[deps.IterTools]] 256 | git-tree-sha1 = "fa6287a4469f5e048d763df38279ee729fbd44e5" 257 | uuid = "c8e1da08-722c-5040-9ed9-7db0dc04731e" 258 | version = "1.4.0" 259 | 260 | [[deps.IteratorInterfaceExtensions]] 261 | git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856" 262 | uuid = "82899510-4779-5014-852e-03e436cf321d" 263 | version = "1.0.0" 264 | 265 | [[deps.JLLWrappers]] 266 | deps = ["Preferences"] 267 | git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1" 268 | uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" 269 | version = "1.4.1" 270 | 271 | 
[[deps.JSON]] 272 | deps = ["Dates", "Mmap", "Parsers", "Unicode"] 273 | git-tree-sha1 = "3c837543ddb02250ef42f4738347454f95079d4e" 274 | uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" 275 | version = "0.21.3" 276 | 277 | [[deps.JpegTurbo_jll]] 278 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 279 | git-tree-sha1 = "b53380851c6e6664204efb2e62cd24fa5c47e4ba" 280 | uuid = "aacddb02-875f-59d6-b918-886e6ef4fbf8" 281 | version = "2.1.2+0" 282 | 283 | [[deps.LAME_jll]] 284 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 285 | git-tree-sha1 = "f6250b16881adf048549549fba48b1161acdac8c" 286 | uuid = "c1c5ebd0-6772-5130-a774-d5fcae4a789d" 287 | version = "3.100.1+0" 288 | 289 | [[deps.LERC_jll]] 290 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 291 | git-tree-sha1 = "bf36f528eec6634efc60d7ec062008f171071434" 292 | uuid = "88015f11-f218-50d7-93a8-a6af411a945d" 293 | version = "3.0.0+1" 294 | 295 | [[deps.LZO_jll]] 296 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 297 | git-tree-sha1 = "e5b909bcf985c5e2605737d2ce278ed791b89be6" 298 | uuid = "dd4b983a-f0e5-5f8d-a1b7-129d4a5fb1ac" 299 | version = "2.10.1+0" 300 | 301 | [[deps.LaTeXStrings]] 302 | git-tree-sha1 = "f2355693d6778a178ade15952b7ac47a4ff97996" 303 | uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f" 304 | version = "1.3.0" 305 | 306 | [[deps.Latexify]] 307 | deps = ["Formatting", "InteractiveUtils", "LaTeXStrings", "MacroTools", "Markdown", "Printf", "Requires"] 308 | git-tree-sha1 = "46a39b9c58749eefb5f2dc1178cb8fab5332b1ab" 309 | uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" 310 | version = "0.15.15" 311 | 312 | [[deps.LibCURL]] 313 | deps = ["LibCURL_jll", "MozillaCACerts_jll"] 314 | uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" 315 | 316 | [[deps.LibCURL_jll]] 317 | deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] 318 | uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" 319 | 320 | [[deps.LibGit2]] 321 | deps = ["Base64", "NetworkOptions", "Printf", "SHA"] 322 | uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" 323 | 324 | [[deps.LibSSH2_jll]] 325 | deps = ["Artifacts", "Libdl", "MbedTLS_jll"] 326 | uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" 327 | 328 | [[deps.Libdl]] 329 | uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" 330 | 331 | [[deps.Libffi_jll]] 332 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 333 | git-tree-sha1 = "0b4a5d71f3e5200a7dff793393e09dfc2d874290" 334 | uuid = "e9f186c6-92d2-5b65-8a66-fee21dc1b490" 335 | version = "3.2.2+1" 336 | 337 | [[deps.Libgcrypt_jll]] 338 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgpg_error_jll", "Pkg"] 339 | git-tree-sha1 = "64613c82a59c120435c067c2b809fc61cf5166ae" 340 | uuid = "d4300ac3-e22c-5743-9152-c294e39db1e4" 341 | version = "1.8.7+0" 342 | 343 | [[deps.Libglvnd_jll]] 344 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll", "Xorg_libXext_jll"] 345 | git-tree-sha1 = "7739f837d6447403596a75d19ed01fd08d6f56bf" 346 | uuid = "7e76a0d4-f3c7-5321-8279-8d96eeed0f29" 347 | version = "1.3.0+3" 348 | 349 | [[deps.Libgpg_error_jll]] 350 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 351 | git-tree-sha1 = "c333716e46366857753e273ce6a69ee0945a6db9" 352 | uuid = "7add5ba3-2f88-524e-9cd5-f83b8a55f7b8" 353 | version = "1.42.0+0" 354 | 355 | [[deps.Libiconv_jll]] 356 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 357 | git-tree-sha1 = "42b62845d70a619f063a7da093d995ec8e15e778" 358 | uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531" 359 | version = "1.16.1+1" 360 | 361 | [[deps.Libmount_jll]] 362 | deps = 
["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 363 | git-tree-sha1 = "9c30530bf0effd46e15e0fdcf2b8636e78cbbd73" 364 | uuid = "4b2f31a3-9ecc-558c-b454-b3730dcb73e9" 365 | version = "2.35.0+0" 366 | 367 | [[deps.Libtiff_jll]] 368 | deps = ["Artifacts", "JLLWrappers", "JpegTurbo_jll", "LERC_jll", "Libdl", "Pkg", "Zlib_jll", "Zstd_jll"] 369 | git-tree-sha1 = "3eb79b0ca5764d4799c06699573fd8f533259713" 370 | uuid = "89763e89-9b03-5906-acba-b20f662cd828" 371 | version = "4.4.0+0" 372 | 373 | [[deps.Libuuid_jll]] 374 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 375 | git-tree-sha1 = "7f3efec06033682db852f8b3bc3c1d2b0a0ab066" 376 | uuid = "38a345b3-de98-5d2b-a5d3-14cd9215e700" 377 | version = "2.36.0+0" 378 | 379 | [[deps.LinearAlgebra]] 380 | deps = ["Libdl", "libblastrampoline_jll"] 381 | uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 382 | 383 | [[deps.LogExpFunctions]] 384 | deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"] 385 | git-tree-sha1 = "09e4b894ce6a976c354a69041a04748180d43637" 386 | uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" 387 | version = "0.3.15" 388 | 389 | [[deps.Logging]] 390 | uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" 391 | 392 | [[deps.MacroTools]] 393 | deps = ["Markdown", "Random"] 394 | git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf" 395 | uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" 396 | version = "0.5.9" 397 | 398 | [[deps.Markdown]] 399 | deps = ["Base64"] 400 | uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" 401 | 402 | [[deps.MbedTLS]] 403 | deps = ["Dates", "MbedTLS_jll", "Random", "Sockets"] 404 | git-tree-sha1 = "1c38e51c3d08ef2278062ebceade0e46cefc96fe" 405 | uuid = "739be429-bea8-5141-9913-cc70e7f3736d" 406 | version = "1.0.3" 407 | 408 | [[deps.MbedTLS_jll]] 409 | deps = ["Artifacts", "Libdl"] 410 | uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" 411 | 412 | [[deps.Measures]] 413 | git-tree-sha1 = "e498ddeee6f9fdb4551ce855a46f54dbd900245f" 414 | uuid = "442fdcdd-2543-5da2-b0f3-8c86c306513e" 415 | version = "0.3.1" 416 | 417 | [[deps.Missings]] 418 | deps = ["DataAPI"] 419 | git-tree-sha1 = "bf210ce90b6c9eed32d25dbcae1ebc565df2687f" 420 | uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28" 421 | version = "1.0.2" 422 | 423 | [[deps.Mmap]] 424 | uuid = "a63ad114-7e13-5084-954f-fe012c677804" 425 | 426 | [[deps.MozillaCACerts_jll]] 427 | uuid = "14a3606d-f60d-562e-9121-12d972cd8159" 428 | 429 | [[deps.NaNMath]] 430 | git-tree-sha1 = "737a5957f387b17e74d4ad2f440eb330b39a62c5" 431 | uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" 432 | version = "1.0.0" 433 | 434 | [[deps.NetworkOptions]] 435 | uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" 436 | 437 | [[deps.Ogg_jll]] 438 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 439 | git-tree-sha1 = "887579a3eb005446d514ab7aeac5d1d027658b8f" 440 | uuid = "e7412a2a-1a6e-54c0-be00-318e2571c051" 441 | version = "1.3.5+1" 442 | 443 | [[deps.OpenBLAS_jll]] 444 | deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] 445 | uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" 446 | 447 | [[deps.OpenLibm_jll]] 448 | deps = ["Artifacts", "Libdl"] 449 | uuid = "05823500-19ac-5b8b-9628-191a04bc5112" 450 | 451 | [[deps.OpenSSL_jll]] 452 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 453 | git-tree-sha1 = "ab05aa4cc89736e95915b01e7279e61b1bfe33b8" 454 | uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95" 455 | version = "1.1.14+0" 456 | 457 | [[deps.OpenSpecFun_jll]] 458 | deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", 
"Libdl", "Pkg"] 459 | git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1" 460 | uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e" 461 | version = "0.5.5+0" 462 | 463 | [[deps.Opus_jll]] 464 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 465 | git-tree-sha1 = "51a08fb14ec28da2ec7a927c4337e4332c2a4720" 466 | uuid = "91d4177d-7536-5919-b921-800302f37372" 467 | version = "1.3.2+0" 468 | 469 | [[deps.OrderedCollections]] 470 | git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c" 471 | uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" 472 | version = "1.4.1" 473 | 474 | [[deps.PCRE_jll]] 475 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 476 | git-tree-sha1 = "b2a7af664e098055a7529ad1a900ded962bca488" 477 | uuid = "2f80f16e-611a-54ab-bc61-aa92de5b98fc" 478 | version = "8.44.0+0" 479 | 480 | [[deps.Parsers]] 481 | deps = ["Dates"] 482 | git-tree-sha1 = "1285416549ccfcdf0c50d4997a94331e88d68413" 483 | uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" 484 | version = "2.3.1" 485 | 486 | [[deps.Pixman_jll]] 487 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 488 | git-tree-sha1 = "b4f5d02549a10e20780a24fce72bea96b6329e29" 489 | uuid = "30392449-352a-5448-841d-b1acce4e97dc" 490 | version = "0.40.1+0" 491 | 492 | [[deps.Pkg]] 493 | deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] 494 | uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" 495 | 496 | [[deps.PlotThemes]] 497 | deps = ["PlotUtils", "Statistics"] 498 | git-tree-sha1 = "8162b2f8547bc23876edd0c5181b27702ae58dce" 499 | uuid = "ccf2f8ad-2431-5c83-bf29-c5338b663b6a" 500 | version = "3.0.0" 501 | 502 | [[deps.PlotUtils]] 503 | deps = ["ColorSchemes", "Colors", "Dates", "Printf", "Random", "Reexport", "Statistics"] 504 | git-tree-sha1 = "bb16469fd5224100e422f0b027d26c5a25de1200" 505 | uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043" 506 | version = "1.2.0" 507 | 508 | [[deps.Plots]] 509 | deps = ["Base64", "Contour", "Dates", "Downloads", "FFMPEG", "FixedPointNumbers", "GR", "GeometryBasics", "JSON", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "Pkg", "PlotThemes", "PlotUtils", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs", "UnicodeFun", "Unzip"] 510 | git-tree-sha1 = "9e42de869561d6bdf8602c57ec557d43538a92f0" 511 | uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" 512 | version = "1.29.1" 513 | 514 | [[deps.Preferences]] 515 | deps = ["TOML"] 516 | git-tree-sha1 = "47e5f437cc0e7ef2ce8406ce1e7e24d44915f88d" 517 | uuid = "21216c6a-2e73-6563-6e65-726566657250" 518 | version = "1.3.0" 519 | 520 | [[deps.Printf]] 521 | deps = ["Unicode"] 522 | uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" 523 | 524 | [[deps.Qt5Base_jll]] 525 | deps = ["Artifacts", "CompilerSupportLibraries_jll", "Fontconfig_jll", "Glib_jll", "JLLWrappers", "Libdl", "Libglvnd_jll", "OpenSSL_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libxcb_jll", "Xorg_xcb_util_image_jll", "Xorg_xcb_util_keysyms_jll", "Xorg_xcb_util_renderutil_jll", "Xorg_xcb_util_wm_jll", "Zlib_jll", "xkbcommon_jll"] 526 | git-tree-sha1 = "c6c0f690d0cc7caddb74cef7aa847b824a16b256" 527 | uuid = "ea2cea3b-5b76-57ae-a6ef-0a8af62496e1" 528 | version = "5.15.3+1" 529 | 530 | [[deps.REPL]] 531 | deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] 532 | uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" 533 | 534 | [[deps.Random]] 535 | deps = ["SHA", "Serialization"] 536 | 
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" 537 | 538 | [[deps.RecipesBase]] 539 | git-tree-sha1 = "6bf3f380ff52ce0832ddd3a2a7b9538ed1bcca7d" 540 | uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" 541 | version = "1.2.1" 542 | 543 | [[deps.RecipesPipeline]] 544 | deps = ["Dates", "NaNMath", "PlotUtils", "RecipesBase"] 545 | git-tree-sha1 = "dc1e451e15d90347a7decc4221842a022b011714" 546 | uuid = "01d81517-befc-4cb6-b9ec-a95719d0359c" 547 | version = "0.5.2" 548 | 549 | [[deps.Reexport]] 550 | git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b" 551 | uuid = "189a3867-3050-52da-a836-e630ba90ab69" 552 | version = "1.2.2" 553 | 554 | [[deps.RelocatableFolders]] 555 | deps = ["SHA", "Scratch"] 556 | git-tree-sha1 = "cdbd3b1338c72ce29d9584fdbe9e9b70eeb5adca" 557 | uuid = "05181044-ff0b-4ac5-8273-598c1e38db00" 558 | version = "0.1.3" 559 | 560 | [[deps.Requires]] 561 | deps = ["UUIDs"] 562 | git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7" 563 | uuid = "ae029012-a4dd-5104-9daa-d747884805df" 564 | version = "1.3.0" 565 | 566 | [[deps.SHA]] 567 | uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" 568 | 569 | [[deps.Scratch]] 570 | deps = ["Dates"] 571 | git-tree-sha1 = "0b4b7f1393cff97c33891da2a0bf69c6ed241fda" 572 | uuid = "6c6a2e73-6563-6170-7368-637461726353" 573 | version = "1.1.0" 574 | 575 | [[deps.Serialization]] 576 | uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" 577 | 578 | [[deps.Showoff]] 579 | deps = ["Dates", "Grisu"] 580 | git-tree-sha1 = "91eddf657aca81df9ae6ceb20b959ae5653ad1de" 581 | uuid = "992d4aef-0814-514b-bc4d-f2e9a6c4116f" 582 | version = "1.0.3" 583 | 584 | [[deps.Sockets]] 585 | uuid = "6462fe0b-24de-5631-8697-dd941f90decc" 586 | 587 | [[deps.SortingAlgorithms]] 588 | deps = ["DataStructures"] 589 | git-tree-sha1 = "b3363d7460f7d098ca0912c69b082f75625d7508" 590 | uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c" 591 | version = "1.0.1" 592 | 593 | [[deps.SparseArrays]] 594 | deps = ["LinearAlgebra", "Random"] 595 | uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" 596 | 597 | [[deps.SpecialFunctions]] 598 | deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] 599 | git-tree-sha1 = "a9e798cae4867e3a41cae2dd9eb60c047f1212db" 600 | uuid = "276daf66-3868-5448-9aa4-cd146d93841b" 601 | version = "2.1.6" 602 | 603 | [[deps.StaticArrays]] 604 | deps = ["LinearAlgebra", "Random", "Statistics"] 605 | git-tree-sha1 = "2bbd9f2e40afd197a1379aef05e0d85dba649951" 606 | uuid = "90137ffa-7385-5640-81b9-e52037218182" 607 | version = "1.4.7" 608 | 609 | [[deps.Statistics]] 610 | deps = ["LinearAlgebra", "SparseArrays"] 611 | uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" 612 | 613 | [[deps.StatsAPI]] 614 | deps = ["LinearAlgebra"] 615 | git-tree-sha1 = "2c11d7290036fe7aac9038ff312d3b3a2a5bf89e" 616 | uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0" 617 | version = "1.4.0" 618 | 619 | [[deps.StatsBase]] 620 | deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"] 621 | git-tree-sha1 = "8977b17906b0a1cc74ab2e3a05faa16cf08a8291" 622 | uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" 623 | version = "0.33.16" 624 | 625 | [[deps.StructArrays]] 626 | deps = ["Adapt", "DataAPI", "StaticArrays", "Tables"] 627 | git-tree-sha1 = "9abba8f8fb8458e9adf07c8a2377a070674a24f1" 628 | uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a" 629 | version = "0.6.8" 630 | 631 | [[deps.TOML]] 632 | deps = ["Dates"] 633 | uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" 
634 | 635 | [[deps.TableTraits]] 636 | deps = ["IteratorInterfaceExtensions"] 637 | git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39" 638 | uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c" 639 | version = "1.0.1" 640 | 641 | [[deps.Tables]] 642 | deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "OrderedCollections", "TableTraits", "Test"] 643 | git-tree-sha1 = "5ce79ce186cc678bbb5c5681ca3379d1ddae11a1" 644 | uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" 645 | version = "1.7.0" 646 | 647 | [[deps.Tar]] 648 | deps = ["ArgTools", "SHA"] 649 | uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" 650 | 651 | [[deps.TensorCore]] 652 | deps = ["LinearAlgebra"] 653 | git-tree-sha1 = "1feb45f88d133a655e001435632f019a9a1bcdb6" 654 | uuid = "62fd8b95-f654-4bbd-a8a5-9c27f68ccd50" 655 | version = "0.1.1" 656 | 657 | [[deps.Test]] 658 | deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] 659 | uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 660 | 661 | [[deps.URIs]] 662 | git-tree-sha1 = "97bbe755a53fe859669cd907f2d96aee8d2c1355" 663 | uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4" 664 | version = "1.3.0" 665 | 666 | [[deps.UUIDs]] 667 | deps = ["Random", "SHA"] 668 | uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" 669 | 670 | [[deps.Unicode]] 671 | uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" 672 | 673 | [[deps.UnicodeFun]] 674 | deps = ["REPL"] 675 | git-tree-sha1 = "53915e50200959667e78a92a418594b428dffddf" 676 | uuid = "1cfade01-22cf-5700-b092-accc4b62d6e1" 677 | version = "0.4.1" 678 | 679 | [[deps.Unzip]] 680 | git-tree-sha1 = "34db80951901073501137bdbc3d5a8e7bbd06670" 681 | uuid = "41fe7b60-77ed-43a1-b4f0-825fd5a5650d" 682 | version = "0.1.2" 683 | 684 | [[deps.Wayland_jll]] 685 | deps = ["Artifacts", "Expat_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg", "XML2_jll"] 686 | git-tree-sha1 = "3e61f0b86f90dacb0bc0e73a0c5a83f6a8636e23" 687 | uuid = "a2964d1f-97da-50d4-b82a-358c7fce9d89" 688 | version = "1.19.0+0" 689 | 690 | [[deps.Wayland_protocols_jll]] 691 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 692 | git-tree-sha1 = "4528479aa01ee1b3b4cd0e6faef0e04cf16466da" 693 | uuid = "2381bf8a-dfd0-557d-9999-79630e7b1b91" 694 | version = "1.25.0+0" 695 | 696 | [[deps.XML2_jll]] 697 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "Zlib_jll"] 698 | git-tree-sha1 = "58443b63fb7e465a8a7210828c91c08b92132dff" 699 | uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a" 700 | version = "2.9.14+0" 701 | 702 | [[deps.XSLT_jll]] 703 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgcrypt_jll", "Libgpg_error_jll", "Libiconv_jll", "Pkg", "XML2_jll", "Zlib_jll"] 704 | git-tree-sha1 = "91844873c4085240b95e795f692c4cec4d805f8a" 705 | uuid = "aed1982a-8fda-507f-9586-7b0439959a61" 706 | version = "1.1.34+0" 707 | 708 | [[deps.Xorg_libX11_jll]] 709 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll", "Xorg_xtrans_jll"] 710 | git-tree-sha1 = "5be649d550f3f4b95308bf0183b82e2582876527" 711 | uuid = "4f6342f7-b3d2-589e-9d20-edeb45f2b2bc" 712 | version = "1.6.9+4" 713 | 714 | [[deps.Xorg_libXau_jll]] 715 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 716 | git-tree-sha1 = "4e490d5c960c314f33885790ed410ff3a94ce67e" 717 | uuid = "0c0b7dd1-d40b-584c-a123-a41640f87eec" 718 | version = "1.0.9+4" 719 | 720 | [[deps.Xorg_libXcursor_jll]] 721 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXfixes_jll", "Xorg_libXrender_jll"] 722 | git-tree-sha1 = "12e0eb3bc634fa2080c1c37fccf56f7c22989afd" 723 | uuid = 
"935fb764-8cf2-53bf-bb30-45bb1f8bf724" 724 | version = "1.2.0+4" 725 | 726 | [[deps.Xorg_libXdmcp_jll]] 727 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 728 | git-tree-sha1 = "4fe47bd2247248125c428978740e18a681372dd4" 729 | uuid = "a3789734-cfe1-5b06-b2d0-1dd0d9d62d05" 730 | version = "1.1.3+4" 731 | 732 | [[deps.Xorg_libXext_jll]] 733 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] 734 | git-tree-sha1 = "b7c0aa8c376b31e4852b360222848637f481f8c3" 735 | uuid = "1082639a-0dae-5f34-9b06-72781eeb8cb3" 736 | version = "1.3.4+4" 737 | 738 | [[deps.Xorg_libXfixes_jll]] 739 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] 740 | git-tree-sha1 = "0e0dc7431e7a0587559f9294aeec269471c991a4" 741 | uuid = "d091e8ba-531a-589c-9de9-94069b037ed8" 742 | version = "5.0.3+4" 743 | 744 | [[deps.Xorg_libXi_jll]] 745 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll", "Xorg_libXfixes_jll"] 746 | git-tree-sha1 = "89b52bc2160aadc84d707093930ef0bffa641246" 747 | uuid = "a51aa0fd-4e3c-5386-b890-e753decda492" 748 | version = "1.7.10+4" 749 | 750 | [[deps.Xorg_libXinerama_jll]] 751 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll"] 752 | git-tree-sha1 = "26be8b1c342929259317d8b9f7b53bf2bb73b123" 753 | uuid = "d1454406-59df-5ea1-beac-c340f2130bc3" 754 | version = "1.1.4+4" 755 | 756 | [[deps.Xorg_libXrandr_jll]] 757 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll"] 758 | git-tree-sha1 = "34cea83cb726fb58f325887bf0612c6b3fb17631" 759 | uuid = "ec84b674-ba8e-5d96-8ba1-2a689ba10484" 760 | version = "1.5.2+4" 761 | 762 | [[deps.Xorg_libXrender_jll]] 763 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] 764 | git-tree-sha1 = "19560f30fd49f4d4efbe7002a1037f8c43d43b96" 765 | uuid = "ea2f1a96-1ddc-540d-b46f-429655e07cfa" 766 | version = "0.9.10+4" 767 | 768 | [[deps.Xorg_libpthread_stubs_jll]] 769 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 770 | git-tree-sha1 = "6783737e45d3c59a4a4c4091f5f88cdcf0908cbb" 771 | uuid = "14d82f49-176c-5ed1-bb49-ad3f5cbd8c74" 772 | version = "0.1.0+3" 773 | 774 | [[deps.Xorg_libxcb_jll]] 775 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "XSLT_jll", "Xorg_libXau_jll", "Xorg_libXdmcp_jll", "Xorg_libpthread_stubs_jll"] 776 | git-tree-sha1 = "daf17f441228e7a3833846cd048892861cff16d6" 777 | uuid = "c7cfdc94-dc32-55de-ac96-5a1b8d977c5b" 778 | version = "1.13.0+3" 779 | 780 | [[deps.Xorg_libxkbfile_jll]] 781 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] 782 | git-tree-sha1 = "926af861744212db0eb001d9e40b5d16292080b2" 783 | uuid = "cc61e674-0454-545c-8b26-ed2c68acab7a" 784 | version = "1.1.0+4" 785 | 786 | [[deps.Xorg_xcb_util_image_jll]] 787 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] 788 | git-tree-sha1 = "0fab0a40349ba1cba2c1da699243396ff8e94b97" 789 | uuid = "12413925-8142-5f55-bb0e-6d7ca50bb09b" 790 | version = "0.4.0+1" 791 | 792 | [[deps.Xorg_xcb_util_jll]] 793 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll"] 794 | git-tree-sha1 = "e7fd7b2881fa2eaa72717420894d3938177862d1" 795 | uuid = "2def613f-5ad1-5310-b15b-b15d46f528f5" 796 | version = "0.4.0+1" 797 | 798 | [[deps.Xorg_xcb_util_keysyms_jll]] 799 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] 800 | git-tree-sha1 = "d1151e2c45a544f32441a567d1690e701ec89b00" 801 | uuid = "975044d2-76e6-5fbe-bf08-97ce7c6574c7" 802 | version = "0.4.0+1" 803 | 804 | 
[[deps.Xorg_xcb_util_renderutil_jll]] 805 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] 806 | git-tree-sha1 = "dfd7a8f38d4613b6a575253b3174dd991ca6183e" 807 | uuid = "0d47668e-0667-5a69-a72c-f761630bfb7e" 808 | version = "0.3.9+1" 809 | 810 | [[deps.Xorg_xcb_util_wm_jll]] 811 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] 812 | git-tree-sha1 = "e78d10aab01a4a154142c5006ed44fd9e8e31b67" 813 | uuid = "c22f9ab0-d5fe-5066-847c-f4bb1cd4e361" 814 | version = "0.4.1+1" 815 | 816 | [[deps.Xorg_xkbcomp_jll]] 817 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxkbfile_jll"] 818 | git-tree-sha1 = "4bcbf660f6c2e714f87e960a171b119d06ee163b" 819 | uuid = "35661453-b289-5fab-8a00-3d9160c6a3a4" 820 | version = "1.4.2+4" 821 | 822 | [[deps.Xorg_xkeyboard_config_jll]] 823 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xkbcomp_jll"] 824 | git-tree-sha1 = "5c8424f8a67c3f2209646d4425f3d415fee5931d" 825 | uuid = "33bec58e-1273-512f-9401-5d533626f822" 826 | version = "2.27.0+4" 827 | 828 | [[deps.Xorg_xtrans_jll]] 829 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 830 | git-tree-sha1 = "79c31e7844f6ecf779705fbc12146eb190b7d845" 831 | uuid = "c5fb5394-a638-5e4d-96e5-b29de1b5cf10" 832 | version = "1.4.0+3" 833 | 834 | [[deps.Zlib_jll]] 835 | deps = ["Libdl"] 836 | uuid = "83775a58-1f1d-513f-b197-d71354ab007a" 837 | 838 | [[deps.Zstd_jll]] 839 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 840 | git-tree-sha1 = "e45044cd873ded54b6a5bac0eb5c971392cf1927" 841 | uuid = "3161d3a3-bdf6-5164-811a-617609db77b4" 842 | version = "1.5.2+0" 843 | 844 | [[deps.libass_jll]] 845 | deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "HarfBuzz_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] 846 | git-tree-sha1 = "5982a94fcba20f02f42ace44b9894ee2b140fe47" 847 | uuid = "0ac62f75-1d6f-5e53-bd7c-93b484bb37c0" 848 | version = "0.15.1+0" 849 | 850 | [[deps.libblastrampoline_jll]] 851 | deps = ["Artifacts", "Libdl", "OpenBLAS_jll"] 852 | uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" 853 | 854 | [[deps.libfdk_aac_jll]] 855 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 856 | git-tree-sha1 = "daacc84a041563f965be61859a36e17c4e4fcd55" 857 | uuid = "f638f0a6-7fb0-5443-88ba-1cc74229b280" 858 | version = "2.0.2+0" 859 | 860 | [[deps.libpng_jll]] 861 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] 862 | git-tree-sha1 = "94d180a6d2b5e55e447e2d27a29ed04fe79eb30c" 863 | uuid = "b53b4c65-9356-5827-b1ea-8c7a1a84506f" 864 | version = "1.6.38+0" 865 | 866 | [[deps.libvorbis_jll]] 867 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Ogg_jll", "Pkg"] 868 | git-tree-sha1 = "b910cb81ef3fe6e78bf6acee440bda86fd6ae00c" 869 | uuid = "f27f6e37-5d2b-51aa-960f-b287f2bc3b7a" 870 | version = "1.3.7+1" 871 | 872 | [[deps.nghttp2_jll]] 873 | deps = ["Artifacts", "Libdl"] 874 | uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" 875 | 876 | [[deps.p7zip_jll]] 877 | deps = ["Artifacts", "Libdl"] 878 | uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" 879 | 880 | [[deps.x264_jll]] 881 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 882 | git-tree-sha1 = "4fea590b89e6ec504593146bf8b988b2c00922b2" 883 | uuid = "1270edf5-f2f9-52d2-97e9-ab00b5d0237a" 884 | version = "2021.5.5+0" 885 | 886 | [[deps.x265_jll]] 887 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 888 | git-tree-sha1 = "ee567a171cce03570d77ad3a43e90218e38937a9" 889 | uuid = "dfaa095f-4041-5dcd-9319-2fabd8486b76" 890 | version = "3.5.0+0" 891 | 892 | [[deps.xkbcommon_jll]] 
893 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Wayland_jll", "Wayland_protocols_jll", "Xorg_libxcb_jll", "Xorg_xkeyboard_config_jll"] 894 | git-tree-sha1 = "ece2350174195bb31de1a63bea3a41ae1aa593b6" 895 | uuid = "d8fb68d0-12a3-5cfd-a85a-d49703b185fd" 896 | version = "0.9.1+5" 897 | -------------------------------------------------------------------------------- /2206_Julia/1_projects/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" 3 | -------------------------------------------------------------------------------- /2206_Julia/1_projects/sharedproject/Manifest.toml: -------------------------------------------------------------------------------- 1 | # This file is machine-generated - editing it directly is not advised 2 | 3 | [[Useless]] 4 | git-tree-sha1 = "65c93cb5e6b1dd638f2cb44bb5f66bc427523868" 5 | repo-rev = "master" 6 | repo-url = "http://github.com/crstnbr/Useless.jl" 7 | uuid = "844dae10-c0bf-11e8-151a-ff650c5ba20c" 8 | version = "0.1.0" 9 | -------------------------------------------------------------------------------- /2206_Julia/1_projects/sharedproject/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | Useless = "844dae10-c0bf-11e8-151a-ff650c5ba20c" 3 | -------------------------------------------------------------------------------- /2206_Julia/1_projects/sharedproject/code.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Instantiating our colleagues environment" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "**IJulia automatically looks for an environment (a `Project.toml` file) and activates it.**\n", 15 | "\n", 16 | "(It traverses the file system from the current directory upwards.)" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": 1, 22 | "metadata": {}, 23 | "outputs": [ 24 | { 25 | "name": "stdout", 26 | "output_type": "stream", 27 | "text": [ 28 | "\u001b[32m\u001b[1m Status\u001b[22m\u001b[39m `C:\\Users\\carsten\\Desktop\\JuliaOulu20\\DayTwo\\1_environments\\sharedproject\\Project.toml`\n", 29 | "\u001b[31m→\u001b[39m\u001b[90m [844dae10]\u001b[39m\u001b[37m Useless v0.1.0 #master (http://github.com/crstnbr/Useless.jl)\u001b[39m\n" 30 | ] 31 | }, 32 | { 33 | "name": "stderr", 34 | "output_type": "stream", 35 | "text": [ 36 | "┌ Warning: Some packages (indicated with a red arrow) are not downloaded, use `instantiate` to instantiate the current environment\n", 37 | "└ @ Pkg.Display D:\\buildbot\\worker\\package_win64\\build\\usr\\share\\julia\\stdlib\\v1.3\\Pkg\\src\\Display.jl:233\n" 38 | ] 39 | } 40 | ], 41 | "source": [ 42 | "] status" 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "metadata": {}, 48 | "source": [ 49 | "Julia tells us, that we don't have all the necessary package (versions) installed. One command is enough to get them all." 
50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": 2, 55 | "metadata": {}, 56 | "outputs": [ 57 | { 58 | "name": "stdout", 59 | "output_type": "stream", 60 | "text": [ 61 | "\u001b[32m\u001b[1m Updating\u001b[22m\u001b[39m registry at `C:\\Users\\carsten\\.julia\\registries\\General`\n", 62 | "\u001b[32m\u001b[1m Updating\u001b[22m\u001b[39m git-repo `https://github.com/JuliaRegistries/General.git`\n", 63 | "\u001b[?25l\u001b[2K\u001b[?25h\u001b[32m\u001b[1m Cloning\u001b[22m\u001b[39m git-repo `http://github.com/crstnbr/Useless.jl`\n", 64 | "\u001b[2K\u001b[?25h\u001b[32m\u001b[1m Updating\u001b[22m\u001b[39m git-repo `http://github.com/crstnbr/Useless.jl` %\n", 65 | "\u001b[?25l\u001b[2K\u001b[?25h" 66 | ] 67 | } 68 | ], 69 | "source": [ 70 | "] instantiate" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 3, 76 | "metadata": {}, 77 | "outputs": [ 78 | { 79 | "name": "stdout", 80 | "output_type": "stream", 81 | "text": [ 82 | "\u001b[32m\u001b[1m Status\u001b[22m\u001b[39m `C:\\Users\\carsten\\Desktop\\JuliaOulu20\\DayTwo\\1_environments\\sharedproject\\Project.toml`\n", 83 | " \u001b[90m [844dae10]\u001b[39m\u001b[37m Useless v0.1.0 #master (http://github.com/crstnbr/Useless.jl)\u001b[39m\n" 84 | ] 85 | } 86 | ], 87 | "source": [ 88 | "] status" 89 | ] 90 | }, 91 | { 92 | "cell_type": "markdown", 93 | "metadata": {}, 94 | "source": [ 95 | "# Our colleagues file" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": null, 101 | "metadata": {}, 102 | "outputs": [], 103 | "source": [ 104 | "# Our colleague's code depends on a useless package\n", 105 | "# (https://github.com/crstnbr/Useless.jl)\n", 106 | "using Useless\n", 107 | "\n", 108 | "life_universe_everything(question) = 42\n", 109 | "\n", 110 | "println(life_universe_everything(\"What is the meaning of life?\"))" 111 | ] 112 | } 113 | ], 114 | "metadata": { 115 | "@webio": { 116 | "lastCommId": null, 117 | "lastKernelId": null 118 | }, 119 | "kernelspec": { 120 | "display_name": "Julia 1.7.3", 121 | "language": "julia", 122 | "name": "julia-1.7" 123 | }, 124 | "language_info": { 125 | "file_extension": ".jl", 126 | "mimetype": "application/julia", 127 | "name": "julia", 128 | "version": "1.7.3" 129 | } 130 | }, 131 | "nbformat": 4, 132 | "nbformat_minor": 4 133 | } 134 | -------------------------------------------------------------------------------- /2206_Julia/1_projects/sharedproject/code.jl: -------------------------------------------------------------------------------- 1 | # Our colleague's project depends on a useless package 2 | # (https://github.com/crstnbr/Useless.jl) 3 | using Useless 4 | 5 | life_universe_everything(question) = 42 6 | 7 | println(life_universe_everything("What is the meaning of life?")) 8 | 9 | # Step 1: instantiate the environment 10 | # Step 2: run the code: julia --project=. 
code.jl -------------------------------------------------------------------------------- /2206_Julia/2_linear_algebra/1_linalg.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Linear Algebra ([docs](https://docs.julialang.org/en/v1.0.0/stdlib/LinearAlgebra/))\n", 8 | "\n", 9 | "### Filippo Vicentini, CQSL\n", 10 | "\n", 11 | "Notebook based on work by Carsten Bauer" 12 | ] 13 | }, 14 | { 15 | "cell_type": "markdown", 16 | "metadata": {}, 17 | "source": [ 18 | "After `using LinearAlgebra`, **Julia speaks linear algebra fluently**.\n", 19 | "\n", 20 | "Performing linear algebra operations on a computer is, of course, an old problem. Lots of amazing libraries have been written - mostly in Fortran - which have been optimized over decades.\n", 21 | "\n", 22 | "Basically all high-level programming languages use these libraries, including R, Python, and Julia.\n", 23 | "\n", 24 | "Linear algebra in Julia is largely implemented by calling [BLAS](http://www.netlib.org/blas/)/[LAPACK](http://www.netlib.org/lapack/) functions. Sparse operations utilize functionality in [SuiteSparse](http://faculty.cse.tamu.edu/davis/suitesparse.html).\n", 25 | "\n", 26 | "As per default, Julia uses the [OpenBLAS](https://github.com/xianyi/OpenBLAS) implementation (BLAS, LAPACK, LIBM), which can be replaced by [Intel's MKL](https://software.intel.com/en-us/mkl) (BLAS, LAPACK) and [Intel's Math Library](https://software.intel.com/en-us/node/522653) (LIBM).\n", 27 | "\n", 28 | "**What is all this stuff?!?**\n", 29 | "\n", 30 | "* **BLAS**: a collection of low-level matrix and vector arithmetic operations (\"multiply two matrices\", \"multiply a matrix by vector\").\n", 31 | "* **LAPACK**: a collection of higher-level linear algebra operations. Things like matrix factorizations (LU, LLt, QR, SVD, Schur, etc) that are used to do things like “find the eigenvalues of a matrix”, or “find the singular values of a matrix”, or “solve a linear system”.\n", 32 | "* **LIBM**: basic math functions like `sin`, `cos`, `sinh`, etcetera\n", 33 | "\n", 34 | "Sparse matrices are more difficult and there exist different collections of routines, one of which is **SuiteSparse**.\n", 35 | "\n", 36 | "**Why do I have to care?**\n", 37 | "\n", 38 | "* Switching from OpenBLAS to MKL can give you large speedups!\n", 39 | "* Since you might be leaving the world of Julia code, you loose easy inspectability and type genericity. The latter can be an issue for machine learning, as we'll discuss later in more detail." 40 | ] 41 | }, 42 | { 43 | "cell_type": "markdown", 44 | "metadata": {}, 45 | "source": [ 46 | "# Taking linear algebra seriously" 47 | ] 48 | }, 49 | { 50 | "cell_type": "markdown", 51 | "metadata": {}, 52 | "source": [ 53 | "Julia is [taking linear algebra seriously](https://www.youtube.com/watch?v=C2RO34b_oPM)! (see [here](https://github.com/JuliaLang/julia/issues/4774), and [here](https://github.com/JuliaLang/julia/issues/20978))." 
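Related to the OpenBLAS/MKL remark above: on Julia ≥ 1.7 you can inspect which BLAS/LAPACK backend is currently loaded, and (if the optional `MKL.jl` package is installed) switch to Intel MKL simply by loading it. A minimal sketch, with the `using MKL` line left commented out since MKL.jl is not part of this project's environment:

```julia
using LinearAlgebra

# Show the BLAS/LAPACK libraries currently in use (OpenBLAS by default).
println(BLAS.get_config())

# using MKL                # loading MKL.jl would swap the backend to Intel MKL
# println(BLAS.get_config())
```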
54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "using LinearAlgebra" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": null, 68 | "metadata": {}, 69 | "outputs": [], 70 | "source": [ 71 | "A = rand(4,4)" 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": null, 77 | "metadata": {}, 78 | "outputs": [], 79 | "source": [ 80 | "det(A)" 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": null, 86 | "metadata": {}, 87 | "outputs": [], 88 | "source": [ 89 | "inv(A)" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": null, 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "rank(A)" 99 | ] 100 | }, 101 | { 102 | "cell_type": "markdown", 103 | "metadata": {}, 104 | "source": [ 105 | "Let's get a vector as well" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": null, 111 | "metadata": {}, 112 | "outputs": [], 113 | "source": [ 114 | "v = rand(4)" 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": null, 120 | "metadata": {}, 121 | "outputs": [], 122 | "source": [ 123 | "println(typeof(A))\n", 124 | "println(typeof(v))" 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | "execution_count": null, 130 | "metadata": {}, 131 | "outputs": [], 132 | "source": [ 133 | "norm(v)" 134 | ] 135 | }, 136 | { 137 | "cell_type": "code", 138 | "execution_count": null, 139 | "metadata": {}, 140 | "outputs": [], 141 | "source": [ 142 | "v^2 # can't square a vector" 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": null, 148 | "metadata": {}, 149 | "outputs": [], 150 | "source": [] 151 | }, 152 | { 153 | "cell_type": "code", 154 | "execution_count": null, 155 | "metadata": {}, 156 | "outputs": [], 157 | "source": [ 158 | "v.^2" 159 | ] 160 | }, 161 | { 162 | "cell_type": "code", 163 | "execution_count": null, 164 | "metadata": {}, 165 | "outputs": [], 166 | "source": [ 167 | "sqrt(sum(v.^2))" 168 | ] 169 | }, 170 | { 171 | "cell_type": "markdown", 172 | "metadata": {}, 173 | "source": [ 174 | "Some things might be suprising" 175 | ] 176 | }, 177 | { 178 | "cell_type": "code", 179 | "execution_count": null, 180 | "metadata": {}, 181 | "outputs": [], 182 | "source": [ 183 | "1/v" 184 | ] 185 | }, 186 | { 187 | "cell_type": "markdown", 188 | "metadata": {}, 189 | "source": [ 190 | "But if it works, there is typically meaning to it. In this case it is calculating the [Moore-Penrose-Pseudoinverse](https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse#Vectors) (`transpose(v)/sum(abs2,v)`)." 
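As a quick numerical sanity check of that claim, a small self-contained sketch comparing `1/v` against the quoted formula and against `pinv`:

```julia
using LinearAlgebra

v = rand(4)
pseudo = transpose(v) / sum(abs2, v)   # the formula quoted above

1/v ≈ pseudo        # true, up to floating-point error
pinv(v) ≈ pseudo    # LinearAlgebra's pinv gives the same row (co)vector

# Defining property of a pseudoinverse, A * A⁺ * A == A, here with A = v as a column:
v * (1/v) * v ≈ v
```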
191 | ] 192 | }, 193 | { 194 | "cell_type": "markdown", 195 | "metadata": {}, 196 | "source": [ 197 | "### Identity matrix: `UniformScaling` operator" 198 | ] 199 | }, 200 | { 201 | "cell_type": "code", 202 | "execution_count": null, 203 | "metadata": {}, 204 | "outputs": [], 205 | "source": [ 206 | "A" 207 | ] 208 | }, 209 | { 210 | "cell_type": "code", 211 | "execution_count": null, 212 | "metadata": {}, 213 | "outputs": [], 214 | "source": [ 215 | "A .* 3" 216 | ] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "execution_count": null, 221 | "metadata": {}, 222 | "outputs": [], 223 | "source": [ 224 | "A .+ 3" 225 | ] 226 | }, 227 | { 228 | "cell_type": "code", 229 | "execution_count": null, 230 | "metadata": {}, 231 | "outputs": [], 232 | "source": [ 233 | "A .+ 3" 234 | ] 235 | }, 236 | { 237 | "cell_type": "markdown", 238 | "metadata": {}, 239 | "source": [ 240 | "The `UniformScaling` operator **represents an identity matrix of any size** and is another great example of **duck typing**. It automatically gets loaded into scope when you do `using LinearAlgebra` and has the name `I`." 241 | ] 242 | }, 243 | { 244 | "cell_type": "code", 245 | "execution_count": null, 246 | "metadata": {}, 247 | "outputs": [], 248 | "source": [ 249 | "I" 250 | ] 251 | }, 252 | { 253 | "cell_type": "markdown", 254 | "metadata": {}, 255 | "source": [ 256 | "Although it never actually materializes a full identity matrix it behaves like one." 257 | ] 258 | }, 259 | { 260 | "cell_type": "code", 261 | "execution_count": null, 262 | "metadata": {}, 263 | "outputs": [], 264 | "source": [ 265 | "A + 10I" 266 | ] 267 | }, 268 | { 269 | "cell_type": "code", 270 | "execution_count": null, 271 | "metadata": {}, 272 | "outputs": [], 273 | "source": [ 274 | "I * A == A" 275 | ] 276 | }, 277 | { 278 | "cell_type": "markdown", 279 | "metadata": {}, 280 | "source": [ 281 | "Hence, we can calculate things like, say, `A-b*I` without ever allocating a dense identity matrix, which would take up $\\mathcal{O}(n^2)$ memory.\n", 282 | "\n", 283 | "Let's benchmark the performance difference!" 284 | ] 285 | }, 286 | { 287 | "cell_type": "code", 288 | "execution_count": null, 289 | "metadata": {}, 290 | "outputs": [], 291 | "source": [ 292 | "fullI = Matrix{Float64}(I, 4,4) # alternatively but slower, diagm(ones(4))" 293 | ] 294 | }, 295 | { 296 | "cell_type": "code", 297 | "execution_count": null, 298 | "metadata": {}, 299 | "outputs": [], 300 | "source": [ 301 | "fast(A) = A + 3*I" 302 | ] 303 | }, 304 | { 305 | "cell_type": "code", 306 | "execution_count": null, 307 | "metadata": {}, 308 | "outputs": [], 309 | "source": [ 310 | "slow(A, fullI) = A + 3*fullI" 311 | ] 312 | }, 313 | { 314 | "cell_type": "code", 315 | "execution_count": null, 316 | "metadata": {}, 317 | "outputs": [], 318 | "source": [ 319 | "function slower(A)\n", 320 | " fullI = Matrix(1.0I, size(A)...)\n", 321 | " A + 3*fullI\n", 322 | "end" 323 | ] 324 | }, 325 | { 326 | "cell_type": "code", 327 | "execution_count": null, 328 | "metadata": {}, 329 | "outputs": [], 330 | "source": [ 331 | "using BenchmarkTools\n", 332 | "@btime fast($A);\n", 333 | "@btime slow($A, $fullI);\n", 334 | "@btime slower($A);" 335 | ] 336 | }, 337 | { 338 | "cell_type": "markdown", 339 | "metadata": {}, 340 | "source": [ 341 | "# Utilizing matrix factorizations" 342 | ] 343 | }, 344 | { 345 | "cell_type": "markdown", 346 | "metadata": {}, 347 | "source": [ 348 | "[Matrix factorizations (a.k.a. 
matrix decompositions)](https://en.wikipedia.org/wiki/Matrix_decomposition)\n", 349 | "are factorization of a matrix into a product of matrices, and are one of the central concepts\n", 350 | "in linear algebra.\n", 351 | "\n", 352 | "Making good use of matrix factorizations is crucial for efficient linear algebra operations.\n", 353 | "\n", 354 | "Example: Solving the linear system `Ax = b`." 355 | ] 356 | }, 357 | { 358 | "cell_type": "code", 359 | "execution_count": null, 360 | "metadata": {}, 361 | "outputs": [], 362 | "source": [ 363 | "A = rand(1:10, 5, 5)" 364 | ] 365 | }, 366 | { 367 | "cell_type": "code", 368 | "execution_count": null, 369 | "metadata": {}, 370 | "outputs": [], 371 | "source": [ 372 | "b = rand(5)" 373 | ] 374 | }, 375 | { 376 | "cell_type": "markdown", 377 | "metadata": {}, 378 | "source": [ 379 | "Solve explicitly" 380 | ] 381 | }, 382 | { 383 | "cell_type": "code", 384 | "execution_count": null, 385 | "metadata": {}, 386 | "outputs": [], 387 | "source": [ 388 | "inv(A)*b" 389 | ] 390 | }, 391 | { 392 | "cell_type": "markdown", 393 | "metadata": {}, 394 | "source": [ 395 | "Solve by left division `\\`" 396 | ] 397 | }, 398 | { 399 | "cell_type": "code", 400 | "execution_count": null, 401 | "metadata": {}, 402 | "outputs": [], 403 | "source": [ 404 | "A\\b" 405 | ] 406 | }, 407 | { 408 | "cell_type": "code", 409 | "execution_count": null, 410 | "metadata": {}, 411 | "outputs": [], 412 | "source": [ 413 | "using BenchmarkTools\n", 414 | "@btime inv($A)*$b; # it is (almost) never necessary to calculate the dense inverse\n", 415 | "@btime $A\\$b;" 416 | ] 417 | }, 418 | { 419 | "cell_type": "markdown", 420 | "metadata": {}, 421 | "source": [ 422 | "What does Julia do to make this so much faster?\n", 423 | "\n", 424 | "It knows that it can perform the division much faster if it first [LU decomposes](https://en.wikipedia.org/wiki/LU_decomposition) `A`." 425 | ] 426 | }, 427 | { 428 | "cell_type": "code", 429 | "execution_count": null, 430 | "metadata": {}, 431 | "outputs": [], 432 | "source": [ 433 | "lu(A)\\b" 434 | ] 435 | }, 436 | { 437 | "cell_type": "code", 438 | "execution_count": null, 439 | "metadata": {}, 440 | "outputs": [], 441 | "source": [ 442 | "@btime lu($A)\\$b" 443 | ] 444 | }, 445 | { 446 | "cell_type": "markdown", 447 | "metadata": {}, 448 | "source": [ 449 | "Let's inspect the output of `lu(A)`" 450 | ] 451 | }, 452 | { 453 | "cell_type": "code", 454 | "execution_count": null, 455 | "metadata": {}, 456 | "outputs": [], 457 | "source": [ 458 | "lu(A)" 459 | ] 460 | }, 461 | { 462 | "cell_type": "code", 463 | "execution_count": null, 464 | "metadata": {}, 465 | "outputs": [], 466 | "source": [ 467 | "typeof(lu(A))" 468 | ] 469 | }, 470 | { 471 | "cell_type": "code", 472 | "execution_count": null, 473 | "metadata": {}, 474 | "outputs": [], 475 | "source": [ 476 | "supertype(LU)" 477 | ] 478 | }, 479 | { 480 | "cell_type": "markdown", 481 | "metadata": {}, 482 | "source": [ 483 | "### List of factorizations\n", 484 | "\n", 485 | "The following table summarizes the types of matrix factorizations that have been implemented in\n", 486 | "Julia. 
Details of their associated methods can be found in the [Standard Functions](https://docs.julialang.org/en/latest/stdlib/LinearAlgebra/#Standard-Functions-1) section\n", 487 | "of the Linear Algebra documentation.\n", 488 | "\n", 489 | "| Type | Description |\n", 490 | "|:------------------ |:-------------------------------------------------------------------------------------------------------------- |\n", 491 | "| `BunchKaufman` | Bunch-Kaufman factorization |\n", 492 | "| `Cholesky` | [Cholesky factorization](https://en.wikipedia.org/wiki/Cholesky_decomposition) |\n", 493 | "| `CholeskyPivoted` | [Pivoted](https://en.wikipedia.org/wiki/Pivot_element) Cholesky factorization |\n", 494 | "| `LDLt` | [LDL(T) factorization](https://en.wikipedia.org/wiki/Cholesky_decomposition#LDL_decomposition) |\n", 495 | "| `LU` | [LU factorization](https://en.wikipedia.org/wiki/LU_decomposition) |\n", 496 | "| `QR` | [QR factorization](https://en.wikipedia.org/wiki/QR_decomposition) |\n", 497 | "| `QRCompactWY` | Compact WY form of the QR factorization |\n", 498 | "| `QRPivoted` | Pivoted [QR factorization](https://en.wikipedia.org/wiki/QR_decomposition) |\n", 499 | "| `LQ` | [QR factorization](https://en.wikipedia.org/wiki/QR_decomposition) of `transpose(A)` |\n", 500 | "| `Hessenberg` | [Hessenberg decomposition](http://mathworld.wolfram.com/HessenbergDecomposition.html) |\n", 501 | "| `Eigen` | [Spectral decomposition](https://en.wikipedia.org/wiki/Eigendecomposition_of_a_matrix) |\n", 502 | "| `GeneralizedEigen` | [Generalized spectral decomposition](https://en.wikipedia.org/wiki/Eigendecomposition_of_a_matrix#Generalized_eigenvalue_problem) |\n", 503 | "| `SVD` | [Singular value decomposition](https://en.wikipedia.org/wiki/Singular_value_decomposition) |\n", 504 | "| `GeneralizedSVD` | [Generalized SVD](https://en.wikipedia.org/wiki/Generalized_singular_value_decomposition#Higher_order_version) |\n", 505 | "| `Schur` | [Schur decomposition](https://en.wikipedia.org/wiki/Schur_decomposition) |\n", 506 | "| `GeneralizedSchur` | [Generalized Schur decomposition](https://en.wikipedia.org/wiki/Schur_decomposition#Generalized_Schur_decomposition) |\n", 507 | "\n", 508 | "(Taken from the Julia docs)" 509 | ] 510 | }, 511 | { 512 | "cell_type": "code", 513 | "execution_count": null, 514 | "metadata": {}, 515 | "outputs": [], 516 | "source": [ 517 | "@btime lu($A)\\$b\n", 518 | "@btime qr($A)\\$b\n", 519 | "@btime svd($A)\\$b;" 520 | ] 521 | }, 522 | { 523 | "cell_type": "markdown", 524 | "metadata": {}, 525 | "source": [ 526 | "From the documentation (`?\\`) of the left division operator:\n", 527 | "\n", 528 | ">Matrix division using a polyalgorithm. For input matrices A and B, the result X is such that A*X == B when A is square. The solver that is used depends upon the structure of A. If A is upper or lower triangular (or diagonal), no factorization of A is required and the system is solved with either forward or backward substitution." 
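To see that last point in action, here is a small sketch (using the same `BenchmarkTools` setup as above) comparing a dense matrix that merely *happens* to be upper triangular with one whose type announces it:

```julia
using LinearAlgebra, BenchmarkTools

A = rand(100, 100)
b = rand(100)

U_dense = triu(A)                # still a plain Matrix{Float64}
U_typed = UpperTriangular(A)     # the type encodes the structure

# Both end up doing backward substitution, but the dense version first has to
# detect the triangular structure at runtime (istril/istriu checks), while the
# typed version dispatches to the triangular solve directly.
@btime $U_dense \ $b;
@btime $U_typed \ $b;
```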
529 | ] 530 | }, 531 | { 532 | "cell_type": "code", 533 | "execution_count": null, 534 | "metadata": {}, 535 | "outputs": [], 536 | "source": [ 537 | "@which \\(rand(2,2), rand(2,2))" 538 | ] 539 | }, 540 | { 541 | "cell_type": "markdown", 542 | "metadata": {}, 543 | "source": [ 544 | "This is what the actual heuristic looks like (`@which`/`@edit` are your friends!)\n", 545 | "\n", 546 | "```julia\n", 547 | "function (\\)(A::AbstractMatrix, B::AbstractVecOrMat)\n", 548 | " require_one_based_indexing(A, B)\n", 549 | " m, n = size(A)\n", 550 | " if m == n\n", 551 | " if istril(A)\n", 552 | " if istriu(A)\n", 553 | " return Diagonal(A) \\ B\n", 554 | " else\n", 555 | " return LowerTriangular(A) \\ B\n", 556 | " end\n", 557 | " end\n", 558 | " if istriu(A)\n", 559 | " return UpperTriangular(A) \\ B\n", 560 | " end\n", 561 | " return lu(A) \\ B\n", 562 | " end\n", 563 | " return qr(A,Val(true)) \\ B\n", 564 | "end\n", 565 | "```" 566 | ] 567 | }, 568 | { 569 | "cell_type": "markdown", 570 | "metadata": {}, 571 | "source": [ 572 | "Generically, a heuristic is implemented in `factorize`:" 573 | ] 574 | }, 575 | { 576 | "cell_type": "code", 577 | "execution_count": null, 578 | "metadata": {}, 579 | "outputs": [], 580 | "source": [ 581 | "typeof(factorize(A))" 582 | ] 583 | }, 584 | { 585 | "cell_type": "code", 586 | "execution_count": null, 587 | "metadata": {}, 588 | "outputs": [], 589 | "source": [ 590 | "typeof(factorize(A+A'))" 591 | ] 592 | }, 593 | { 594 | "cell_type": "markdown", 595 | "metadata": {}, 596 | "source": [ 597 | "# Fast linear algebra with multiple dispatch" 598 | ] 599 | }, 600 | { 601 | "cell_type": "markdown", 602 | "metadata": {}, 603 | "source": [ 604 | "Ok, we've seen that Julia analyses the input matrix using some heuristic, factorizes it appropriately to then perform the calculation efficiently. \n", 605 | "\n", 606 | "But we can (and probably should) also be more explicit about our input to avoid this heuristic. We can encode the special structure of our matrix in a type such that we directly dispatch to the efficient method. Remember, the types decide which method is actually being run!\n", 607 | "\n", 608 | "There are many reasons to indicate what kind of matrix we have.\n", 609 | "\n", 610 | "* Don't rely on a heuristic. Not all methods have one!\n", 611 | "* The heurisitc comes with a small performance penalty.\n", 612 | "* The heurisitc isn't perfect and might fail to notice our matrix's special structure. Maybe because it's not known to base Julia. As we'll see later on, many external packages define additional special matrix types and efficient procedures for them.\n", 613 | "\n", 614 | "There are a number of [special matrix](https://docs.julialang.org/en/latest/stdlib/LinearAlgebra/#Special-matrices-1) types are available out-of-the-box." 615 | ] 616 | }, 617 | { 618 | "cell_type": "code", 619 | "execution_count": null, 620 | "metadata": {}, 621 | "outputs": [], 622 | "source": [ 623 | "D = Diagonal(1:5)" 624 | ] 625 | }, 626 | { 627 | "cell_type": "code", 628 | "execution_count": null, 629 | "metadata": {}, 630 | "outputs": [], 631 | "source": [ 632 | "Ddense = Matrix(D) # same matrix but type doesn't indicate diagonal structure" 633 | ] 634 | }, 635 | { 636 | "cell_type": "code", 637 | "execution_count": null, 638 | "metadata": {}, 639 | "outputs": [], 640 | "source": [ 641 | "@btime $D*$b\n", 642 | "@btime $Ddense*$b" 643 | ] 644 | }, 645 | { 646 | "cell_type": "markdown", 647 | "metadata": {}, 648 | "source": [ 649 | "What method does it dispatch to?" 
650 | ] 651 | }, 652 | { 653 | "cell_type": "code", 654 | "execution_count": null, 655 | "metadata": {}, 656 | "outputs": [], 657 | "source": [ 658 | "@which D*b" 659 | ] 660 | }, 661 | { 662 | "cell_type": "code", 663 | "execution_count": null, 664 | "metadata": {}, 665 | "outputs": [], 666 | "source": [ 667 | "@which Ddense*b" 668 | ] 669 | }, 670 | { 671 | "cell_type": "markdown", 672 | "metadata": {}, 673 | "source": [ 674 | "**Dense Diagonal** (`Ddense*b`)\n", 675 | "\n", 676 | "```julia\n", 677 | "function (*)(A::AbstractMatrix{T}, x::AbstractVector{S}) where {T,S}\n", 678 | " TS = promote_op(matprod, T, S)\n", 679 | " mul!(similar(x,TS,axes(A,1)),A,x)\n", 680 | "end\n", 681 | "```\n", 682 | "\n", 683 | "**Diagonal** (`D*b`)\n", 684 | "```julia\n", 685 | "(*)(D::Diagonal, V::AbstractVector) = D.diag .* V\n", 686 | "```" 687 | ] 688 | }, 689 | { 690 | "cell_type": "markdown", 691 | "metadata": {}, 692 | "source": [ 693 | "# Fermions hopping on a chain" 694 | ] 695 | }, 696 | { 697 | "cell_type": "markdown", 698 | "metadata": {}, 699 | "source": [ 700 | "$$\\mathcal{H} = -t\\sum_{\\langle i,j \\rangle} c_i^\\dagger c_j + \\mu \\sum_i n_i$$\n", 701 | "\n", 702 | "Here, $t$ is the hopping amplitude, $\\mu$ is the chemical potential, and $c, c^\\dagger$ are creation and annihilation operators.\n", 703 | "\n", 704 | "For simplicity, we'll consider **open boundary conditions** (not periodic), in which case the Hamiltonian is tridiagonal.\n", 705 | "\n", 706 | "Since the fermions are *not* interacting, we can work in the *single particle basis* and do not have to worry about how to construct a basis for the many-body Fock space.\n", 707 | "\n", 708 | "We use the canonical cartesian basis in which one uses $0$s to indicate empty sites and a $1$ for the particle's site, i.e. $|00100\\rangle$ represents the basis state which has the particle exclusively on the 3rd site.\n", 709 | "\n", 710 | "If you aren't familiar with second quantization just think of $\\mathcal{H}$ as any quantum mechanical operator that can be represented as a matrix." 
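To make the structure explicit: in the single-particle basis the matrix elements are simply $\langle i |\mathcal{H}| j \rangle = \mu\,\delta_{ij} - t\,\delta_{|i-j|,1}$, so for open boundary conditions

$$\mathcal{H} \;\doteq\; \begin{pmatrix} \mu & -t & & & \\ -t & \mu & -t & & \\ & -t & \mu & \ddots & \\ & & \ddots & \ddots & -t \\ & & & -t & \mu \end{pmatrix},$$

which is exactly the tridiagonal matrix built by the `diagm` call below.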
711 | ] 712 | }, 713 | { 714 | "cell_type": "code", 715 | "execution_count": null, 716 | "metadata": {}, 717 | "outputs": [], 718 | "source": [ 719 | "N = 100 # number of sites\n", 720 | "t = 1\n", 721 | "μ = -0.5\n", 722 | "\n", 723 | "H = diagm(0 => fill(μ, N), 1 => fill(-t, N-1), -1 => fill(-t, N-1))" 724 | ] 725 | }, 726 | { 727 | "cell_type": "code", 728 | "execution_count": null, 729 | "metadata": {}, 730 | "outputs": [], 731 | "source": [ 732 | "ψ = normalize(rand(N)); # some state" 733 | ] 734 | }, 735 | { 736 | "cell_type": "code", 737 | "execution_count": null, 738 | "metadata": {}, 739 | "outputs": [], 740 | "source": [ 741 | "ev(H, ψ) = ψ'*H*ψ # <φ|H|φ>" 742 | ] 743 | }, 744 | { 745 | "cell_type": "code", 746 | "execution_count": null, 747 | "metadata": {}, 748 | "outputs": [], 749 | "source": [ 750 | "ev(H, ψ)" 751 | ] 752 | }, 753 | { 754 | "cell_type": "code", 755 | "execution_count": null, 756 | "metadata": {}, 757 | "outputs": [], 758 | "source": [ 759 | "@btime ev($H, $ψ);" 760 | ] 761 | }, 762 | { 763 | "cell_type": "code", 764 | "execution_count": null, 765 | "metadata": {}, 766 | "outputs": [], 767 | "source": [ 768 | "typeof(H)" 769 | ] 770 | }, 771 | { 772 | "cell_type": "markdown", 773 | "metadata": {}, 774 | "source": [ 775 | "As long as the code is generic (respects the informal `AbstractArray` interface), we can use the same piece of code for completely different array types.\n", 776 | "\n", 777 | "Let's utilize the sparsity of `H` by indicating it through a type." 778 | ] 779 | }, 780 | { 781 | "cell_type": "code", 782 | "execution_count": null, 783 | "metadata": {}, 784 | "outputs": [], 785 | "source": [ 786 | "using SparseArrays\n", 787 | "Hsparse = sparse(H)" 788 | ] 789 | }, 790 | { 791 | "cell_type": "code", 792 | "execution_count": null, 793 | "metadata": {}, 794 | "outputs": [], 795 | "source": [ 796 | "@btime ev($Hsparse, $ψ);" 797 | ] 798 | }, 799 | { 800 | "cell_type": "markdown", 801 | "metadata": {}, 802 | "source": [ 803 | "That's a solid **30x speedup**!" 804 | ] 805 | }, 806 | { 807 | "cell_type": "markdown", 808 | "metadata": {}, 809 | "source": [ 810 | "Our `H` isn't just sparse, but actually tridiagonal. Let's try to exploit that." 811 | ] 812 | }, 813 | { 814 | "cell_type": "code", 815 | "execution_count": null, 816 | "metadata": {}, 817 | "outputs": [], 818 | "source": [ 819 | "Htri = Tridiagonal(H)" 820 | ] 821 | }, 822 | { 823 | "cell_type": "code", 824 | "execution_count": null, 825 | "metadata": {}, 826 | "outputs": [], 827 | "source": [ 828 | "@btime ev($Htri, $ψ);" 829 | ] 830 | }, 831 | { 832 | "cell_type": "markdown", 833 | "metadata": {}, 834 | "source": [ 835 | "Choosing the best type (and therewith an algorithm) can be tricky and one has to play around a bit. The good thing is that it's very easy to try out different types!\n", 836 | "\n", 837 | "Note that there are also great matrix types available in the ecosystem, see [JuliaMatrices](https://github.com/JuliaMatrices), for example." 838 | ] 839 | }, 840 | { 841 | "cell_type": "markdown", 842 | "metadata": {}, 843 | "source": [ 844 | "# Exact diagonalisation a.k.a Eigendecomposition" 845 | ] 846 | }, 847 | { 848 | "cell_type": "markdown", 849 | "metadata": {}, 850 | "source": [ 851 | "To diagonalize our dense \"Hamiltonian\", we simply call the built-in function `eigen`." 
852 | ] 853 | }, 854 | { 855 | "cell_type": "code", 856 | "execution_count": null, 857 | "metadata": {}, 858 | "outputs": [], 859 | "source": [ 860 | "vals, vecs = eigen(H)" 861 | ] 862 | }, 863 | { 864 | "cell_type": "code", 865 | "execution_count": null, 866 | "metadata": {}, 867 | "outputs": [], 868 | "source": [ 869 | "ψ0 = vecs[:,1] # single-particle groundstate" 870 | ] 871 | }, 872 | { 873 | "cell_type": "code", 874 | "execution_count": null, 875 | "metadata": {}, 876 | "outputs": [], 877 | "source": [ 878 | "ev(H, ψ0)" 879 | ] 880 | }, 881 | { 882 | "cell_type": "code", 883 | "execution_count": null, 884 | "metadata": {}, 885 | "outputs": [], 886 | "source": [ 887 | "ev(H, ψ0) <= ev(H, ψ) # groundstate has the lowest energy" 888 | ] 889 | }, 890 | { 891 | "cell_type": "code", 892 | "execution_count": null, 893 | "metadata": {}, 894 | "outputs": [], 895 | "source": [ 896 | "using Plots\n", 897 | "\n", 898 | "show_n_states = 3\n", 899 | "\n", 900 | "p = plot()\n", 901 | "for i in 1:show_n_states\n", 902 | " plot!(p, abs2.(vecs[:,i]), xlab=\"site\", ylab=\"probability\", lab=\"n = $(i-1)\")\n", 903 | "end\n", 904 | "p" 905 | ] 906 | }, 907 | { 908 | "cell_type": "markdown", 909 | "metadata": {}, 910 | "source": [ 911 | "Since Julia is using eigenproblem solvers from LAPACK (written in a low-level language) the code is, of course, **not generic**." 912 | ] 913 | }, 914 | { 915 | "cell_type": "markdown", 916 | "metadata": {}, 917 | "source": [ 918 | "The best Julia can do, without implementing new functionality, is manually dispatch to the best LAPACK routine available.\n", 919 | "\n", 920 | "Hence, it won't work with most of our special matrices." 921 | ] 922 | }, 923 | { 924 | "cell_type": "code", 925 | "execution_count": null, 926 | "metadata": {}, 927 | "outputs": [], 928 | "source": [ 929 | "eigen(Htri);" 930 | ] 931 | }, 932 | { 933 | "cell_type": "markdown", 934 | "metadata": {}, 935 | "source": [ 936 | "If we're lucky, someone has implemented a generic solver in Julia that works for a wider range of types. Example:" 937 | ] 938 | }, 939 | { 940 | "cell_type": "code", 941 | "execution_count": null, 942 | "metadata": {}, 943 | "outputs": [], 944 | "source": [ 945 | "Hbig = big.(H)\n", 946 | "eigen(Hermitian(Hbig));" 947 | ] 948 | }, 949 | { 950 | "cell_type": "code", 951 | "execution_count": null, 952 | "metadata": {}, 953 | "outputs": [], 954 | "source": [ 955 | "using GenericLinearAlgebra" 956 | ] 957 | }, 958 | { 959 | "cell_type": "code", 960 | "execution_count": null, 961 | "metadata": {}, 962 | "outputs": [], 963 | "source": [ 964 | "eigen(Hermitian(Hbig));" 965 | ] 966 | }, 967 | { 968 | "cell_type": "markdown", 969 | "metadata": {}, 970 | "source": [ 971 | "Arguably the most important matrix type in physics applications is a sparse matrix, i.e. `SparseMatrixCSC`." 972 | ] 973 | }, 974 | { 975 | "cell_type": "code", 976 | "execution_count": null, 977 | "metadata": {}, 978 | "outputs": [], 979 | "source": [ 980 | "eigen(Hsparse)" 981 | ] 982 | }, 983 | { 984 | "cell_type": "markdown", 985 | "metadata": {}, 986 | "source": [ 987 | "Let's follow Julia's advice and take a look at [ARPACK.jl](https://github.com/JuliaLinearAlgebra/Arpack.jl) and similar packages." 
988 | ] 989 | }, 990 | { 991 | "cell_type": "markdown", 992 | "metadata": {}, 993 | "source": [ 994 | "### Diagonalizing sparse matrices" 995 | ] 996 | }, 997 | { 998 | "cell_type": "markdown", 999 | "metadata": {}, 1000 | "source": [ 1001 | "[ARPACK.jl]() - Wrapper to Fortran library [ARPACK](https://www.caam.rice.edu/software/ARPACK/) which implements **iterative** eigenvalue and singular value solvers. By far the most established sparse eigensolver.\n", 1002 | "\n", 1003 | "Julia implementations:\n", 1004 | "\n", 1005 | "* [ArnoldiMethod.jl](https://github.com/haampie/ArnoldiMethod.jl)\n", 1006 | "* [KrylovKit.jl](https://github.com/Jutho/KrylovKit.jl)\n", 1007 | "* [IterativeSolvers.jl](https://github.com/JuliaMath/IterativeSolvers.jl)\n", 1008 | "* and more\n", 1009 | "\n", 1010 | "\n", 1011 | "A key thing to remember is that while `eigen` is - up to numerical errors - exact, the methods in the packages above are iterative and approximative." 1012 | ] 1013 | }, 1014 | { 1015 | "cell_type": "markdown", 1016 | "metadata": {}, 1017 | "source": [ 1018 | "Arpack uses a different name for the eigenvalue decomposition. They called it `eigs`." 1019 | ] 1020 | }, 1021 | { 1022 | "cell_type": "code", 1023 | "execution_count": null, 1024 | "metadata": {}, 1025 | "outputs": [], 1026 | "source": [ 1027 | "using Arpack\n", 1028 | "λ, evs = eigs(Hsparse);\n", 1029 | "λ" 1030 | ] 1031 | }, 1032 | { 1033 | "cell_type": "markdown", 1034 | "metadata": {}, 1035 | "source": [ 1036 | "For ArnoldiMethod, one has to go through a two-step process." 1037 | ] 1038 | }, 1039 | { 1040 | "cell_type": "code", 1041 | "execution_count": null, 1042 | "metadata": {}, 1043 | "outputs": [], 1044 | "source": [ 1045 | "using ArnoldiMethod\n", 1046 | "decomp, history = partialschur(Hsparse)\n", 1047 | "λ, evs = partialeigen(decomp);\n", 1048 | "λ" 1049 | ] 1050 | }, 1051 | { 1052 | "cell_type": "markdown", 1053 | "metadata": {}, 1054 | "source": [ 1055 | "In KrylovKit, they call the function `eigsolve`." 1056 | ] 1057 | }, 1058 | { 1059 | "cell_type": "code", 1060 | "execution_count": null, 1061 | "metadata": {}, 1062 | "outputs": [], 1063 | "source": [ 1064 | "using KrylovKit\n", 1065 | "λ, evs = eigsolve(Hsparse);\n", 1066 | "λ" 1067 | ] 1068 | }, 1069 | { 1070 | "cell_type": "markdown", 1071 | "metadata": {}, 1072 | "source": [ 1073 | "# Core messages of this Notebook" 1074 | ] 1075 | }, 1076 | { 1077 | "cell_type": "markdown", 1078 | "metadata": {}, 1079 | "source": [ 1080 | "* The standard libraries `LinearAlgebra` and `SparseArrays` make Julia speak linear algebra.\n", 1081 | "* **Indicate properties and structure of a matrix**, like hermiticity or sparsity, through types. Fallback to generic types only if you run into method errors.\n", 1082 | "* For **sparse matrix exact diagonalization**, ARPACK.jl is sort of a standard but there are great alternatives like ArnoldiMethods.jl." 
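As a compact illustration of the last two bullets — and of the exact-versus-iterative distinction mentioned above — here is a hedged sketch on a toy symmetric sparse matrix (not the Hamiltonian from before), comparing dense `eigen` with Arpack's iterative `eigs`:

```julia
using LinearAlgebra, SparseArrays, Arpack

n = 200
T = spdiagm(0 => fill(2.0, n), 1 => fill(-1.0, n - 1), -1 => fill(-1.0, n - 1))

# Dense reference: the full spectrum via LAPACK ("exact" up to round-off).
λ_dense = eigen(Symmetric(Matrix(T))).values

# Sparse and iterative: by default eigs returns eigenvalues of largest
# magnitude, so we compare against the top of the (sorted) dense spectrum.
λ_iter, _ = eigs(T; nev = 1)

λ_dense[end] ≈ λ_iter[1]   # agreement up to the iterative solver's tolerance
```

To target a ground state you would instead ask the iterative solver for the smallest eigenvalues — and never densify the matrix in the first place.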
1083 | ] 1084 | }, 1085 | { 1086 | "cell_type": "markdown", 1087 | "metadata": {}, 1088 | "source": [ 1089 | "## If time permits" 1090 | ] 1091 | }, 1092 | { 1093 | "cell_type": "markdown", 1094 | "metadata": {}, 1095 | "source": [ 1096 | "### StaticArrays.jl" 1097 | ] 1098 | }, 1099 | { 1100 | "cell_type": "code", 1101 | "execution_count": null, 1102 | "metadata": {}, 1103 | "outputs": [], 1104 | "source": [ 1105 | "using StaticArrays" 1106 | ] 1107 | }, 1108 | { 1109 | "cell_type": "code", 1110 | "execution_count": null, 1111 | "metadata": {}, 1112 | "outputs": [], 1113 | "source": [ 1114 | "m = SMatrix{2,2}(1, 2, 3, 4)" 1115 | ] 1116 | }, 1117 | { 1118 | "cell_type": "code", 1119 | "execution_count": null, 1120 | "metadata": {}, 1121 | "outputs": [], 1122 | "source": [ 1123 | "size(m)" 1124 | ] 1125 | }, 1126 | { 1127 | "cell_type": "code", 1128 | "execution_count": null, 1129 | "metadata": {}, 1130 | "outputs": [], 1131 | "source": [ 1132 | "size(typeof(m))" 1133 | ] 1134 | }, 1135 | { 1136 | "cell_type": "code", 1137 | "execution_count": null, 1138 | "metadata": {}, 1139 | "outputs": [], 1140 | "source": [ 1141 | "# compare to\n", 1142 | "M = Matrix(m)\n", 1143 | "size(typeof(M))" 1144 | ] 1145 | }, 1146 | { 1147 | "cell_type": "code", 1148 | "execution_count": null, 1149 | "metadata": {}, 1150 | "outputs": [], 1151 | "source": [ 1152 | "@SMatrix rand(4,4)" 1153 | ] 1154 | }, 1155 | { 1156 | "cell_type": "code", 1157 | "execution_count": null, 1158 | "metadata": {}, 1159 | "outputs": [], 1160 | "source": [ 1161 | "using BenchmarkTools, LinearAlgebra" 1162 | ] 1163 | }, 1164 | { 1165 | "cell_type": "code", 1166 | "execution_count": null, 1167 | "metadata": {}, 1168 | "outputs": [], 1169 | "source": [ 1170 | "println(\"Inversion\")\n", 1171 | "@btime inv(m);\n", 1172 | "@btime inv(M);" 1173 | ] 1174 | }, 1175 | { 1176 | "cell_type": "code", 1177 | "execution_count": null, 1178 | "metadata": {}, 1179 | "outputs": [], 1180 | "source": [ 1181 | "println(\"Matrix x vector\")\n", 1182 | "v = rand(2)\n", 1183 | "@btime $m * $v;\n", 1184 | "@btime $M * $v;" 1185 | ] 1186 | }, 1187 | { 1188 | "cell_type": "code", 1189 | "execution_count": null, 1190 | "metadata": {}, 1191 | "outputs": [], 1192 | "source": [ 1193 | "vstatic = @SArray rand(2);\n", 1194 | "@code_native debuginfo=:none m*vstatic" 1195 | ] 1196 | }, 1197 | { 1198 | "cell_type": "code", 1199 | "execution_count": null, 1200 | "metadata": {}, 1201 | "outputs": [], 1202 | "source": [ 1203 | "@code_native debuginfo=:none M*v" 1204 | ] 1205 | }, 1206 | { 1207 | "cell_type": "markdown", 1208 | "metadata": {}, 1209 | "source": [ 1210 | "### Dude, I have a GPU!" 1211 | ] 1212 | }, 1213 | { 1214 | "cell_type": "markdown", 1215 | "metadata": {}, 1216 | "source": [ 1217 | "To make another case for *generic programming*, if you want to move the calculation to a GPU, chances are you only have to change the type of your matrix!" 
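There is no GPU cell in this notebook, but a minimal sketch of the idea — assuming an NVIDIA GPU and the external CUDA.jl package, neither of which is part of this environment — could look like this:

```julia
using LinearAlgebra, CUDA   # CUDA.jl is an assumption, not a notebook dependency

A = rand(Float32, 1000, 1000)
v = rand(Float32, 1000)

# "Changing the type" is all it takes: cu() copies the data to the GPU.
A_gpu = cu(A)    # a CuArray now lives in GPU memory
v_gpu = cu(v)

# The same generic expressions now run on the GPU (cuBLAS under the hood).
w_gpu  = A_gpu * v_gpu
ev_gpu = v_gpu' * A_gpu * v_gpu    # an ⟨ψ|A|ψ⟩-style expectation value

Array(w_gpu) ≈ A * v   # copy back to the CPU and compare
```

Other array packages (e.g. AMDGPU.jl or Metal.jl) follow the same pattern: the algorithm stays generic, only the array type changes.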
1218 | ] 1219 | } 1220 | ], 1221 | "metadata": { 1222 | "@webio": { 1223 | "lastCommId": null, 1224 | "lastKernelId": null 1225 | }, 1226 | "kernelspec": { 1227 | "display_name": "Julia 1.7.3", 1228 | "language": "julia", 1229 | "name": "julia-1.7" 1230 | }, 1231 | "language_info": { 1232 | "file_extension": ".jl", 1233 | "mimetype": "application/julia", 1234 | "name": "julia", 1235 | "version": "1.7.3" 1236 | } 1237 | }, 1238 | "nbformat": 4, 1239 | "nbformat_minor": 4 1240 | } 1241 | -------------------------------------------------------------------------------- /2206_Julia/2_linear_algebra/2_ed_quantum_ising.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Quantum Ising Phase Transition\n", 8 | "### Filippo Vicentini, CQSL\n", 9 | "\n", 10 | "Notebook based on work by Carsten Bauer, Katharine Hyatt" 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "## Introduction" 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": {}, 23 | "source": [ 24 | "In this tutorial we will consider a simple quantum mechanical system of spins sitting on a chain. Here, *quantum mechanical*, despite its pompous sound, simply means that our Hamiltonian matrix will have a non-trivial (i.e. non-diagonal) matrix structure.\n", 25 | "\n", 26 | "We will then ask a couple of basic questions,\n", 27 | "\n", 28 | "* What is the ground state of the system?\n", 29 | "* What happens if we turn on a transverse magnetic field?\n", 30 | "* Are there any phase transitions?\n", 31 | "\n", 32 | "To get answers to the questions, we will solve the time-independent Schrödinger equation\n", 33 | "\n", 34 | "$$H|\\psi\\rangle = E |\\psi\\rangle$$\n", 35 | "\n", 36 | "in Julia by means of exact diagonalization of the Hamiltonian." 37 | ] 38 | }, 39 | { 40 | "cell_type": "markdown", 41 | "metadata": {}, 42 | "source": [ 43 | "## Transverse field quantum Ising chain" 44 | ] 45 | }, 46 | { 47 | "cell_type": "markdown", 48 | "metadata": {}, 49 | "source": [ 50 | "Let's start out by defining our system. The Hamiltonian is given by" 51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [ 57 | "$$\\mathcal{H} = -\\sum_{\\langle i, j \\rangle} \\hat{\\sigma}_i^z \\otimes \\hat{\\sigma}_j^z - h\\sum_i \\hat{\\sigma}_i^x$$" 58 | ] 59 | }, 60 | { 61 | "cell_type": "markdown", 62 | "metadata": {}, 63 | "source": [ 64 | "Here, $\\hat{\\sigma}^z$ and $\\hat{\\sigma}^x$ are two of the three [Pauli matrices](https://en.wikipedia.org/wiki/Pauli_matrices), representing our quantum spins, $\\langle i, j \\rangle$ indicates that only neighboring spins talk to each other, and $h$ is the amplitude of the magnetic field. 
" 65 | ] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "execution_count": null, 70 | "metadata": {}, 71 | "outputs": [], 72 | "source": [ 73 | "σᶻ = [1 0; 0 -1] # \\sigma followed by \\^z " 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "metadata": {}, 80 | "outputs": [], 81 | "source": [ 82 | "σˣ = [0 1; 1 0]" 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "metadata": {}, 88 | "source": [ 89 | "Labeling the eigensates of $\\sigma^z$ as $|\\downarrow\\rangle$ and $|\\uparrow\\rangle$, we interpret them as a spin pointing down or up (in $z$-direction), respectively.\n", 90 | "\n", 91 | "Clearly, since being purely off-diagonal, the effect of $\\sigma^x$ on such a single spin is to flip it:\n", 92 | "\n", 93 | "$$\\hat{\\sigma}^x\\left| \\downarrow \\right\\rangle = \\left| \\uparrow \\right\\rangle$$\n", 94 | "\n", 95 | "$$\\hat{\\sigma}^x\\left| \\uparrow \\right\\rangle = \\left| \\downarrow \\right\\rangle$$" 96 | ] 97 | }, 98 | { 99 | "cell_type": "markdown", 100 | "metadata": {}, 101 | "source": [ 102 | "The idea behind the Hamiltonian above is as follows:\n", 103 | "\n", 104 | "* The first term is diagonal in the $\\sigma^z$ eigenbasis. If there is no magnetic field, $h=0$, our quantum model reduces to the well-known classical [Ising model](https://en.wikipedia.org/wiki/Ising_model) (diagonal = trivial matrix structure -> classical). In this case, we have a **finite temperature phase transition** from a paramagnetic ($T>T_c$) phase, where the spins are **disordered by thermal fluctuations**, to a ferromagnetic phase ($T)" 136 | ] 137 | }, 138 | { 139 | "cell_type": "markdown", 140 | "metadata": {}, 141 | "source": [ 142 | "Let's be fancy (cause we can!) and make this look a bit cooler." 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": null, 148 | "metadata": {}, 149 | "outputs": [], 150 | "source": [ 151 | "⊗(x,y) = kron(x,y)" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": null, 157 | "metadata": {}, 158 | "outputs": [], 159 | "source": [ 160 | "σᶻ ⊗ σᶻ" 161 | ] 162 | }, 163 | { 164 | "cell_type": "markdown", 165 | "metadata": {}, 166 | "source": [ 167 | "### Explicit 4-site Hamiltonian" 168 | ] 169 | }, 170 | { 171 | "cell_type": "markdown", 172 | "metadata": {}, 173 | "source": [ 174 | "Imagine our spin chain consists of four sites. Writing out identity matrices (which were left implicit in $H$ above) explicitly, our Hamiltonian reads" 175 | ] 176 | }, 177 | { 178 | "cell_type": "markdown", 179 | "metadata": {}, 180 | "source": [ 181 | "$$\\mathcal{H}_4 = -\\hat{\\sigma}_1^z \\hat{\\sigma}_2^z \\hat{I}_3 \\hat{I}_4 - \\hat{I}_1 \\hat{\\sigma}_2^z \\hat{\\sigma}_3^z \\hat{I}_4 - \\hat{I}_1 \\hat{I}_2 \\hat{\\sigma}_3^z \\hat{\\sigma}_4^z - h\\left(\\hat{\\sigma}_1^x\\hat{I}_2 \\hat{I}_3\\hat{I}_4 + \\hat{I}_1 \\hat{\\sigma}_2^x \\hat{I}_3\\hat{I}_4 +\\hat{I}_1 \\hat{I}_2 \\hat{\\sigma}_3^x\\hat{I}_4 + \\hat{I}_1 \\hat{I}_2 \\hat{I}_3 \\hat{\\sigma}_4^x\\right)$$" 182 | ] 183 | }, 184 | { 185 | "cell_type": "markdown", 186 | "metadata": {}, 187 | "source": [ 188 | "(Note that we are considering *open* boundary conditions here - the spin on site 4 doesn't interact with the one on the first site. For *periodic* boundary conditions we'd have to add a term $- \\hat{\\sigma}^z_1 \\hat{I}_2 \\hat{I}_3 \\hat{\\sigma}_4^z$.)\n", 189 | "\n", 190 | "Translating this expression to Julia is super easy. 
After defining the identity matrix" 191 | ] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "execution_count": null, 196 | "metadata": {}, 197 | "outputs": [], 198 | "source": [ 199 | "id = [1 0; 0 1] # identity matrix" 200 | ] 201 | }, 202 | { 203 | "cell_type": "markdown", 204 | "metadata": {}, 205 | "source": [ 206 | "we can simply write" 207 | ] 208 | }, 209 | { 210 | "cell_type": "code", 211 | "execution_count": null, 212 | "metadata": {}, 213 | "outputs": [], 214 | "source": [ 215 | "h = 1\n", 216 | "H = - σᶻ⊗σᶻ⊗id⊗id - id⊗σᶻ⊗σᶻ⊗id - id⊗id⊗σᶻ⊗σᶻ\n", 217 | "H -= h*(σˣ⊗id⊗id⊗id + id⊗σˣ⊗id⊗id + id⊗id⊗σˣ⊗id + id⊗id⊗id⊗σˣ)" 218 | ] 219 | }, 220 | { 221 | "cell_type": "markdown", 222 | "metadata": {}, 223 | "source": [ 224 | "There it is.\n", 225 | "\n", 226 | "As nice as it is to write those tensor products explicitly, we certainly wouldn't want to write out all the terms for, say, 100 sites.\n", 227 | "\n", 228 | "Let's define a function that iteratively does the job for us." 229 | ] 230 | }, 231 | { 232 | "cell_type": "code", 233 | "execution_count": null, 234 | "metadata": {}, 235 | "outputs": [], 236 | "source": [ 237 | "function TransverseFieldIsing(;N,h)\n", 238 | " id = [1 0; 0 1]\n", 239 | " σˣ = [0 1; 1 0]\n", 240 | " σᶻ = [1 0; 0 -1]\n", 241 | " \n", 242 | " # vector of operators: [σᶻ, σᶻ, id, ...]\n", 243 | " first_term_ops = fill(id, N)\n", 244 | " first_term_ops[1] = σᶻ\n", 245 | " first_term_ops[2] = σᶻ\n", 246 | " \n", 247 | " # vector of operators: [σˣ, id, ...]\n", 248 | " second_term_ops = fill(id, N)\n", 249 | " second_term_ops[1] = σˣ\n", 250 | " \n", 251 | " H = zeros(Int, 2^N, 2^N)\n", 252 | " for i in 1:N-1\n", 253 | " # tensor multiply all operators\n", 254 | " H -= foldl(⊗, first_term_ops)\n", 255 | " # cyclic shift the operators\n", 256 | " first_term_ops = circshift(first_term_ops,1)\n", 257 | " end\n", 258 | " \n", 259 | " for i in 1:N\n", 260 | " H -= h*foldl(⊗, second_term_ops)\n", 261 | " second_term_ops = circshift(second_term_ops,1)\n", 262 | " end\n", 263 | " H\n", 264 | "end" 265 | ] 266 | }, 267 | { 268 | "cell_type": "code", 269 | "execution_count": null, 270 | "metadata": {}, 271 | "outputs": [], 272 | "source": [ 273 | "TransverseFieldIsing(N=8, h=1)" 274 | ] 275 | }, 276 | { 277 | "cell_type": "markdown", 278 | "metadata": {}, 279 | "source": [ 280 | "### Many-particle basis\n", 281 | "\n", 282 | "Beyond a single spin, we have to think how to encode our basis states.\n", 283 | "\n", 284 | "We make the arbitrary choice:\n", 285 | "$0 = \\text{false} = \\downarrow$ and $1 = \\text{true} = \\uparrow$\n", 286 | "\n", 287 | "This way, our many-spin basis states have nice a binary representations and we can efficiently store them in a Julia `BitArray`.\n", 288 | "\n", 289 | "Example: $|0010\\rangle = |\\text{false},\\text{false},\\text{true},\\text{false}\\rangle = |\\downarrow\\downarrow\\uparrow\\downarrow>$ is a basis state of a 4-site system\n", 290 | "\n", 291 | "We construct the full basis by binary counting." 
292 | ] 293 | }, 294 | { 295 | "cell_type": "code", 296 | "execution_count": null, 297 | "metadata": {}, 298 | "outputs": [], 299 | "source": [ 300 | "\"\"\"\n", 301 | "Binary `BitArray` representation of the given integer `num`, padded to length `N`.\n", 302 | "\"\"\"\n", 303 | "bit_rep(num::Integer, N::Integer) = BitArray(parse(Bool, i) for i in string(num, base=2, pad=N))\n", 304 | "\n", 305 | "\"\"\"\n", 306 | " generate_basis(N::Integer) -> basis\n", 307 | "\n", 308 | "Generates a basis (`Vector{BitArray}`) spanning the Hilbert space of `N` spins.\n", 309 | "\"\"\"\n", 310 | "function generate_basis(N::Integer)\n", 311 | " nstates = 2^N\n", 312 | " basis = Vector{BitArray{1}}(undef, nstates)\n", 313 | " for i in 0:nstates-1\n", 314 | " basis[i+1] = bit_rep(i, N)\n", 315 | " end\n", 316 | " return basis\n", 317 | "end" 318 | ] 319 | }, 320 | { 321 | "cell_type": "code", 322 | "execution_count": null, 323 | "metadata": {}, 324 | "outputs": [], 325 | "source": [ 326 | "generate_basis(4)" 327 | ] 328 | }, 329 | { 330 | "cell_type": "markdown", 331 | "metadata": {}, 332 | "source": [ 333 | "### Side remark: Iterative construction of $H$" 334 | ] 335 | }, 336 | { 337 | "cell_type": "markdown", 338 | "metadata": {}, 339 | "source": [ 340 | "It might not be obvious that this basis is indeed the basis underlying the Hamiltonian matrix constructed in `TransverseFieldIsing`. To convince ourselves that this is indeed the case, let's calculate the matrix elements of our Hamiltonian, $\\langle \\psi_1 | H | \\psi_2 \\rangle$, explicitly by applying $H$ to our basis states and utilizing their orthonormality, $\\langle \\psi_i | \\psi_j \\rangle = \\sigma_{i,j}$." 341 | ] 342 | }, 343 | { 344 | "cell_type": "code", 345 | "execution_count": null, 346 | "metadata": {}, 347 | "outputs": [], 348 | "source": [ 349 | "using LinearAlgebra\n", 350 | "\n", 351 | "function TransverseFieldIsing_explicit(; N::Integer, h::T=0) where T<:Real\n", 352 | " basis = generate_basis(N)\n", 353 | " H = zeros(T, 2^N, 2^N)\n", 354 | " bonds = zip(collect(1:N-1), collect(2:N))\n", 355 | " for (i, bstate) in enumerate(basis)\n", 356 | " # diagonal part\n", 357 | " diag_term = 0.\n", 358 | " for (site_i, site_j) in bonds\n", 359 | " if bstate[site_i] == bstate[site_j]\n", 360 | " diag_term -= 1\n", 361 | " else\n", 362 | " diag_term += 1\n", 363 | " end\n", 364 | " end\n", 365 | " H[i, i] = diag_term\n", 366 | " \n", 367 | " # off diagonal part\n", 368 | " for site in 1:N\n", 369 | " new_bstate = copy(bstate)\n", 370 | " # flip the bit on the site (that's what σˣ does)\n", 371 | " new_bstate[site] = !new_bstate[site]\n", 372 | " # find corresponding single basis state with unity overlap (orthonormality)\n", 373 | " new_i = findfirst(isequal(new_bstate), basis)\n", 374 | " H[i, new_i] = -h\n", 375 | " end\n", 376 | " end\n", 377 | " return H\n", 378 | "end" 379 | ] 380 | }, 381 | { 382 | "cell_type": "code", 383 | "execution_count": null, 384 | "metadata": {}, 385 | "outputs": [], 386 | "source": [ 387 | "TransverseFieldIsing_explicit(N=4, h=1) ≈ TransverseFieldIsing(N=4, h=1)" 388 | ] 389 | }, 390 | { 391 | "cell_type": "markdown", 392 | "metadata": {}, 393 | "source": [ 394 | "### Full exact diagonalization" 395 | ] 396 | }, 397 | { 398 | "cell_type": "markdown", 399 | "metadata": {}, 400 | "source": [ 401 | "Alright. Let's solve the Schrödinger equation by diagonalizing $H$ for a system with $N=8$ and $h=1$." 
402 | ] 403 | }, 404 | { 405 | "cell_type": "code", 406 | "execution_count": null, 407 | "metadata": {}, 408 | "outputs": [], 409 | "source": [ 410 | "basis = generate_basis(8)\n", 411 | "H = TransverseFieldIsing(N=8, h=1)\n", 412 | "vals, vecs = eigen(H)" 413 | ] 414 | }, 415 | { 416 | "cell_type": "markdown", 417 | "metadata": {}, 418 | "source": [ 419 | "That's it. Here is our groundstate. " 420 | ] 421 | }, 422 | { 423 | "cell_type": "code", 424 | "execution_count": null, 425 | "metadata": {}, 426 | "outputs": [], 427 | "source": [ 428 | "groundstate = vecs[:,1];" 429 | ] 430 | }, 431 | { 432 | "cell_type": "markdown", 433 | "metadata": {}, 434 | "source": [ 435 | "The absolute square of this wave function is the probability of finding the system in a particular basis state." 436 | ] 437 | }, 438 | { 439 | "cell_type": "code", 440 | "execution_count": null, 441 | "metadata": {}, 442 | "outputs": [], 443 | "source": [ 444 | "abs2.(groundstate)" 445 | ] 446 | }, 447 | { 448 | "cell_type": "markdown", 449 | "metadata": {}, 450 | "source": [ 451 | "It's instructive to look at the extremal cases $h=0$ and $h>>1$." 452 | ] 453 | }, 454 | { 455 | "cell_type": "code", 456 | "execution_count": null, 457 | "metadata": {}, 458 | "outputs": [], 459 | "source": [ 460 | "H = TransverseFieldIsing(N=8, h=0)\n", 461 | "vals, vecs = eigen(H)\n", 462 | "groundstate = vecs[:,1]\n", 463 | "abs2.(groundstate)" 464 | ] 465 | }, 466 | { 467 | "cell_type": "markdown", 468 | "metadata": {}, 469 | "source": [ 470 | "As we can see, for $h=0$ the system is (with probability one) in the first basis state, where all spins point in $-z$ direction." 471 | ] 472 | }, 473 | { 474 | "cell_type": "code", 475 | "execution_count": null, 476 | "metadata": {}, 477 | "outputs": [], 478 | "source": [ 479 | "basis[1]" 480 | ] 481 | }, 482 | { 483 | "cell_type": "markdown", 484 | "metadata": {}, 485 | "source": [ 486 | "On the other hand, for $h=100$, the system occupies all basis states with approximately equal probability (maximal superposition) - corresponding to eigenstates of $\\sigma^x$, i.e. alignment to the magnetic field." 487 | ] 488 | }, 489 | { 490 | "cell_type": "code", 491 | "execution_count": null, 492 | "metadata": {}, 493 | "outputs": [], 494 | "source": [ 495 | "H = TransverseFieldIsing(N=8, h=100)\n", 496 | "vals, vecs = eigen(H)\n", 497 | "groundstate = vecs[:,1]\n", 498 | "abs2.(groundstate)" 499 | ] 500 | }, 501 | { 502 | "cell_type": "markdown", 503 | "metadata": {}, 504 | "source": [ 505 | "# Are you a magnet or what?" 506 | ] 507 | }, 508 | { 509 | "cell_type": "markdown", 510 | "metadata": {}, 511 | "source": [ 512 | "Let's vary $h$ and see what happens. Since we're looking at quantum magnets we will compute the overall magnetization, defined by\n", 513 | "\n", 514 | "$$M = \\frac{1}{N}\\sum_{i} \\sigma^z_i$$\n", 515 | "where $\\sigma^z_i$ is the value of the spin on site $i$ when we measure." 516 | ] 517 | }, 518 | { 519 | "cell_type": "code", 520 | "execution_count": null, 521 | "metadata": {}, 522 | "outputs": [], 523 | "source": [ 524 | "function magnetization(state, basis)\n", 525 | " M = 0.\n", 526 | " for (i, bstate) in enumerate(basis)\n", 527 | " bstate_M = 0.\n", 528 | " for spin in bstate\n", 529 | " bstate_M += (state[i]^2 * (spin ? 
1 : -1))/length(bstate)\n", 530 | " end\n", 531 | " @assert abs(bstate_M) <= 1\n", 532 | " M += abs(bstate_M)\n", 533 | " end\n", 534 | " return M\n", 535 | "end" 536 | ] 537 | }, 538 | { 539 | "cell_type": "code", 540 | "execution_count": null, 541 | "metadata": {}, 542 | "outputs": [], 543 | "source": [ 544 | "magnetization(groundstate, basis)" 545 | ] 546 | }, 547 | { 548 | "cell_type": "markdown", 549 | "metadata": {}, 550 | "source": [ 551 | "Now we would like to examine the effects of $h$. We will:\n", 552 | "\n", 553 | "1. Find a variety of $h$ to look at.\n", 554 | "2. For each, compute the lowest energy eigenvector (groundstate) of the corresponding Hamiltonian.\n", 555 | "3. For each groundstate, compute the overall magnetization $M$.\n", 556 | "4. Plot $M(h)$ for a variety of system sizes, and see if anything cool happens." 557 | ] 558 | }, 559 | { 560 | "cell_type": "code", 561 | "execution_count": null, 562 | "metadata": {}, 563 | "outputs": [], 564 | "source": [ 565 | "using Plots\n", 566 | "hs = 10 .^ range(-2., stop=2., length=10)\n", 567 | "Ns = 2:10\n", 568 | "p = plot()\n", 569 | "for N in Ns\n", 570 | " M = zeros(length(hs))\n", 571 | " for (i,h) in enumerate(hs)\n", 572 | " basis = generate_basis(N)\n", 573 | " H = TransverseFieldIsing(N=N, h=h)\n", 574 | " vals, vecs = eigen(H)\n", 575 | " groundstate = vecs[:,1]\n", 576 | " M[i] = magnetization(groundstate, basis)\n", 577 | " end\n", 578 | " plot!(p, hs, M, xscale=:log10, marker=:circle, label=\"N = $N\",\n", 579 | " xlab=\"h\", ylab=\"M(h)\")\n", 580 | " println(M)\n", 581 | "end\n", 582 | "p" 583 | ] 584 | }, 585 | { 586 | "cell_type": "markdown", 587 | "metadata": {}, 588 | "source": [ 589 | "**This looks like a phase transition!**\n", 590 | "\n", 591 | "For small $h$, the magnetization is unity, corresponding to a ferromagnetic state. By increasing the magnetic field $h$ we have a competition between the two terms in the Hamiltonian and eventually the system becomes paramagnetic with $M\\approx0$. Our plot suggests that this change of state happens around $h\\sim1$, which is in good agreement with the exact solution $h=1$.\n", 592 | "\n", 593 | "It is crucial to realize, that in our calculation we are inspecting the ground state of the system. Since $T=0$, it is purely quantum fluctuations that drive the transition: a **quantum phase transition**! This is to be compared to increasing temperature in the classical Ising model, where it's thermal fluctuations that cause a classical phase transition from a ferromagnetic to a paramagnetic state. For this reason, the state that we observe at high magnetic field strengths is called a **quantum paramagnet**." 594 | ] 595 | }, 596 | { 597 | "cell_type": "markdown", 598 | "metadata": {}, 599 | "source": [ 600 | "## Hilbert space is a big space" 601 | ] 602 | }, 603 | { 604 | "cell_type": "markdown", 605 | "metadata": {}, 606 | "source": [ 607 | "So far, we have only inspected chains of length $N\\leq10$. As we see in our plot above, there are rather strong finite-size effects on the magnetization. To extract a numerical estimate for the critical magnetic field strength $h_c$ of the transition we would have to consider much larger systems until we observe convergence as a function of $N$. Although this is clearly beyond the scope of this tutorial, let us at least pave the way.\n", 608 | "\n", 609 | "Our calculation, in its current form, doesn't scale. 
The reason for this is simple, **Hilbert space is a big place!**\n", 610 | "\n", 611 | "The number of basis states, and therefore the number of dimensions, grows **exponentially** with system size." 612 | ] 613 | }, 614 | { 615 | "cell_type": "code", 616 | "execution_count": null, 617 | "metadata": {}, 618 | "outputs": [], 619 | "source": [ 620 | "plot(N -> 2^N, 1, 20, legend=false, color=:black, xlab=\"N\", ylab=\"# Hilbert space dimensions\")" 621 | ] 622 | }, 623 | { 624 | "cell_type": "markdown", 625 | "metadata": {}, 626 | "source": [ 627 | "Our Hamiltonian matrix therefore will become huge(!) and is not going to fit into memory (apart from the fact that diagonalization would take forever)." 628 | ] 629 | }, 630 | { 631 | "cell_type": "code", 632 | "execution_count": null, 633 | "metadata": {}, 634 | "outputs": [], 635 | "source": [ 636 | "using Test\n", 637 | "@test_throws OutOfMemoryError TransverseFieldIsing(N=20, h=1)" 638 | ] 639 | }, 640 | { 641 | "cell_type": "markdown", 642 | "metadata": {}, 643 | "source": [ 644 | "So, what can we do about it? The answer is, **sparsity**.\n", 645 | "\n", 646 | "Let's inspect the Hamiltonian a bit more closely." 647 | ] 648 | }, 649 | { 650 | "cell_type": "code", 651 | "execution_count": null, 652 | "metadata": {}, 653 | "outputs": [], 654 | "source": [ 655 | "H = TransverseFieldIsing(N=10, h=1)" 656 | ] 657 | }, 658 | { 659 | "cell_type": "markdown", 660 | "metadata": {}, 661 | "source": [ 662 | "Noticably, there are a lot of zeros. How does this depend on $N$?\n", 663 | "\n", 664 | "Let's plot the sparsity, i.e. ratio of zero entries." 665 | ] 666 | }, 667 | { 668 | "cell_type": "code", 669 | "execution_count": null, 670 | "metadata": {}, 671 | "outputs": [], 672 | "source": [ 673 | "sparsity(x) = count(isequal(0), x)/length(x)\n", 674 | "\n", 675 | "Ns = 2:12\n", 676 | "sparsities = Float64[]\n", 677 | "for N in Ns\n", 678 | " H = TransverseFieldIsing(N=N, h=1)\n", 679 | " push!(sparsities, sparsity(H))\n", 680 | "end\n", 681 | "plot(Ns, sparsities, legend=false, xlab=\"chain length N\", ylab=\"Hamiltonian sparsity\", marker=:circle)" 682 | ] 683 | }, 684 | { 685 | "cell_type": "markdown", 686 | "metadata": {}, 687 | "source": [ 688 | "For $N\\gtrsim10$ almost all entries are zero! We should get rid of those and store $H$ as a sparse matrix." 689 | ] 690 | }, 691 | { 692 | "cell_type": "markdown", 693 | "metadata": {}, 694 | "source": [ 695 | "### Building the sparse Hamiltonian" 696 | ] 697 | }, 698 | { 699 | "cell_type": "markdown", 700 | "metadata": {}, 701 | "source": [ 702 | "Generally, we can bring a dense matrix into a sparse matrix format using the function `sparse`." 703 | ] 704 | }, 705 | { 706 | "cell_type": "code", 707 | "execution_count": null, 708 | "metadata": {}, 709 | "outputs": [], 710 | "source": [ 711 | "using SparseArrays\n", 712 | "H = TransverseFieldIsing(N=4,h=1)\n", 713 | "H |> sparse" 714 | ] 715 | }, 716 | { 717 | "cell_type": "markdown", 718 | "metadata": {}, 719 | "source": [ 720 | "Note that in this format, only the 80 non-zero entries are stored (rather than 256 elements).\n", 721 | "\n", 722 | "So, how do we have to modify our function `TransverseFieldIsing` to only keep track of non-zero elements during the Hamiltonian construction?\n", 723 | "\n", 724 | "It turns out it is as simple as initializing our Hamiltonian, identity, and pauli matrices as sparse matrices!" 
725 | ] 726 | }, 727 | { 728 | "cell_type": "code", 729 | "execution_count": null, 730 | "metadata": {}, 731 | "outputs": [], 732 | "source": [ 733 | "function TransverseFieldIsing_sparse(;N,h)\n", 734 | " id = [1 0; 0 1] |> sparse\n", 735 | " σˣ = [0 1; 1 0] |> sparse\n", 736 | " σᶻ = [1 0; 0 -1] |> sparse\n", 737 | " \n", 738 | " first_term_ops = fill(id, N)\n", 739 | " first_term_ops[1] = σᶻ\n", 740 | " first_term_ops[2] = σᶻ\n", 741 | " \n", 742 | " second_term_ops = fill(id, N)\n", 743 | " second_term_ops[1] = σˣ\n", 744 | " \n", 745 | " H = spzeros(Int, 2^N, 2^N) # note the spzeros instead of zeros here\n", 746 | " for i in 1:N-1\n", 747 | " H -= foldl(⊗, first_term_ops)\n", 748 | " first_term_ops = circshift(first_term_ops,1)\n", 749 | " end\n", 750 | " \n", 751 | " for i in 1:N\n", 752 | " H -= h*foldl(⊗, second_term_ops)\n", 753 | " second_term_ops = circshift(second_term_ops,1)\n", 754 | " end\n", 755 | " H\n", 756 | "end" 757 | ] 758 | }, 759 | { 760 | "cell_type": "markdown", 761 | "metadata": {}, 762 | "source": [ 763 | "We should check that apart from the new type `SparseMatrixCSC` this is still the same Hamiltonian." 764 | ] 765 | }, 766 | { 767 | "cell_type": "code", 768 | "execution_count": null, 769 | "metadata": {}, 770 | "outputs": [], 771 | "source": [ 772 | "H = TransverseFieldIsing_sparse(N=10, h=1);" 773 | ] 774 | }, 775 | { 776 | "cell_type": "code", 777 | "execution_count": null, 778 | "metadata": {}, 779 | "outputs": [], 780 | "source": [ 781 | "H_dense = TransverseFieldIsing(N=10, h=1)\n", 782 | "H ≈ H_dense" 783 | ] 784 | }, 785 | { 786 | "cell_type": "markdown", 787 | "metadata": {}, 788 | "source": [ 789 | "Great. But is it really faster?" 790 | ] 791 | }, 792 | { 793 | "cell_type": "code", 794 | "execution_count": null, 795 | "metadata": {}, 796 | "outputs": [], 797 | "source": [ 798 | "@time TransverseFieldIsing(N=10,h=1);\n", 799 | "@time TransverseFieldIsing_sparse(N=10,h=1);" 800 | ] 801 | }, 802 | { 803 | "cell_type": "markdown", 804 | "metadata": {}, 805 | "source": [ 806 | "It is *a lot* faster!" 807 | ] 808 | }, 809 | { 810 | "cell_type": "markdown", 811 | "metadata": {}, 812 | "source": [ 813 | "Alright, let's try to go to larger $N$. While `TransverseFieldIsing` threw an `OutOfMemoryError` for `N=20`, our new function is more efficient:" 814 | ] 815 | }, 816 | { 817 | "cell_type": "code", 818 | "execution_count": null, 819 | "metadata": {}, 820 | "outputs": [], 821 | "source": [ 822 | "@time H = TransverseFieldIsing_sparse(N=20,h=1)" 823 | ] 824 | }, 825 | { 826 | "cell_type": "markdown", 827 | "metadata": {}, 828 | "source": [ 829 | "Note that this is matrix, formally, has **1,099,511,627,776** entries!" 830 | ] 831 | }, 832 | { 833 | "cell_type": "markdown", 834 | "metadata": {}, 835 | "source": [ 836 | "### Diagonalizing sparse matrices" 837 | ] 838 | }, 839 | { 840 | "cell_type": "markdown", 841 | "metadata": {}, 842 | "source": [ 843 | "We have taken the first hurdle of constructing our large-system Hamiltonian as a sparse matrix. Unfortunately, if we try to diagonalize $H$, we realize that Julia's built-in eigensolver `eigen` doesn't support matrices.\n", 844 | "\n", 845 | "```\n", 846 | "eigen(A) not supported for sparse matrices. Use for example eigs(A) from the Arpack package instead.\n", 847 | "```" 848 | ] 849 | }, 850 | { 851 | "cell_type": "markdown", 852 | "metadata": {}, 853 | "source": [ 854 | "Gladly it suggests a solution: [ARPACK.jl](https://github.com/JuliaLinearAlgebra/Arpack.jl). 
It provides a wrapper to the Fortran library [ARPACK](https://www.caam.rice.edu/software/ARPACK/) which implements iterative eigenvalue and singular value solvers for sparse matrices.\n", 855 | "\n", 856 | "There are also a bunch of pure Julia implementations available in\n", 857 | "\n", 858 | "* [ArnoldiMethod.jl](https://github.com/haampie/ArnoldiMethod.jl)\n", 859 | "* [KrylovKit.jl](https://github.com/Jutho/KrylovKit.jl)\n", 860 | "* [IterativeSolvers.jl](https://github.com/JuliaMath/IterativeSolvers.jl)\n", 861 | "\n", 862 | "Let us use the ArnoldiMethod.jl package." 863 | ] 864 | }, 865 | { 866 | "cell_type": "code", 867 | "execution_count": null, 868 | "metadata": {}, 869 | "outputs": [], 870 | "source": [ 871 | "using ArnoldiMethod\n", 872 | "\n", 873 | "function eigen_sparse(x)\n", 874 | " decomp, history = partialschur(x, nev=1, which=SR()); # only solve for the ground state\n", 875 | " vals, vecs = partialeigen(decomp);\n", 876 | " return vals, vecs\n", 877 | "end" 878 | ] 879 | }, 880 | { 881 | "cell_type": "markdown", 882 | "metadata": {}, 883 | "source": [ 884 | "Solving for the ground state takes less than a minute on an i5 desktop machine." 885 | ] 886 | }, 887 | { 888 | "cell_type": "code", 889 | "execution_count": null, 890 | "metadata": {}, 891 | "outputs": [], 892 | "source": [ 893 | "@time vals, vecs = eigen_sparse(H)" 894 | ] 895 | }, 896 | { 897 | "cell_type": "markdown", 898 | "metadata": {}, 899 | "source": [ 900 | "Voila. There we have the ground state energy and the ground state wave function for a $N=20$ chain of quantum spins!" 901 | ] 902 | }, 903 | { 904 | "cell_type": "code", 905 | "execution_count": null, 906 | "metadata": {}, 907 | "outputs": [], 908 | "source": [ 909 | "groundstate = vecs[:,1]" 910 | ] 911 | }, 912 | { 913 | "cell_type": "markdown", 914 | "metadata": {}, 915 | "source": [ 916 | "### Magnetization once again" 917 | ] 918 | }, 919 | { 920 | "cell_type": "markdown", 921 | "metadata": {}, 922 | "source": [ 923 | "To measure the magnetization, we could use our function `magnetization(state, basis)` from above. However, the way we wrote it above, it depends on an explicit list of basis states which we do not want to construct for a large system explicitly.\n", 924 | "\n", 925 | "Let's rewrite the function slightly such that bit representations of our basis states are calculated on the fly." 926 | ] 927 | }, 928 | { 929 | "cell_type": "code", 930 | "execution_count": null, 931 | "metadata": {}, 932 | "outputs": [], 933 | "source": [ 934 | "function magnetization(state)\n", 935 | " N = Int(log2(length(state)))\n", 936 | " M = 0.\n", 937 | " for i in 1:length(state)\n", 938 | " bstate = bit_rep(i-1,N)\n", 939 | " bstate_M = 0.\n", 940 | " for spin in bstate\n", 941 | " bstate_M += (state[i]^2 * (spin ? 1 : -1))/N\n", 942 | " end\n", 943 | " @assert abs(bstate_M) <= 1\n", 944 | " M += abs(bstate_M)\n", 945 | " end\n", 946 | " return M\n", 947 | "end" 948 | ] 949 | }, 950 | { 951 | "cell_type": "code", 952 | "execution_count": null, 953 | "metadata": {}, 954 | "outputs": [], 955 | "source": [ 956 | "magnetization(groundstate, basis)" 957 | ] 958 | }, 959 | { 960 | "cell_type": "markdown", 961 | "metadata": {}, 962 | "source": [ 963 | "We are now able to recreate our magnetization vs magnetic field strength plotincluding larger systems (takes about 3 minutes on this i5 Desktop machine)." 
964 | ] 965 | }, 966 | { 967 | "cell_type": "code", 968 | "execution_count": null, 969 | "metadata": {}, 970 | "outputs": [], 971 | "source": [ 972 | "using Plots\n", 973 | "hs = 10 .^ range(-2., stop=2., length=10)\n", 974 | "Ns = 2:2:20\n", 975 | "p = plot()\n", 976 | "@time for N in Ns\n", 977 | " M = zeros(length(hs))\n", 978 | " for (i,h) in enumerate(hs)\n", 979 | " H = TransverseFieldIsing_sparse(N=N, h=h)\n", 980 | " vals, vecs = eigen_sparse(H)\n", 981 | " groundstate = @view vecs[:,1]\n", 982 | " M[i] = magnetization(groundstate)\n", 983 | " end\n", 984 | " plot!(p, hs, M, xscale=:log10, marker=:circle, label=\"N = $N\",\n", 985 | " xlab=\"h\", ylab=\"M(h)\")\n", 986 | " println(M)\n", 987 | "end\n", 988 | "p" 989 | ] 990 | } 991 | ], 992 | "metadata": { 993 | "@webio": { 994 | "lastCommId": null, 995 | "lastKernelId": null 996 | }, 997 | "kernelspec": { 998 | "display_name": "Julia 1.7.3", 999 | "language": "julia", 1000 | "name": "julia-1.7" 1001 | }, 1002 | "language_info": { 1003 | "file_extension": ".jl", 1004 | "mimetype": "application/julia", 1005 | "name": "julia", 1006 | "version": "1.7.3" 1007 | } 1008 | }, 1009 | "nbformat": 4, 1010 | "nbformat_minor": 4 1011 | } 1012 | -------------------------------------------------------------------------------- /2209_Munich/images/cqsl.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PhilipVinc/Lectures/5ff4104c75577fbeb9b27a8d7505b46c3529304c/2209_Munich/images/cqsl.jpg -------------------------------------------------------------------------------- /2209_Munich/images/epfl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PhilipVinc/Lectures/5ff4104c75577fbeb9b27a8d7505b46c3529304c/2209_Munich/images/epfl.png -------------------------------------------------------------------------------- /2209_Munich/images/epfl.svg: -------------------------------------------------------------------------------- 1 | 2 | 14 | 16 | 17 | 19 | image/svg+xml 20 | 22 | epfl-logo-new 23 | 24 | 25 | 26 | 28 | 30 | 31 | epfl-logo-new 33 | 37 | 41 | 45 | 50 | 54 | 58 | 62 | 66 | 67 | -------------------------------------------------------------------------------- /2209_Munich/images/netket_web.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PhilipVinc/Lectures/5ff4104c75577fbeb9b27a8d7505b46c3529304c/2209_Munich/images/netket_web.png -------------------------------------------------------------------------------- /2209_Munich/images/nk_authors.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PhilipVinc/Lectures/5ff4104c75577fbeb9b27a8d7505b46c3529304c/2209_Munich/images/nk_authors.png -------------------------------------------------------------------------------- /2209_Munich/images/nk_commits.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PhilipVinc/Lectures/5ff4104c75577fbeb9b27a8d7505b46c3529304c/2209_Munich/images/nk_commits.png -------------------------------------------------------------------------------- /2209_Munich/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "2209-munich" 3 | version = "0.1.0" 4 | description = "" 5 | authors = ["Filippo Vicentini "] 6 | readme = "README.md" 7 | packages = [{include = "2209_munich"}] 8 | 9 | [tool.poetry.dependencies] 10 | python = ">=3.8" 11 | netket = "^3.5.1" 12 | matplotlib = "^3.5.3" 13 | ipykernel = "^6.15.3" 14 | 15 | 16 | [build-system] 17 | requires = ["poetry-core"] 18 | build-backend = "poetry.core.masonry.api" 19 | -------------------------------------------------------------------------------- /2301_Pisa/images/cqsl.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PhilipVinc/Lectures/5ff4104c75577fbeb9b27a8d7505b46c3529304c/2301_Pisa/images/cqsl.jpg -------------------------------------------------------------------------------- /2301_Pisa/images/epfl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PhilipVinc/Lectures/5ff4104c75577fbeb9b27a8d7505b46c3529304c/2301_Pisa/images/epfl.png -------------------------------------------------------------------------------- /2301_Pisa/images/epfl.svg: -------------------------------------------------------------------------------- 1 | 2 | 14 | 16 | 17 | 19 | image/svg+xml 20 | 22 | epfl-logo-new 23 | 24 | 25 | 26 | 28 | 30 | 31 | epfl-logo-new 33 | 37 | 41 | 45 | 50 | 54 | 58 | 62 | 66 | 67 | -------------------------------------------------------------------------------- /2301_Pisa/images/netket_web.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PhilipVinc/Lectures/5ff4104c75577fbeb9b27a8d7505b46c3529304c/2301_Pisa/images/netket_web.png -------------------------------------------------------------------------------- /2301_Pisa/images/nk_authors.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PhilipVinc/Lectures/5ff4104c75577fbeb9b27a8d7505b46c3529304c/2301_Pisa/images/nk_authors.png -------------------------------------------------------------------------------- /2301_Pisa/images/nk_commits.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PhilipVinc/Lectures/5ff4104c75577fbeb9b27a8d7505b46c3529304c/2301_Pisa/images/nk_commits.png -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Custom Filippo's Academic License 2 | 3 | In short: Permission is granted to use the material found in this repository for your own learning and for *your own personal* projects under the MIT license. 4 | 5 | If you want to use the material found in this repository to give classes or lectures, *you MUST* seek written permission from the owner of this repository (Filippo Vicentini) and properly attribute the material to the original owner of this repository. 6 | 7 | Derivative work, which deviates substantially from the material of this repository, must also be properly attributed but does not require consent. 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Lectures 2 | A collection of lectures and tutorials on NetKet, Machine Learning for Condensed Matter Physics and related topics. 3 | 4 | ## LICENSE Notice 5 | 6 | The material found in this repository is free to use for personal and/or individual purposes.
7 | If you are presenting, teaching or lecturing about the material found in this repository, you *must* seek written permission from the owner of the repository, unless what is presented is a derivative work. Here, a derivative work is defined, following the GNU General Public License, as work in which at least 80% of the material is not taken from this repository. 8 | --------------------------------------------------------------------------------