├── COPT_Example.ipynb ├── Example.ipynb ├── README.md ├── media ├── 044f4b46af76df9340c0e21795944381.png ├── 81cbf99d80c180f7e810f50275b94d0f.png ├── a31aaba4a573c4e2a74723f5d555bdf2.png ├── df9889a4142bfb2523e1a67d849f72eb.png ├── ffa7d65c364918231fad932f5c088abe.png ├── je9idvsd97c0vxcj90ezk0e7v823b7asd.png └── media.pptx └── 当机器学习PyTorch遇到运筹学COPT.pdf /COPT_Example.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "provenance": [] 7 | }, 8 | "kernelspec": { 9 | "name": "python3", 10 | "display_name": "Python 3" 11 | }, 12 | "language_info": { 13 | "name": "python" 14 | } 15 | }, 16 | "cells": [ 17 | { 18 | "cell_type": "code", 19 | "execution_count": 1, 20 | "metadata": { 21 | "colab": { 22 | "base_uri": "https://localhost:8080/" 23 | }, 24 | "id": "VBefPIvx73o7", 25 | "outputId": "08235a0f-0547-4008-9337-d49ecd443073" 26 | }, 27 | "outputs": [ 28 | { 29 | "output_type": "stream", 30 | "name": "stdout", 31 | "text": [ 32 | "Cloning into 'PyEPO'...\n", 33 | "remote: Enumerating objects: 126, done.\u001b[K\n", 34 | "remote: Counting objects: 100% (126/126), done.\u001b[K\n", 35 | "remote: Compressing objects: 100% (111/111), done.\u001b[K\n", 36 | "remote: Total 126 (delta 22), reused 65 (delta 12), pack-reused 0\u001b[K\n", 37 | "Receiving objects: 100% (126/126), 2.44 MiB | 12.68 MiB/s, done.\n", 38 | "Resolving deltas: 100% (22/22), done.\n", 39 | "Processing ./PyEPO/pkg\n", 40 | " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", 41 | "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from pyepo==0.3.5) (1.23.5)\n", 42 | "Requirement already satisfied: scipy in /usr/local/lib/python3.10/dist-packages (from pyepo==0.3.5) (1.11.3)\n", 43 | "Collecting pathos (from pyepo==0.3.5)\n", 44 | " Downloading pathos-0.3.1-py3-none-any.whl (82 kB)\n", 45 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m82.1/82.1 kB\u001b[0m \u001b[31m1.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 46 | "\u001b[?25hRequirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from pyepo==0.3.5) (4.66.1)\n", 47 | "Collecting Pyomo>=6.1.2 (from pyepo==0.3.5)\n", 48 | " Downloading Pyomo-6.6.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (12.7 MB)\n", 49 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m12.7/12.7 MB\u001b[0m \u001b[31m21.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 50 | "\u001b[?25hCollecting gurobipy>=9.1.2 (from pyepo==0.3.5)\n", 51 | " Downloading gurobipy-10.0.3-cp310-cp310-manylinux2014_x86_64.whl (12.7 MB)\n", 52 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m12.7/12.7 MB\u001b[0m \u001b[31m44.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 53 | "\u001b[?25hRequirement already satisfied: scikit_learn in /usr/local/lib/python3.10/dist-packages (from pyepo==0.3.5) (1.2.2)\n", 54 | "Requirement already satisfied: torch>=1.13.1 in /usr/local/lib/python3.10/dist-packages (from pyepo==0.3.5) (2.1.0+cu118)\n", 55 | "Collecting ply (from Pyomo>=6.1.2->pyepo==0.3.5)\n", 56 | " Downloading ply-3.11-py2.py3-none-any.whl (49 kB)\n", 57 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.6/49.6 kB\u001b[0m \u001b[31m4.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 58 | "\u001b[?25hRequirement already satisfied: filelock in 
/usr/local/lib/python3.10/dist-packages (from torch>=1.13.1->pyepo==0.3.5) (3.13.1)\n", 59 | "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch>=1.13.1->pyepo==0.3.5) (4.5.0)\n", 60 | "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch>=1.13.1->pyepo==0.3.5) (1.12)\n", 61 | "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch>=1.13.1->pyepo==0.3.5) (3.2.1)\n", 62 | "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch>=1.13.1->pyepo==0.3.5) (3.1.2)\n", 63 | "Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from torch>=1.13.1->pyepo==0.3.5) (2023.6.0)\n", 64 | "Requirement already satisfied: triton==2.1.0 in /usr/local/lib/python3.10/dist-packages (from torch>=1.13.1->pyepo==0.3.5) (2.1.0)\n", 65 | "Collecting ppft>=1.7.6.7 (from pathos->pyepo==0.3.5)\n", 66 | " Downloading ppft-1.7.6.7-py3-none-any.whl (56 kB)\n", 67 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m56.8/56.8 kB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 68 | "\u001b[?25hCollecting dill>=0.3.7 (from pathos->pyepo==0.3.5)\n", 69 | " Downloading dill-0.3.7-py3-none-any.whl (115 kB)\n", 70 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m115.3/115.3 kB\u001b[0m \u001b[31m9.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 71 | "\u001b[?25hCollecting pox>=0.3.3 (from pathos->pyepo==0.3.5)\n", 72 | " Downloading pox-0.3.3-py3-none-any.whl (29 kB)\n", 73 | "Collecting multiprocess>=0.70.15 (from pathos->pyepo==0.3.5)\n", 74 | " Downloading multiprocess-0.70.15-py310-none-any.whl (134 kB)\n", 75 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m134.8/134.8 kB\u001b[0m \u001b[31m8.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 76 | "\u001b[?25hRequirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit_learn->pyepo==0.3.5) (1.3.2)\n", 77 | "Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit_learn->pyepo==0.3.5) (3.2.0)\n", 78 | "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch>=1.13.1->pyepo==0.3.5) (2.1.3)\n", 79 | "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch>=1.13.1->pyepo==0.3.5) (1.3.0)\n", 80 | "Building wheels for collected packages: pyepo\n", 81 | " Building wheel for pyepo (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", 82 | " Created wheel for pyepo: filename=pyepo-0.3.5-py3-none-any.whl size=41276 sha256=96bc8f8cc8878f13583d4f1c9ca87b9285567a45e1a6e53360f29f09fa3c3163\n", 83 | " Stored in directory: /tmp/pip-ephem-wheel-cache-0vmfmjwl/wheels/46/e0/92/19132b049913f800d1a8d6e61b81fe00bcac3f9d11ef30d2a5\n", 84 | "Successfully built pyepo\n", 85 | "Installing collected packages: ply, gurobipy, Pyomo, ppft, pox, dill, multiprocess, pathos, pyepo\n", 86 | "Successfully installed Pyomo-6.6.2 dill-0.3.7 gurobipy-10.0.3 multiprocess-0.70.15 pathos-0.3.1 ply-3.11 pox-0.3.3 ppft-1.7.6.7 pyepo-0.3.5\n", 87 | "Collecting coptpy\n", 88 | " Downloading coptpy-7.0.3-cp310-cp310-manylinux2014_x86_64.whl (10.7 MB)\n", 89 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m10.7/10.7 MB\u001b[0m \u001b[31m30.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 90 | "\u001b[?25hInstalling collected packages: coptpy\n", 91 | "Successfully installed coptpy-7.0.3\n" 92 | ] 93 | } 94 | ], 95 | "source": [ 96 | "# download\n", 97 | "!git clone -b main --depth 1 https://github.com/khalil-research/PyEPO.git\n", 98 | "# install\n", 99 | "!pip install PyEPO/pkg/.\n", 100 | "!pip install coptpy" 101 | ] 102 | }, 103 | { 104 | "cell_type": "markdown", 105 | "source": [ 106 | "## Build optModel" 107 | ], 108 | "metadata": { 109 | "id": "HjxG2TL58ZFb" 110 | } 111 | }, 112 | { 113 | "cell_type": "code", 114 | "source": [ 115 | "from coptpy import COPT\n", 116 | "from coptpy import Envr\n", 117 | "from pyepo.model.copt import optCoptModel\n", 118 | "\n", 119 | "class myOptModel(optCoptModel):\n", 120 | " def _getModel(self):\n", 121 | " # ceate a model\n", 122 | " m = Envr().createModel()\n", 123 | " # varibles\n", 124 | " x = m.addVars(5, nameprefix='x', vtype=COPT.BINARY)\n", 125 | " # sense\n", 126 | " m.setObjSense(COPT.MAXIMIZE)\n", 127 | " # constraints\n", 128 | " m.addConstr(3*x[0]+4*x[1]+3*x[2]+6*x[3]+4*x[4]<=12)\n", 129 | " m.addConstr(4*x[0]+5*x[1]+2*x[2]+3*x[3]+5*x[4]<=10)\n", 130 | " m.addConstr(5*x[0]+4*x[1]+6*x[2]+2*x[3]+3*x[4]<=15)\n", 131 | " return m, x\n", 132 | "\n", 133 | "optmodel = myOptModel()" 134 | ], 135 | "metadata": { 136 | "id": "nRbnYdWx8M8L", 137 | "colab": { 138 | "base_uri": "https://localhost:8080/" 139 | }, 140 | "outputId": "71ab9904-d93a-4f67-e2ea-d2c99aa3c941" 141 | }, 142 | "execution_count": 2, 143 | "outputs": [ 144 | { 145 | "output_type": "stream", 146 | "name": "stdout", 147 | "text": [ 148 | "Auto-Sklearn cannot be imported.\n", 149 | "Cardinal Optimizer v7.0.3. Build date Nov 14 2023\n", 150 | "Copyright Cardinal Operations 2023. 
All Rights Reserved\n", 151 | "\n" 152 | ] 153 | } 154 | ] 155 | }, 156 | { 157 | "cell_type": "markdown", 158 | "source": [ 159 | "## Problem Data" 160 | ], 161 | "metadata": { 162 | "id": "-vsQ0Vre84_1" 163 | } 164 | }, 165 | { 166 | "cell_type": "code", 167 | "source": [ 168 | "import torch\n", 169 | "torch.manual_seed(42)\n", 170 | "\n", 171 | "num_data = 1000 # number of data\n", 172 | "num_feat = 5 # feature dimention\n", 173 | "num_cost = 5 # cost dimention\n", 174 | "\n", 175 | "# randomly generate data\n", 176 | "x_true = torch.rand(num_data, num_feat) # feature\n", 177 | "weight_true = torch.rand(num_feat, num_cost) # weight\n", 178 | "bias_true = torch.randn(num_cost) # bias\n", 179 | "noise = 0.5 * torch.randn(num_data, num_cost) # random noise\n", 180 | "c_true = x_true @ weight_true + bias_true + noise # cost coef" 181 | ], 182 | "metadata": { 183 | "id": "-rqtFovN8-Gc" 184 | }, 185 | "execution_count": 3, 186 | "outputs": [] 187 | }, 188 | { 189 | "cell_type": "code", 190 | "source": [ 191 | "# split train test data\n", 192 | "from sklearn.model_selection import train_test_split\n", 193 | "x_train, x_test, c_train, c_test = train_test_split(x_true, c_true, test_size=200, random_state=42)\n", 194 | "\n", 195 | "# build optDataset\n", 196 | "from pyepo.data.dataset import optDataset\n", 197 | "dataset_train = optDataset(optmodel, x_train, c_train)\n", 198 | "dataset_test = optDataset(optmodel, x_test, c_test)\n", 199 | "\n", 200 | "# build DataLoader\n", 201 | "from torch.utils.data import DataLoader\n", 202 | "batch_size = 32\n", 203 | "loader_train = DataLoader(dataset_train, batch_size=batch_size, shuffle=True)\n", 204 | "loader_test = DataLoader(dataset_test, batch_size=batch_size, shuffle=False)" 205 | ], 206 | "metadata": { 207 | "id": "Tk_wcRiq_5_K", 208 | "colab": { 209 | "base_uri": "https://localhost:8080/" 210 | }, 211 | "outputId": "4c2a08b9-f01b-4d59-cdfb-b0aabac162c4" 212 | }, 213 | "execution_count": 4, 214 | "outputs": [ 215 | { 216 | "output_type": "stream", 217 | "name": "stdout", 218 | "text": [ 219 | "Optimizing for optDataset...\n" 220 | ] 221 | }, 222 | { 223 | "output_type": "stream", 224 | "name": "stderr", 225 | "text": [ 226 | "100%|██████████| 800/800 [00:04<00:00, 162.41it/s]\n" 227 | ] 228 | }, 229 | { 230 | "output_type": "stream", 231 | "name": "stdout", 232 | "text": [ 233 | "Optimizing for optDataset...\n" 234 | ] 235 | }, 236 | { 237 | "output_type": "stream", 238 | "name": "stderr", 239 | "text": [ 240 | "100%|██████████| 200/200 [00:00<00:00, 258.13it/s]\n" 241 | ] 242 | } 243 | ] 244 | }, 245 | { 246 | "cell_type": "markdown", 247 | "source": [ 248 | "## Build Prediction Model" 249 | ], 250 | "metadata": { 251 | "id": "ZENnTDsKEwho" 252 | } 253 | }, 254 | { 255 | "cell_type": "code", 256 | "source": [ 257 | "import torch\n", 258 | "from torch import nn\n", 259 | "\n", 260 | "# build linear model\n", 261 | "class LinearRegression(nn.Module):\n", 262 | " def __init__(self):\n", 263 | " super(LinearRegression, self).__init__()\n", 264 | " self.linear = nn.Linear(num_feat, num_cost)\n", 265 | "\n", 266 | " def forward(self, x):\n", 267 | " out = self.linear(x)\n", 268 | " return out\n", 269 | "\n", 270 | "# init model\n", 271 | "reg = LinearRegression()\n", 272 | "# cuda\n", 273 | "if torch.cuda.is_available():\n", 274 | " reg = reg.cuda()" 275 | ], 276 | "metadata": { 277 | "id": "g2mNrexpEvhS" 278 | }, 279 | "execution_count": 5, 280 | "outputs": [] 281 | }, 282 | { 283 | "cell_type": "markdown", 284 | "source": [ 285 | "## AutoGrad Module 
for Optimization" 286 | ], 287 | "metadata": { 288 | "id": "oyDZry1AD99r" 289 | } 290 | }, 291 | { 292 | "cell_type": "code", 293 | "source": [ 294 | "import pyepo\n", 295 | "\n", 296 | "# init SPO+ loss\n", 297 | "spop = pyepo.func.SPOPlus(optmodel, processes=2)\n", 298 | "# init PFY loss\n", 299 | "pfy = pyepo.func.perturbedFenchelYoung(optmodel, n_samples=3, sigma=1.0, processes=2)\n", 300 | "# init NCE loss\n", 301 | "nce = pyepo.func.NCE(optmodel, processes=2, solve_ratio=0.05, dataset=dataset_train)" 302 | ], 303 | "metadata": { 304 | "colab": { 305 | "base_uri": "https://localhost:8080/" 306 | }, 307 | "id": "4Ykku_3eDsNc", 308 | "outputId": "11914b01-a0f5-4489-a993-e99d3e229cae" 309 | }, 310 | "execution_count": 6, 311 | "outputs": [ 312 | { 313 | "output_type": "stream", 314 | "name": "stdout", 315 | "text": [ 316 | "Num of cores: 2\n", 317 | "Num of cores: 2\n", 318 | "Num of cores: 2\n" 319 | ] 320 | } 321 | ] 322 | }, 323 | { 324 | "cell_type": "code", 325 | "source": [ 326 | " # set adam optimizer\n", 327 | " optimizer = torch.optim.Adam(reg.parameters(), lr=5e-3)\n", 328 | "\n", 329 | " # train mode\n", 330 | " reg.train()\n", 331 | " for epoch in range(5):\n", 332 | " # load data\n", 333 | " for i, data in enumerate(loader_train):\n", 334 | " x, c, w, z = data # feat, cost, sol, obj\n", 335 | " # cuda\n", 336 | " if torch.cuda.is_available():\n", 337 | " x, c, w, z = x.cuda(), c.cuda(), w.cuda(), z.cuda()\n", 338 | " # forward pass\n", 339 | " cp = reg(x)\n", 340 | " loss = spop(cp, c, w, z)\n", 341 | " # backward pass\n", 342 | " optimizer.zero_grad()\n", 343 | " loss.backward()\n", 344 | " optimizer.step()\n", 345 | " # log\n", 346 | " regret = pyepo.metric.regret(reg, optmodel, loader_test)\n", 347 | " print(\"Loss: {:9.4f}, Regret: {:7.4f}%\".format(loss.item(), regret*100))" 348 | ], 349 | "metadata": { 350 | "colab": { 351 | "base_uri": "https://localhost:8080/" 352 | }, 353 | "id": "Ets2ndfSFnag", 354 | "outputId": "f7d9b903-9f95-44fd-89e8-055c1a430ba9" 355 | }, 356 | "execution_count": 7, 357 | "outputs": [ 358 | { 359 | "output_type": "stream", 360 | "name": "stdout", 361 | "text": [ 362 | "Loss: 6.5576, Regret: 21.4272%\n", 363 | "Loss: 3.9369, Regret: 0.1530%\n", 364 | "Loss: 2.1917, Regret: 0.1530%\n", 365 | "Loss: 0.7715, Regret: 0.1530%\n", 366 | "Loss: 0.4534, Regret: 0.1530%\n" 367 | ] 368 | } 369 | ] 370 | }, 371 | { 372 | "cell_type": "code", 373 | "source": [], 374 | "metadata": { 375 | "id": "_HJZ2cBIHhks" 376 | }, 377 | "execution_count": 7, 378 | "outputs": [] 379 | } 380 | ] 381 | } -------------------------------------------------------------------------------- /Example.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "provenance": [] 7 | }, 8 | "kernelspec": { 9 | "name": "python3", 10 | "display_name": "Python 3" 11 | }, 12 | "language_info": { 13 | "name": "python" 14 | } 15 | }, 16 | "cells": [ 17 | { 18 | "cell_type": "code", 19 | "execution_count": null, 20 | "metadata": { 21 | "colab": { 22 | "base_uri": "https://localhost:8080/" 23 | }, 24 | "id": "VBefPIvx73o7", 25 | "outputId": "1d617544-3fdc-4b2a-d3c5-7a98afa30707" 26 | }, 27 | "outputs": [ 28 | { 29 | "output_type": "stream", 30 | "name": "stdout", 31 | "text": [ 32 | "Cloning into 'PyEPO'...\n", 33 | "remote: Enumerating objects: 119, done.\u001b[K\n", 34 | "remote: Counting objects: 100% (119/119), done.\u001b[K\n", 35 | "remote: Compressing objects: 100% 
(107/107), done.\u001b[K\n", 36 | "remote: Total 119 (delta 18), reused 63 (delta 9), pack-reused 0\u001b[K\n", 37 | "Receiving objects: 100% (119/119), 2.43 MiB | 17.93 MiB/s, done.\n", 38 | "Resolving deltas: 100% (18/18), done.\n", 39 | "Processing ./PyEPO/pkg\n", 40 | " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", 41 | "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from pyepo==0.3.3) (1.22.4)\n", 42 | "Requirement already satisfied: scipy in /usr/local/lib/python3.10/dist-packages (from pyepo==0.3.3) (1.10.1)\n", 43 | "Collecting pathos (from pyepo==0.3.3)\n", 44 | " Downloading pathos-0.3.0-py3-none-any.whl (79 kB)\n", 45 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m79.8/79.8 kB\u001b[0m \u001b[31m2.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 46 | "\u001b[?25hRequirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from pyepo==0.3.3) (4.65.0)\n", 47 | "Collecting Pyomo>=6.1.2 (from pyepo==0.3.3)\n", 48 | " Downloading Pyomo-6.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (11.9 MB)\n", 49 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m11.9/11.9 MB\u001b[0m \u001b[31m43.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 50 | "\u001b[?25hCollecting gurobipy>=9.1.2 (from pyepo==0.3.3)\n", 51 | " Downloading gurobipy-10.0.2-cp310-cp310-manylinux2014_x86_64.whl (12.7 MB)\n", 52 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m12.7/12.7 MB\u001b[0m \u001b[31m51.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 53 | "\u001b[?25hRequirement already satisfied: scikit_learn in /usr/local/lib/python3.10/dist-packages (from pyepo==0.3.3) (1.2.2)\n", 54 | "Requirement already satisfied: torch>=1.13.1 in /usr/local/lib/python3.10/dist-packages (from pyepo==0.3.3) (2.0.1+cu118)\n", 55 | "Collecting ply (from Pyomo>=6.1.2->pyepo==0.3.3)\n", 56 | " Downloading ply-3.11-py2.py3-none-any.whl (49 kB)\n", 57 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.6/49.6 kB\u001b[0m \u001b[31m4.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 58 | "\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch>=1.13.1->pyepo==0.3.3) (3.12.2)\n", 59 | "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch>=1.13.1->pyepo==0.3.3) (4.7.1)\n", 60 | "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch>=1.13.1->pyepo==0.3.3) (1.11.1)\n", 61 | "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch>=1.13.1->pyepo==0.3.3) (3.1)\n", 62 | "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch>=1.13.1->pyepo==0.3.3) (3.1.2)\n", 63 | "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.10/dist-packages (from torch>=1.13.1->pyepo==0.3.3) (2.0.0)\n", 64 | "Requirement already satisfied: cmake in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch>=1.13.1->pyepo==0.3.3) (3.25.2)\n", 65 | "Requirement already satisfied: lit in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch>=1.13.1->pyepo==0.3.3) (16.0.6)\n", 66 | "Collecting ppft>=1.7.6.6 (from pathos->pyepo==0.3.3)\n", 67 | " Downloading ppft-1.7.6.6-py3-none-any.whl (52 kB)\n", 68 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m52.8/52.8 
kB\u001b[0m \u001b[31m5.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 69 | "\u001b[?25hCollecting dill>=0.3.6 (from pathos->pyepo==0.3.3)\n", 70 | " Downloading dill-0.3.6-py3-none-any.whl (110 kB)\n", 71 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m110.5/110.5 kB\u001b[0m \u001b[31m12.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 72 | "\u001b[?25hCollecting pox>=0.3.2 (from pathos->pyepo==0.3.3)\n", 73 | " Downloading pox-0.3.2-py3-none-any.whl (29 kB)\n", 74 | "Collecting multiprocess>=0.70.14 (from pathos->pyepo==0.3.3)\n", 75 | " Downloading multiprocess-0.70.14-py310-none-any.whl (134 kB)\n", 76 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m134.3/134.3 kB\u001b[0m \u001b[31m7.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 77 | "\u001b[?25hRequirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit_learn->pyepo==0.3.3) (1.3.1)\n", 78 | "Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit_learn->pyepo==0.3.3) (3.1.0)\n", 79 | "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch>=1.13.1->pyepo==0.3.3) (2.1.3)\n", 80 | "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch>=1.13.1->pyepo==0.3.3) (1.3.0)\n", 81 | "Building wheels for collected packages: pyepo\n", 82 | " Building wheel for pyepo (setup.py) ... \u001b[?25l\u001b[?25hdone\n", 83 | " Created wheel for pyepo: filename=pyepo-0.3.3-py3-none-any.whl size=35277 sha256=d0e7f6ad71648ef6c426f82e58e6bc14120747a28b4258c67c3d73c5e1f29fac\n", 84 | " Stored in directory: /tmp/pip-ephem-wheel-cache-ud_rp2_g/wheels/46/e0/92/19132b049913f800d1a8d6e61b81fe00bcac3f9d11ef30d2a5\n", 85 | "Successfully built pyepo\n", 86 | "Installing collected packages: ply, gurobipy, Pyomo, ppft, pox, dill, multiprocess, pathos, pyepo\n", 87 | "Successfully installed Pyomo-6.6.1 dill-0.3.6 gurobipy-10.0.2 multiprocess-0.70.14 pathos-0.3.0 ply-3.11 pox-0.3.2 ppft-1.7.6.6 pyepo-0.3.3\n" 88 | ] 89 | } 90 | ], 91 | "source": [ 92 | "# download\n", 93 | "!git clone -b main --depth 1 https://github.com/khalil-research/PyEPO.git\n", 94 | "# install\n", 95 | "!pip install PyEPO/pkg/." 
96 | ] 97 | }, 98 | { 99 | "cell_type": "markdown", 100 | "source": [ 101 | "## Build optModel" 102 | ], 103 | "metadata": { 104 | "id": "HjxG2TL58ZFb" 105 | } 106 | }, 107 | { 108 | "cell_type": "code", 109 | "source": [ 110 | "import gurobipy as gp\n", 111 | "from gurobipy import GRB\n", 112 | "from pyepo.model.grb import optGrbModel\n", 113 | "\n", 114 | "class myOptModel(optGrbModel):\n", 115 | " def _getModel(self):\n", 116 | " # ceate a model\n", 117 | " m = gp.Model()\n", 118 | " # varibles\n", 119 | " x = m.addVars(5, name=\"x\", vtype=GRB.BINARY)\n", 120 | " # sense\n", 121 | " m.modelSense = GRB.MAXIMIZE\n", 122 | " # constraints\n", 123 | " m.addConstr(3*x[0]+4*x[1]+3*x[2]+6*x[3]+4*x[4]<=12)\n", 124 | " m.addConstr(4*x[0]+5*x[1]+2*x[2]+3*x[3]+5*x[4]<=10)\n", 125 | " m.addConstr(5*x[0]+4*x[1]+6*x[2]+2*x[3]+3*x[4]<=15)\n", 126 | " return m, x\n", 127 | "\n", 128 | "optmodel = myOptModel()" 129 | ], 130 | "metadata": { 131 | "id": "nRbnYdWx8M8L", 132 | "colab": { 133 | "base_uri": "https://localhost:8080/" 134 | }, 135 | "outputId": "e5c8475a-e741-4581-ff81-aa366987d66a" 136 | }, 137 | "execution_count": null, 138 | "outputs": [ 139 | { 140 | "output_type": "stream", 141 | "name": "stdout", 142 | "text": [ 143 | "Auto-Sklearn cannot be imported.\n", 144 | "Restricted license - for non-production use only - expires 2024-10-28\n" 145 | ] 146 | } 147 | ] 148 | }, 149 | { 150 | "cell_type": "markdown", 151 | "source": [ 152 | "## Problem Data" 153 | ], 154 | "metadata": { 155 | "id": "-vsQ0Vre84_1" 156 | } 157 | }, 158 | { 159 | "cell_type": "code", 160 | "source": [ 161 | "import torch\n", 162 | "torch.manual_seed(42)\n", 163 | "\n", 164 | "num_data = 1000 # number of data\n", 165 | "num_feat = 5 # feature dimention\n", 166 | "num_cost = 5 # cost dimention\n", 167 | "\n", 168 | "# randomly generate data\n", 169 | "x_true = torch.rand(num_data, num_feat) # feature\n", 170 | "weight_true = torch.rand(num_feat, num_cost) # weight\n", 171 | "bias_true = torch.randn(num_cost) # bias\n", 172 | "noise = 0.5 * torch.randn(num_data, num_cost) # random noise\n", 173 | "c_true = x_true @ weight_true + bias_true + noise # cost coef" 174 | ], 175 | "metadata": { 176 | "id": "-rqtFovN8-Gc" 177 | }, 178 | "execution_count": null, 179 | "outputs": [] 180 | }, 181 | { 182 | "cell_type": "code", 183 | "source": [ 184 | "# split train test data\n", 185 | "from sklearn.model_selection import train_test_split\n", 186 | "x_train, x_test, c_train, c_test = train_test_split(x_true, c_true, test_size=200, random_state=42)\n", 187 | "\n", 188 | "# build optDataset\n", 189 | "from pyepo.data.dataset import optDataset\n", 190 | "dataset_train = optDataset(optmodel, x_train, c_train)\n", 191 | "dataset_test = optDataset(optmodel, x_test, c_test)\n", 192 | "\n", 193 | "# build DataLoader\n", 194 | "from torch.utils.data import DataLoader\n", 195 | "batch_size = 32\n", 196 | "loader_train = DataLoader(dataset_train, batch_size=batch_size, shuffle=True)\n", 197 | "loader_test = DataLoader(dataset_test, batch_size=batch_size, shuffle=False)" 198 | ], 199 | "metadata": { 200 | "id": "Tk_wcRiq_5_K", 201 | "colab": { 202 | "base_uri": "https://localhost:8080/" 203 | }, 204 | "outputId": "7868f823-6afc-4185-9eb8-b348516462af" 205 | }, 206 | "execution_count": null, 207 | "outputs": [ 208 | { 209 | "output_type": "stream", 210 | "name": "stdout", 211 | "text": [ 212 | "Optimizing for optDataset...\n" 213 | ] 214 | }, 215 | { 216 | "output_type": "stream", 217 | "name": "stderr", 218 | "text": [ 219 | "100%|██████████| 
800/800 [00:00<00:00, 1153.06it/s]\n" 220 | ] 221 | }, 222 | { 223 | "output_type": "stream", 224 | "name": "stdout", 225 | "text": [ 226 | "Optimizing for optDataset...\n" 227 | ] 228 | }, 229 | { 230 | "output_type": "stream", 231 | "name": "stderr", 232 | "text": [ 233 | "100%|██████████| 200/200 [00:00<00:00, 823.20it/s]\n" 234 | ] 235 | } 236 | ] 237 | }, 238 | { 239 | "cell_type": "markdown", 240 | "source": [ 241 | "## Build Prediction Model" 242 | ], 243 | "metadata": { 244 | "id": "ZENnTDsKEwho" 245 | } 246 | }, 247 | { 248 | "cell_type": "code", 249 | "source": [ 250 | "import torch\n", 251 | "from torch import nn\n", 252 | "\n", 253 | "# build linear model\n", 254 | "class LinearRegression(nn.Module):\n", 255 | " def __init__(self):\n", 256 | " super(LinearRegression, self).__init__()\n", 257 | " self.linear = nn.Linear(num_feat, num_cost)\n", 258 | "\n", 259 | " def forward(self, x):\n", 260 | " out = self.linear(x)\n", 261 | " return out\n", 262 | "\n", 263 | "# init model\n", 264 | "reg = LinearRegression()\n", 265 | "# cuda\n", 266 | "if torch.cuda.is_available():\n", 267 | " reg = reg.cuda()" 268 | ], 269 | "metadata": { 270 | "id": "g2mNrexpEvhS" 271 | }, 272 | "execution_count": null, 273 | "outputs": [] 274 | }, 275 | { 276 | "cell_type": "markdown", 277 | "source": [ 278 | "## AutoGrad Module for Optimization" 279 | ], 280 | "metadata": { 281 | "id": "oyDZry1AD99r" 282 | } 283 | }, 284 | { 285 | "cell_type": "code", 286 | "source": [ 287 | "import pyepo\n", 288 | "\n", 289 | "# init SPO+ loss\n", 290 | "spop = pyepo.func.SPOPlus(optmodel, processes=2)\n", 291 | "# init PFY loss\n", 292 | "pfy = pyepo.func.perturbedFenchelYoung(optmodel, n_samples=3, sigma=1.0, processes=2)\n", 293 | "# init NCE loss\n", 294 | "nce = pyepo.func.NCE(optmodel, processes=2, solve_ratio=0.05, dataset=dataset_train)" 295 | ], 296 | "metadata": { 297 | "colab": { 298 | "base_uri": "https://localhost:8080/" 299 | }, 300 | "id": "4Ykku_3eDsNc", 301 | "outputId": "80a10b75-1879-4fee-d933-9496d148203a" 302 | }, 303 | "execution_count": null, 304 | "outputs": [ 305 | { 306 | "output_type": "stream", 307 | "name": "stdout", 308 | "text": [ 309 | "Num of cores: 2\n", 310 | "Num of cores: 2\n", 311 | "Num of cores: 2\n" 312 | ] 313 | } 314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "source": [ 319 | " # set adam optimizer\n", 320 | " optimizer = torch.optim.Adam(reg.parameters(), lr=5e-3)\n", 321 | "\n", 322 | " # train mode\n", 323 | " reg.train()\n", 324 | " for epoch in range(5):\n", 325 | " # load data\n", 326 | " for i, data in enumerate(loader_train):\n", 327 | " x, c, w, z = data # feat, cost, sol, obj\n", 328 | " # cuda\n", 329 | " if torch.cuda.is_available():\n", 330 | " x, c, w, z = x.cuda(), c.cuda(), w.cuda(), z.cuda()\n", 331 | " # forward pass\n", 332 | " cp = reg(x)\n", 333 | " loss = spop(cp, c, w, z)\n", 334 | " # backward pass\n", 335 | " optimizer.zero_grad()\n", 336 | " loss.backward()\n", 337 | " optimizer.step()\n", 338 | " # log\n", 339 | " regret = pyepo.metric.regret(reg, optmodel, loader_test)\n", 340 | " print(\"Loss: {:9.4f}, Regret: {:7.4f}%\".format(loss.item(), regret*100))" 341 | ], 342 | "metadata": { 343 | "colab": { 344 | "base_uri": "https://localhost:8080/" 345 | }, 346 | "id": "Ets2ndfSFnag", 347 | "outputId": "e21952e5-626f-4421-c4e8-3f55bb31cb40" 348 | }, 349 | "execution_count": null, 350 | "outputs": [ 351 | { 352 | "output_type": "stream", 353 | "name": "stdout", 354 | "text": [ 355 | "Loss: 6.5576, Regret: 21.4272%\n", 356 | "Loss: 3.9369, 
Regret: 0.1530%\n", 357 | "Loss: 2.1917, Regret: 0.1530%\n", 358 | "Loss: 0.7715, Regret: 0.1530%\n", 359 | "Loss: 0.4534, Regret: 0.1530%\n" 360 | ] 361 | } 362 | ] 363 | }, 364 | { 365 | "cell_type": "code", 366 | "source": [], 367 | "metadata": { 368 | "id": "_HJZ2cBIHhks" 369 | }, 370 | "execution_count": null, 371 | "outputs": [] 372 | } 373 | ] 374 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 当机器学习遇上运筹学:PyEPO与端对端预测后优化 2 | 3 | #### 摘要: 4 | 5 | 本文将探讨端对端预测后优化(End-to-End Predict-then-Optimize),这一方法结合了运筹学和机器学习的优势。我们将解析其工作原理,与传统方法进行比较,展示多种算法,并运用开源工具PyEPO进行演示。我们希望通过这篇文章,读者可以对这个领域有更深入的理解。 6 | 7 | #### 编者按: 8 | 9 | 这篇文章我想要写已经很久了,毕竟“端对端预测后优化”(End-to-End Predict-then-Optimize)正是我读博期间的主要研究方向,但我又一直迟迟没能下笔。想说自己杂事缠身(实际是重度拖延症晚期的缘故),更主要的原因恐怕是我对这一领域理解依然尚浅,尤其我希望以综述的形式,为读者提供详尽的介绍。然而,这篇文章并不能称为一篇综述,原因有二:一方面,我虽然进行相关的研究,但还无法自称为专家;另一方面,"端对端预测后优化"还处于起步阶段,有很大的探索空间,尚有无穷可能。因此,此时编写综述可能为时尚早。因此,我选择用这篇文章抛砖引玉,旨在引发关于这个领域的进一步探讨和思考。 10 | 11 | 12 | ## 1 引言 13 | 14 | 运筹学和统计学/数据科学/机器学习的紧密关系由来已久。机器学习通过挖掘数据,预测未知或不确定的信息,一旦得到预测结果,常常需要进一步的决策行动来获取收益。而运筹学作为建模求解最优化问题的工具,尽管可以(相对)高效地找到最优解,但一大限制是通常需要参数(无论是本身还是其分布)的确定性,无法充分利用数据。 15 | 16 | 本文就是要讨论数据驱动下,带有不确定参数的优化问题。这种问题通常通过“预测后优化”的范式来解决。这一问题在现实生产生活中有着深远的意义。举例来说,车辆路径规划中,由于交通状况的不断变化,在每段道路的行驶时间是不确定的;电网调度中,不同地区的电力负荷也会随时间发生变化;投资组合中,金融资产的收益率会受到市场波动的影响。以上这些情况都涉及到优化模型参数的不确定性,但是,我们可以利用时间、天气、金融因素等特征,预测这些不确定的参数,从而进行最优决策。 17 | 18 | 此外,本文也会介绍一个端对端预测后优化的开源框架PyEPO ()。PyEPO基于PyTorch, 主要针对(但不限于)线性规划(LP)和整数线性规划(ILP),集成了文中提到的多种算法,并提供了COPT、Gurobi、Pyomo等优化建模工具的API。PyEPO可以作为PyTorch的autograd模块进行深度学习的训练和测试,使用起来简洁明了。这个框架的设计目标是为广大学界和业界用户提供便捷的工具,帮助大家更好地理解和应用端对端预测后优化方法。 19 | 20 | 21 | ## 2 问题描述和符号 22 | 23 | 首先,请各位读者放心,本文并不打算深入挖掘端对端预测后优化背后的数学推导,而是致力于提供一些直观的理解。 24 | 25 | 我们以一个简单的线性优化问题为例: 26 | 27 | $$ 28 | \begin{aligned} 29 | \underset{w_1,w_2}{\max} \quad & c_1 w_1+c_2 w_2 \\ 30 | s.t. \quad & w_1 + w_2 \leq 1 \\ 31 | & w_1, w_2 \geq 0 32 | \end{aligned} 33 | $$ 34 | 35 | 在这里,$\mathbf{w} = (w_1,w_2)$代表的是决策变量,$\mathbf{W} = \lbrace w_1 + w_2 ≤ 1,w_1, w_2 ≥ 0 \rbrace$定义了可行域,而$\mathbf{c}=(c_1,c_2)$就是我们不确定的成本向量。 36 | 37 | 给定成本向量$\mathbf{c}$,由于退化问题的存在,优化问题可能得到多个最优解,但可以假定使用某种特定的求解器(如Gurobi)时,只返回唯一一个最优解$\mathbf{w}^* (\mathbf{c})$。 38 | 39 | 有一组数据$\mathbf{D} = \lbrace(\mathbf{x}^1,\mathbf{c}^1), (\mathbf{x}^2,\mathbf{c}^2), ⋯, (\mathbf{x}^n,\mathbf{c}^n)\rbrace$,其中$\mathbf{x}$为数据特征,我们可以利用机器学习模型$\mathbf{g}(\mathbf{x},\boldsymbol{\theta})$来最小化某个损失函数$\ell(\mathbf{g}(\mathbf{x},\boldsymbol{\theta}),\mathbf{c})$。其中,$\boldsymbol{\theta}$是模型$\mathbf{g}(\mathbf{x},\boldsymbol{\theta})$的参数,会在训练过程中不断更新,而$\hat{\mathbf{c}} = \mathbf{g}(\mathbf{x},\boldsymbol{\theta})$则是成本向量$\mathbf{c}$的预测值。由此我们可以利用数据驱动的方式来预测不确定的参数,帮助实现优化决策。 40 | 41 | ## 3 什么是端对端预测后优化? 
42 | 43 | 在回答这个问题之前,我们首先需要理解端对端学习(End-to-End Learning)的理念。端对端的这个“端”,指的是输入端和输出端。相比于传统的分步骤方式,它并不依赖于中间过程的手动特征工程或者人为设计的步骤,而直接构建从输入到输出的映射,以一种更直接和自动的方式解决问题。这种简化不仅减轻了手动特征工程的负担,而且通过学习直接的映射,减少了对中间结果的依赖,有可能发现传统方法难以发现的模式,从而提高整体的性能。端对端学习作为一个整体,使得整个系统变得更简洁,有利于处理复杂和高维度的问题。 44 | 45 | ![End-to-End Pipeline Framework](media/df9889a4142bfb2523e1a67d849f72eb.png) 46 | 47 | 对于端对端预测后优化,我们在训练机器学习模型$\mathbf{g}(\mathbf{x},\boldsymbol{\theta})$的过程中,模型预测了成本向量$\hat{\mathbf{c}} = \mathbf{g}(\mathbf{x}, \boldsymbol{\theta})$,然后通过求解器得到最优解$\mathbf{w}^* (\hat{\mathbf{c}}) = \underset{\mathbf{w} \in \mathbf{W}}{\min} \hat{\mathbf{c}}^{\top} \mathbf{w}$,并计算损失函数$\ell(\hat{\mathbf{c}}, \mathbf{c})$来直接衡量决策损失。 48 | 49 | 借助链式法则,我们能够计算出模型参数$\boldsymbol{\theta}$相对于损失函数$\ell(\hat{\mathbf{c}}, \mathbf{c})$的梯度,用于更新模型参数: 50 | 51 | $$ 52 | \begin{aligned} 53 | \frac{\partial \ell(\hat{\mathbf{c}}, \mathbf{c})}{\partial \boldsymbol{\theta}} 54 | & = \frac{\partial \ell(\hat{\mathbf{c}}, \mathbf{c})}{\partial \hat{\mathbf{c}}} \frac{\partial \hat{\mathbf{c}}}{\partial \boldsymbol{\theta}} \\ 55 | & = \frac{\partial \ell(\hat{\mathbf{c}}, \mathbf{c})}{\partial \mathbf{w}^* (\hat{\mathbf{c}})} \frac{\partial \mathbf{w}^* (\hat{\mathbf{c}})}{\partial \hat{\mathbf{c}}} \frac{\partial \hat{\mathbf{c}}}{\partial \boldsymbol{\theta}} 56 | \end{aligned} 57 | $$ 58 | 59 | 显然,对于依赖于链式法则进行反向传播的模型(如神经网络),关键部分是计算求解过程的梯度$\frac{\partial \mathbf{w}^* (\hat{\mathbf{c}})}{\partial \hat{\mathbf{c}}}$。端对端预测后优化的各类算法几乎都是在此基础上展开的。然而,在此,我们先不深入讨论这些算法,因为我们我们必须先回答一个更为重要,也是更致命的问题: 60 | 61 | 62 | 63 | ## 4 为什么要使用端对端预测后优化? 64 | 65 | ### 4.1 关于两阶段的预测后优化 66 | 67 | 毫无疑问,采用两阶段的预测后优化,即将机器学习预测模型$\mathbf{g}(\mathbf{x},\boldsymbol{\theta})$和优化求解器$\mathbf{w}^* (\mathbf{c})$独立使用,看似是一个更为自然、直接的做法。此方法的预测任务中,我们最小化成本向量预测值$\hat{\mathbf{c}} = \mathbf{g}(\mathbf{x},\boldsymbol{\theta})$和真实值$\mathbf{c}$之间的预测误差,如均方误差$\ell_{\text{MSE}} (\hat{\mathbf{c}},\mathbf{c}) = {\lVert \hat{\mathbf{c}}-\mathbf{c} \rVert}^2$。熟悉机器学习的读者可能会发现,这实际上是一项非常经典的回归任务,对应的模型和算法已经相当成熟。而在决策任务中,一旦给定预测参数$\hat{\mathbf{c}}$,现代求解器可以将问题视作确定性优化直接求解。既然预测任务和决策任务都有成熟的方案,那么为什么我们还要尝试将它们结合在一起? 68 | 69 | 文献中的解释——“与直接考虑决策误差相比,基于预测误差训练预测模型会导致更糟糕的决策。”用人话来说就是:像$\ell_{\text{MSE}} (\hat{\mathbf{c}},\mathbf{c})$这样的预测误差,不能准确地衡量决策的质量。 70 | 71 | 在日常生活中,人们只关心决策的好坏,而不是各项指标预测的准确度。正如我们驱车赶往目的地时,只关心自己是否选中捷径,而无须精确预测每段可能经过的路段所耗费的时间。 72 | 73 | 让我们回到前文提到的线性优化问题:假设实际成本向量为$\mathbf{c}=(0,1)$,最优解为$\mathbf{w}^* (\mathbf{c}) = (0,1)$。当我们将成本向量预测为$\hat{\mathbf{c}} = (1,0)$,其最优解为$\mathbf{w}^* (\hat{\mathbf{c}}) = (1,0)$,预测的均方误差$\ell_{\text{MSE}} (\hat{\mathbf{c}},\mathbf{c}) = 2$;当我们将成本向量预测为$\hat{\mathbf{c}} = (0,3)$,其最优解为$\mathbf{w}^* (\hat{\mathbf{c}}) = (0,1)$,预测的均方误差$\ell_{\text{MSE}} (\hat{\mathbf{c}},\mathbf{c}) = 4$。 74 | 75 | 这个例子揭示了一个有趣的现象:后者虽然在预测误差上比前者大,但在决策上却是最优的。 76 | 77 | 因此,即使预测模型表现出了较大的误差,但只要预测的成本向量能引导我们做出正确的决策,这个预测模型就是有效的。这就是为什么我们需要考虑端对端预测后优化,我们希望训练出的模型能够引导我们做出最优的决策,而不必预测出精确的成本向量。 78 | 79 | 那么,如果预测模型的预测结果足够精确,那是不是可以摒弃使用端对端方法了呢?答案是肯定的。然而,不要忘记统计学家George E.P. Box有句名言: “All models are wrong, but some are useful.” 80 | 81 | ### 4.2 关于模仿学习 82 | 83 | 既然端对端方法展现了足够的优势,那我们为什么不妨更激进一点,采用模仿学习(Imitation Learning),把(最优)决策行为$\mathbf{w}^* (\mathbf{c})$作为标签,省去了中间的求解过程,直接训练模型$\hat{\mathbf{w}}^* = \mathbf{g}(\mathbf{x},\boldsymbol{\theta})$预测最优解呢? 
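在讨论模仿学习的利弊之前,不妨先用一小段代码直观复现 4.1 节的例子:对于 $\underset{w_1,w_2}{\max} \lbrace c_1 w_1 + c_2 w_2: w_1 + w_2 \leq 1,w_1, w_2 \geq 0 \rbrace$,预测误差更大的成本向量,反而可能带来零遗憾的决策。下面的代码是笔者补充的示意(并非文中任何方法的实现),利用线性目标的最优解必在可行域极点取得这一性质,直接枚举三个极点求解:

```python
import numpy as np

# feasible region {w1 + w2 <= 1, w1, w2 >= 0} has three extreme points
vertices = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])

def solve_max(c):
    # for a linear objective, some extreme point is optimal: just enumerate
    return vertices[np.argmax(vertices @ c)]

c_true = np.array([0.0, 1.0])   # true cost vector
w_true = solve_max(c_true)      # true optimal solution (0, 1)

for c_hat in (np.array([1.0, 0.0]), np.array([0.0, 3.0])):
    w_hat = solve_max(c_hat)                    # decision under the prediction
    mse = np.sum((c_hat - c_true) ** 2)         # prediction error
    regret = c_true @ w_true - c_true @ w_hat   # decision regret (maximization)
    print(f"c_hat={c_hat}, MSE={mse:.0f}, Regret={regret:.0f}")
```

运行后可以看到:预测 $(1,0)$ 的均方误差为 2、遗憾为 1;预测 $(0,3)$ 的均方误差为 4、遗憾却为 0,与上文的结论一致。回到模仿学习的问题上。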
84 | 85 | 毫无疑问,模仿学习在计算效率上具有显著优势,因为它规避了计算效率的主要瓶颈:优化求解。 86 | 87 | 然而,其局限性也很明显。尽管研究人员已经做出了许多尝试,比如Kervadec等人在损失函数上添加了修改过的障碍函数 [8];Donti等人使用了梯度投影(Gradiant Projection)将不可行解修正至可行域 [18]。但目前的预测模型,无论是线性回归、决策树、还是神经网络,在处理带有硬约束(Hard Constraints)的问题上仍存在难度,难以兼顾可行性、最优性和计算效率。因此,模仿学习的预测结果常常面临可行性问题,特别是对于高维度、有复杂约束的优化问题。 88 | 89 | 90 | ## 5 如何进行端对端预测后优化? 91 | 92 | 开篇名义,这个章节将会讨论端对端预测后优化的若干方法,这些方法适用的优化问题有所差异,但主要集中在成本向量$\mathbf{c}$未知、有线性目标函数的问题上。需要明确的是,这里强调的是目标函数的线性,并不意味着约束条件也必须是线性的。例如,在SPO+的相关论文中 [1],作者们探讨了具有二次约束的投资组合均值-方差模型。此外,对比、排序方法和损失函数近似法甚至对优化问题的形式几乎没有特定要求。 93 | 94 | 尽管也存在基于决策树的模型SPO Tree [9],大部分方法还是依赖梯度下降更新参数。之前提到,端对端学习的关键是计算求解过程的梯度$\frac{\partial \mathbf{w}^* (\hat{\mathbf{c}})}{\partial \hat{\mathbf{c}}}$。然而,传统的优化求解器和算法往往并未提供梯度信息。 95 | 96 | 更坏的消息是:线性规划、整数线性规划等具有线性目标函数的问题,其最优解$\mathbf{w}^* (\mathbf{c})$作为成本向量$\mathbf{c}$的函数,是一个分片常数函数(Piecewise Constant Function),它的一阶导数要么为0,要么不存在。熟悉线性规划敏感性分析的话,就会知道成本向量系数$\mathbf{c}$的元素发生变化时,最优解$\mathbf{w}^* (\mathbf{c})$要么不发生改变,要么会从可行域的一个极点跳到另一个极点。我们依然以线性规划$\underset{w_1,w_2}{\max} \lbrace c_1 w_1 + c_2 w_2: w_1 + w_2 ≤ 1,w_1, w_2 ≥ 0 \rbrace$为例,如图: 97 | 98 | ![Piecewise Constant Solver Function](media/81cbf99d80c180f7e810f50275b94d0f.png) 99 | 100 | 既然梯度几乎处处为0,梯度下降法似乎无法实施。然而,科研的魅力正是将不可能变为可能。面对这一挑战,研究者们提出了多种解决策略:一类是寻找替代的梯度信息,用以更新模型参数;另一类索性重新设计一个(有非0梯度的)替代损失函数。这两类思路基本囊括了基于梯度的端对端预测后优化算法: 101 | 102 | ### 5.1 基于KKT条件的隐函数求导 103 | 104 | Amos和Kolter提出“OptNet” [10],通过求解KKT条件的偏微分矩阵线性方程组来计算求解器反向传播的梯度。为了克服线性规划中无法得到非0梯度的问题,Wilder等人 [11] 在线性目标函数中加入了一个微小的二次项。基于这类方法,后续的研究者展开了多方面的探索。例如,引入割平面法(Cutting-Plane Method)以处理整数问题 [12],或使用障碍函数来替代拉格朗日罚函数 [13]。 105 | 106 | 值得一提的是,这类方法不仅能计算出目标函数成本向量的梯度,而且能够得到约束条件中参数的梯度信息。 107 | 108 | ### 5.2 SPO+ 109 | 110 | 不同于KKT方法,Elmachtoub和Grigas [1] 为目标函数是线性($\mathbf{c}^{\top} \mathbf{w}$)的决策误差找到了一个凸且可导的替代损失函数SPO+ Loss。 111 | 112 | 在这里,对于一个最小化问题$\underset{\mathbf{w} \in \mathbf{W}}{\min} \mathbf{c}^{\top} \mathbf{w}$,我们先定义一个决策损失“遗憾”:$\ell_{\text{Regret}} (\hat{\mathbf{c}}, \mathbf{c}) = \mathbf{c}^{{\top}} \mathbf{w}^* (\hat{\mathbf{c}}) - \mathbf{c}^{{\top}} \mathbf{w}^* (\mathbf{c})$,衡量实际成本向量$\mathbf{c}$下,实际目标值$\mathbf{c}^{{\top}} \mathbf{w}^* (\hat{\mathbf{c}})$与最优目标值$\mathbf{c}^{{\top}} \mathbf{w}^* (\mathbf{c})$之间的差距,也可以理解为优化间隙(optimality gap)。 113 | 114 | 由于$\mathbf{w}^* (\mathbf{c})$没有非0导数,这个损失函数同样也没有非0导数。Elmachtoub和Grigas [1] 找到了这个函数的一个凸上界作为替代: 115 | 116 | $$ 117 | \ell_{\text{SPO+}} (\hat{\mathbf{c}}, \mathbf{c}) = - \underset{\mathbf{w} \in \mathbf{W}}{\min} \{(2 \hat{\mathbf{c}} - \mathbf{c})^{\top} \mathbf{w}\} + 2 \hat{\mathbf{c}}^{\top} \mathbf{w}^* (\mathbf{c}) - \mathbf{c}^{{\top}} \mathbf{w}^* (\mathbf{c}) 118 | $$ 119 | 120 | 对于损失函数$\ell_{\text{SPO+}} (\hat{\mathbf{c}}, \mathbf{c})$,有次梯度: 121 | 122 | $$ 123 | 2 \mathbf{w}^* (\mathbf{c}) - 2 \mathbf{w}^* (2 \hat{\mathbf{c}} - \mathbf{c}) \in \frac{\partial \ell_{\text{SPO+}}(\hat{\mathbf{c}}, \mathbf{c})}{\partial \hat{\mathbf{c}}} 124 | $$ 125 | 126 | ### 5.3 扰动方法 127 | 128 | 同样是线性目标函数,扰动方法则是另辟蹊径,引入随机扰动来处理成本向量的预测值$\hat{\mathbf{c}}$。 129 | 130 | Berthet等人 [4] 用在高斯随机扰动$\boldsymbol{\xi}$下最优决策的期望值$\mathbb{E}^{\boldsymbol{\xi}} [\mathbf{w}^* (\hat{\mathbf{c}} + \sigma \boldsymbol{\xi})]$代替$\mathbf{w}^* (\hat{\mathbf{c}})$。如图所示,$\mathbf{w}^* (\hat{\mathbf{c}} + \sigma \boldsymbol{\xi})$是可行域极点(基本可行解)的离散型随机向量,决策的期望值$\mathbb{E}^{\boldsymbol{\xi}} [\mathbf{w}^* (\hat{\mathbf{c}} + \sigma \boldsymbol{\xi})]$实际上可视为可行域极点​​​​​​​的概率加权平均(凸组合)。与$\mathbf{w}^* (\hat{\mathbf{c}})$不同,只要$\hat{\mathbf{c}}$在$\mathbb{E}^{\boldsymbol{\xi}} [\mathbf{w}^* (\hat{\mathbf{c}} + \sigma 
\boldsymbol{\xi})]$中发生一些微小的变化,可行域极点权重(其发生的概率)就会相应变化。本质上,扰动方法通过为离散的解向量引入概率分布实现平滑,这种方法与机器学习中SoftMax的思想有着异曲同工之处。 131 | 132 | ![Probability Extreme Point](media/044f4b46af76df9340c0e21795944381.png) 133 | 134 | 接下来,我们“只”需要通过概率密度函数$f(\boldsymbol{\xi})$的积分求期望 135 | 136 | $$ 137 | \mathbb{E}^{\boldsymbol{\xi}} [\mathbf{w}^* (\hat{\mathbf{c}} + \sigma \boldsymbol{\xi})] = \int \mathbf{w}^* (\hat{\mathbf{c}} + \sigma \boldsymbol{\xi}) f(\boldsymbol{\xi}) d \boldsymbol{\xi} 138 | $$ 139 | 140 | 然后发现好像做不到。在实际操作中,我们用样本量为$K$的蒙特卡洛采样来近似期望: 141 | 142 | $$ 143 | \mathbb{E}^{\boldsymbol{\xi}} [\mathbf{w}^* (\hat{\mathbf{c}} + \sigma \boldsymbol{\xi})] \approx \frac{1}{K} 144 | \sum_{\kappa}^K { \mathbf{w}^*(\hat{\mathbf{c}} + \sigma \boldsymbol{\xi}_{\kappa})} 145 | $$ 146 | 147 | 由于$\frac{\partial\mathbb{E}^{\boldsymbol{\xi}} [\mathbf{w}^* (\hat{\mathbf{c}} + \sigma \boldsymbol{\xi})]}{\partial \hat{\mathbf{c}}}$存在且非0,梯度问题由此引刃而解。 148 | 149 | 除了加法扰动,Dalle等人 [14] 进一步提出了乘法扰动,同样引入高斯随机扰动$\boldsymbol{\xi}$,但让预测成本向量$\hat{\mathbf{c}}$与$e^{\sigma \boldsymbol{\xi} - 1/2 {\sigma}^2}$对应位元素相乘。乘法扰动消除了加法扰动可能引起的正负号变化问题。在一些特定的应用中,例如Dijkstra算法等,对成本向量有非负性的要求,乘法扰动就非常有用。 150 | 151 | 基于扰动方法,Berthet等人 [4] 利用了Fenchel-Young对偶的性质,进一步构造了一个新的损失函数,用来降低$F^{\boldsymbol{\xi}}(\hat{\mathbf{c}}) = \mathbb{E}^{\boldsymbol{\xi}}[\underset{\mathbf{w} \in \mathbf{W}}{\min} {\{(\hat{\mathbf{c}}+\sigma \boldsymbol{\xi})^{\top} \mathbf{w}\}}]$的对偶间隙。令$\Omega (\mathbf{w}^* ({\mathbf{c}}))$为$F^{\boldsymbol{\xi}}(\mathbf{c})$的对偶,则有: 152 | 153 | $$\ell_{\text{PFY}}(\hat{\mathbf{c}}, \mathbf{w}^* ({\mathbf{c}})) = \hat{\mathbf{c}}^{\top} \mathbf{w}^* ({\mathbf{c}}) - F^{\boldsymbol{\xi}}(\hat{\mathbf{c}}) - \Omega (\mathbf{w}^* ({\mathbf{c}}))$$ 154 | 155 | 这个损失函数可能看起来有些复杂,它甚至包含一个神秘的对偶函数$\Omega (\mathbf{w}^* ({\mathbf{c}}))$。但是,当我们对其进行求导操作时,会发现$\Omega (\mathbf{w}^* ({\mathbf{c}}))$实际上是常数,因此,梯度表达式非常简单: 156 | 157 | $$\frac{\partial \ell_{\text{PFY}}(\hat{\mathbf{c}}, \mathbf{w}^* ({\mathbf{c}}))}{\partial \hat{\mathbf{c}}} = \mathbf{w}^* ({\mathbf{c}}) - \mathbb{E}^{\boldsymbol{\xi}} [\mathbf{w}^* (\hat{\mathbf{c}} + \sigma \boldsymbol{\xi})]$$ 158 | 159 | ### 5.4 黑箱方法 160 | 161 | 面对$\mathbf{w}^* (\mathbf{c})$的不可导问题,有一个更加简单粗暴的方法,即将求解器函数视为一个“黑箱”,并利用解空间的几何形状等性质找到替代梯度。 162 | 163 | 如图所示,Pogancic等人 [3] 提出了“Differentiable Black-box”方法引入一个插值超参数$\lambda$。对于一个成本向量预测值$\hat{\mathbf{c}}$,在$\hat{\mathbf{c}}$与$\hat{\mathbf{c}} + \lambda \frac{\partial l (\hat{\mathbf{c}}, \mathbf{c})}{\partial \mathbf{w}^* (\hat{{\mathbf{c}}})}$之间对分片常数损失函数$l (\hat{\mathbf{c}}, \mathbf{c})$进行线性插值,从而将其转化为分片线性函数(Piecewise Affine Function),以此可得非0梯度。 164 | 165 | ![Affine Interpolation](media/ffa7d65c364918231fad932f5c088abe.png) 166 | 167 | 此外,Sahoo等人 [7] 提出了一种相当简洁的方案,即用负单位矩阵$- \mathbf{I}$替代求解器梯度$\frac{\partial \mathbf{w}^* (\hat{\mathbf{c}})}{\partial \hat{\mathbf{c}}}$。我们可以将其称为“Negative Identity”方法。从直观角度理解,对于一个最小化问题$\underset{\mathbf{w} \in \mathbf{W}}{\min} \mathbf{c}^{\top} \mathbf{w}$,我们希望通过如下方式更新成本向量的预测值$\hat{\mathbf{c}}$:沿着$\mathbf{w}^* (\hat{\mathbf{c}})$需要上升的方向减少,沿着$\mathbf{w}^* (\hat{\mathbf{c}})$需要下降的方向增加,这会使$\mathbf{w}^* (\hat{\mathbf{c}})$接近最优决策$\mathbf{w}^* (\mathbf{c})$。另外,该研究也证明了,这个方法可以看作是“Differentiable Black-box”方法在特定超参数λ下的特例。 168 | 169 | ### 5.5 对比、排序方法: 170 | 171 | Mulamba [5] 则是曲线救国,采用了 “噪声对比估计(Noise Contrastive Estimation)” 的技巧,巧妙地计算出替代损失函数。 172 | 173 | 首先,由于我们的可行域$\mathbf{w} \in \mathbf{W}$是固定不变的,因此在训练集以及训练、求解过程中,我们可以自然地收集到大量的可行解,形成一个解集合$\Gamma$。 174 | 175 | 该方法的关键思路是,将次优解的子集$\Gamma \setminus \mathbf{w}^* (c)$作为负样本,让最优解和“负样本”之间的的差值尽可能大。对于一个最小化问题$\underset{\mathbf{w} 
\in \mathbf{W}}{\min} \mathbf{c}^{\top} \mathbf{w}$,有: 176 | 177 | $$ 178 | \ell_{\text{NCE}} (\hat{\mathbf{c}},\mathbf{c}) = \frac{1}{|\Gamma|-1} \sum_{\mathbf{w} \in {\Gamma \setminus {\mathbf{w}^* (\mathbf{c})}}}(\hat{\mathbf{c}}^{\top} \mathbf{w}^* (\mathbf{c})-\hat{\mathbf{c}}^{\top} \mathbf{w}) 179 | $$ 180 | 181 | 受到这项工作构造损失函数区分最优解的启发,Mandi等人 [6] 提出了一种新思路,将端对端预测后优化任务转化为一个排序学习(Learning to rank) [15],学习一个目标函数(如$\hat{\mathbf{c}}^{\top} \mathbf{w}$)作为排序得分,以便对可行解的子集$\Gamma$进行正确排序(和使用真实成本向量$\mathbf{c}$时一致)。和之前的方法相比,这种方法的优势在于,它对使用的优化方法和目标函数的形式不加以限制。 182 | 183 | 例如,对于一个线性规划问题,$\mathbf{c}^{\top} \mathbf{w}$可以被视为排序得分。对于预测的成本向量$\hat{\mathbf{c}}$,为了排序得分$\hat{\mathbf{c}}^{\top} \mathbf{w}$能在解集$\mathbf{w} \in \Gamma$中有正确的排序,我们可以采用以下三种经典的排序学习方法:单文档方法(Pointwise Approach)、文档对方法(Pairwise Approach)、以及文档列表方法(Listwise Approach)。 184 | 185 | 在单文档方法中,我们希望成本向量的预测值$\hat{\mathbf{c}}$在可行解的子集$\Gamma$中的得分$\hat{\mathbf{c}}^{\top} \mathbf{w}$尽可能接近$\mathbf{c}^{\top} \mathbf{w}$;在文档对方法中,我们可以在最优解和其他解之间创造排序得分的差值;而在文档列表方法中,我们根据排序得分使用SoftMax函数计算每个可能解$\mathbf{w} \in \Gamma$被排在最前面的概率$P(\mathbf{w} | \mathbf{c})$,然后定义损失为概率的交叉熵: 186 | $$ 187 | \ell_{\text{LTR}} (\hat{\mathbf{c}},\mathbf{c}) = \frac{1}{|\Gamma|} \sum_{\mathbf{w} \in \Gamma} P(\mathbf{w} | \mathbf{c}) \log P(\mathbf{w} | \hat{\mathbf{c}})$$ 188 | 189 | ### 5.6 损失函数近似法 190 | 191 | 最后,我们来聊一个堪称邪道的方法——损失函数近似法。当我们的预测模型$\mathbf{g}(\mathbf{x},\boldsymbol{\theta})$预测出成本向量$\hat{\mathbf{c}}$后,我们需要寻找最优解$\mathbf{w}^* (\hat{\mathbf{c}})$,然后计算相应的决策损失$\ell(\hat{\mathbf{c}}, \mathbf{c})$。然而,这个过程面临着两个主要的问题:一是优化求解过程计算效率低下,二是损失函数$\ell(\hat{\mathbf{c}}, \mathbf{c})$可能不存在有效的梯度。 192 | 193 | 针对这些问题,Shah等人 [17] 提出了一个颇为惊人的方案:局部优化决策损失(Locally Optimized Decision Loss)。他们提出对于任意决策误差的损失函数$\ell(\hat{\mathbf{c}}, \mathbf{c})$,我们都可以使用一个额外的神经网络模型$h_{\text{LODL}} (\hat{\mathbf{c}}, \mathbf{c})$进行拟合。具体来说,他们通过采样预测成本向量和其对应的真实值$(\hat{\mathbf{c}}, \mathbf{c})$,训练近似函数模型$h_{\text{LODL}} (\hat{\mathbf{c}}, \mathbf{c})$,其损失定义为真实损失函数$\ell(\hat{\mathbf{c}}, \mathbf{c})$和近似损失函数$h_{\text{LODL}} (\hat{\mathbf{c}}, \mathbf{c})$的均方误差(MSE): 194 | 195 | $$ 196 | { \lVert \ell(\hat{\mathbf{c}}, \mathbf{c}) - h_{\text{LODL}} (\hat{\mathbf{c}}, \mathbf{c}) \rVert}^2 197 | $$ 198 | 199 | 接下来,我们固定好模型$h_{\text{LODL}} (\hat{\mathbf{c}}, \mathbf{c})$的参数,作为决策损失的近似。在这个近似的指导下,我们通过对$h_{\text{LODL}} (\mathbf{g}(\mathbf{x},\boldsymbol{\theta}), \mathbf{c})$执行梯度下降操作来更新预测模型$\mathbf{g}(\mathbf{x},\boldsymbol{\theta})$的参数$\boldsymbol{\theta}$。这个流程既避免了求解优化问题的计算成本,又确保了损失函数能够有效地计算梯度。 200 | 201 | ![LODL Procedure](media/je9idvsd97c0vxcj90ezk0e7v823b7asd.png) 202 | 203 | 虽然这种方法看似天方夜谭,实则根植于深度学习的一项核心理论——“万能近似定理(Universal Approximation Theorem)”,即神经网络理论上具备拟合任何函数的能力。事实上,值函数的近似是强化学习中的一种常见策略。因此,用类似的方法拟合端到端预测后优化的决策误差中也是行得通的。 204 | 205 | 这种策略优雅地规避了求解优化问题的计算效率瓶颈(尽管在训练近似函数$h_{\text{LODL}} (\hat{\mathbf{c}}, \mathbf{c})$的时候,优化求解仍然难以避免),同时充分利用了神经网络在拟合复杂损失函数方面的强大能力。然而,这也带来了额外的模型训练步骤,并且近似损失函数的准确性将直接影响到最终模型的表现。尽管理论上神经网络具备表示任何函数的能力,但在实践中,要训练神经网络有效地学习并近似特定函数可能并非易事。这涉及到一个复杂损失函数的优化问题,可能存在大量的局部最优解,而且可能受到过拟合、梯度消失、梯度爆炸等问题的影响。此外,这种方法在基准数据集上的性能尚缺乏详尽的比较,其实际效用仍待进一步探索和证明。 206 | 207 | 208 | ## 6 使用PyEPO进行端对端预测后优化 209 | 210 | PyEPO(PyTorch-based End-to-End Predict-then-Optimize Tool) [16] 是我读博期间的开发的工具,该工具的源代码已经发布在GitHub上,可以通过以下链接查找:。它是一款基于Python的开源软件,支持预测后优化问题的建模和求解。PyEPO的核心功能是使用CoptPy、GurobiPy、Pyomo或其他求解器和算法建立优化模型,然后将优化模型嵌入到人工神经网络中进行端到端训练。具体来说,PyEPO借助PyTorch autograd模块,实现了如SPO+、黑箱方法、扰动方法以及对比排序方法等多种策略的框架。具体使用方法可以查看[文档](https://khalil-research.github.io/PyEPO)。 211 | 212 | ![Logo](media/a31aaba4a573c4e2a74723f5d555bdf2.png) 213 | 214 | 
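为了更具体地感受“把优化求解器包装成 PyTorch autograd 模块”意味着什么,下面给出一个笔者补充的极简草图:针对只有有限个极点、目标为最小化 $\mathbf{c}^{\top} \mathbf{w}$ 的玩具问题,手写 5.2 节的 SPO+ 损失及其次梯度。它只用于演示原理,并非 PyEPO 的真实源码(实际使用时直接调用下文 6.5 节的 `pyepo.func.SPOPlus` 即可):

```python
import torch

class ToySPOPlus(torch.autograd.Function):
    """Illustrative sketch of the SPO+ loss (Sec 5.2); NOT PyEPO's real implementation."""

    @staticmethod
    def forward(ctx, cp, c, vertices):
        # enumerate extreme points in place of a real solver (minimization)
        def solve(cost):                                   # cost: (batch, d)
            idx = (cost @ vertices.T).argmin(dim=1)
            return vertices[idx]                           # (batch, d) solutions

        w_true = solve(c)                                  # w*(c)
        w_q = solve(2 * cp - c)                            # w*(2c_hat - c)
        # SPO+ loss: -min_w (2c_hat - c)^T w + 2 c_hat^T w*(c) - c^T w*(c)
        loss = (-((2 * cp - c) * w_q).sum(dim=1)
                + 2 * (cp * w_true).sum(dim=1)
                - (c * w_true).sum(dim=1))
        ctx.save_for_backward(w_true, w_q)
        return loss.mean()

    @staticmethod
    def backward(ctx, grad_output):
        w_true, w_q = ctx.saved_tensors
        # subgradient 2 w*(c) - 2 w*(2c_hat - c), scaled for the batch mean
        grad_cp = grad_output * (2 * w_true - 2 * w_q) / w_true.shape[0]
        return grad_cp, None, None   # no gradients for c and vertices
```

训练时把 `ToySPOPlus.apply(cp, c, vertices)` 当作普通损失调用,PyTorch 会在反向传播时自动套用上面的次梯度;PyEPO 中的 `pyepo.func` 各模块大体沿用这一思路,只是内部调用真正的求解器(COPT、Gurobi、Pyomo 等)并支持多进程并行求解。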
作为一款开源工具,PyEPO非常欢迎社区的反馈和贡献,我们也会持续更新并优化工具中的算法。 215 | 216 | ### 6.1 下载和安装 217 | 218 | 要下载PyEPO,你可以从GitHub仓库克隆: 219 | 220 | ```bash 221 | git clone -b main --depth 1 https://github.com/khalil-research/PyEPO.git 222 | ``` 223 | 224 | 之后进行安装: 225 | 226 | ```bash 227 | pip install PyEPO/pkg/. 228 | ``` 229 | 230 | ### 6.2 建立优化模型 231 | 232 | 使用PyEPO的第一步是创建一个继承于optModel类的优化模型。由于PyEPO处理未知成本系数的预测后优化,因此首先需要实例化一个具有固定约束和可变成本的优化模型optModel。这样一个优化模型可以接受不同的成本向量,并能够在固定的约束条件下找到相应的最优解。 233 | 234 | 在PyEPO中,optModel类的作用类似于一个黑箱对求解器进行封装,这意味着PyEPO并不一定要使用某种特定的算法或求解器。 235 | 236 | 对如下问题: 237 | 238 | $$ 239 | \begin{aligned} 240 | \underset{\mathbf{x}}{\max} & \sum_{i=0}^4 c_i x_i \\ 241 | s.t. \quad & 3 x_0 + 4 x_1 + 3 x_2 + 6 x_3 + 4 x_4 \leq 12 \\ 242 | & 4 x_0 + 5 x_1 + 2 x_2 + 3 x_3 + 5 x_4 \leq 10 \\ 243 | & 5 x_0 + 4 x_1 + 6 x_2 + 2 x_3 + 3 x_4 \leq 15 \\ 244 | & \forall x_i \in \{0, 1\} 245 | \end{aligned} 246 | $$ 247 | 248 | PyEPO也提供了COPT和Gurobi的API,用户能轻松地对各种优化问题进行建模,无需手动编写复杂的求解过程: 249 | 250 | #### COPT 251 | 252 | ```python 253 | from coptpy import COPT 254 | from coptpy import Envr 255 | from pyepo.model.copt import optCoptModel 256 | 257 | class myOptModel(optCoptModel): 258 | def _getModel(self): 259 | # ceate a model 260 | m = Envr().createModel() 261 | # varibles 262 | x = m.addVars(5, nameprefix='x', vtype=COPT.BINARY) 263 | # sense 264 | m.setObjSense(COPT.MAXIMIZE) 265 | # constraints 266 | m.addConstr(3*x[0]+4*x[1]+3*x[2]+6*x[3]+4*x[4]<=12) 267 | m.addConstr(4*x[0]+5*x[1]+2*x[2]+3*x[3]+5*x[4]<=10) 268 | m.addConstr(5*x[0]+4*x[1]+6*x[2]+2*x[3]+3*x[4]<=15) 269 | return m, x 270 | 271 | optmodel = myOptModel() 272 | ``` 273 | 274 | #### Gurobi 275 | 276 | ```python 277 | import gurobipy as gp 278 | from gurobipy import GRB 279 | from pyepo.model.grb import optGrbModel 280 | 281 | class myOptModel(optGrbModel): 282 | def _getModel(self): 283 | # ceate a model 284 | m = gp.Model() 285 | # varibles 286 | x = m.addVars(5, name="x", vtype=GRB.BINARY) 287 | # sense 288 | m.modelSense = GRB.MAXIMIZE 289 | # constraints 290 | m.addConstr(3*x[0]+4*x[1]+3*x[2]+6*x[3]+4*x[4]<=12) 291 | m.addConstr(4*x[0]+5*x[1]+2*x[2]+3*x[3]+5*x[4]<=10) 292 | m.addConstr(5*x[0]+4*x[1]+6*x[2]+2*x[3]+3*x[4]<=15) 293 | return m, x 294 | 295 | optmodel = myOptModel() 296 | ``` 297 | 298 | ### 6.3 生成数据集 299 | 300 | 我们用随机特征生成有高斯噪音的成本向量: 301 | 302 | ```python 303 | import torch 304 | torch.manual_seed(42) 305 | 306 | num_data = 1000 # number of data 307 | num_feat = 5 # feature dimension 308 | num_cost = 5 # cost dimension 309 | 310 | # randomly generate data 311 | x_true = torch.rand(num_data, num_feat) # feature 312 | weight_true = torch.rand(num_feat, num_cost) # weight 313 | bias_true = torch.randn(num_cost) # bias 314 | noise = 0.5 * torch.randn(num_data, num_cost) # random noise 315 | c_true = x_true @ weight_true + bias_true + noise # cost coef 316 | ``` 317 | 318 | 对于端到端预测后优化,只有成本向量$\mathbf{c}$作为标签是不够的,我们还需要最优解$\mathbf{w}^* (\mathbf{c})$和相应的目标函数值。因此,我们可以使用optDataset。optDataset是在PyTorch的Dataset类的基础上进行扩展的一个类,它允许我们利用optModel方便地获取求解数据,并且可以被PyTorch的DataLoader直接使用。 319 | 320 | ```python 321 | # split train test data 322 | from sklearn.model_selection import train_test_split 323 | x_train, x_test, c_train, c_test = train_test_split(x_true, c_true, test_size=200, random_state=42) 324 | 325 | # build optDataset 326 | from pyepo.data.dataset import optDataset 327 | dataset_train = optDataset(optmodel, x_train, c_train) 328 | dataset_test = optDataset(optmodel, x_test, c_test) 329 | 330 | # build DataLoader 331 | from torch.utils.data 
import DataLoader 332 | batch_size = 32 333 | loader_train = DataLoader(dataset_train, batch_size=batch_size, shuffle=True) 334 | loader_test = DataLoader(dataset_test, batch_size=batch_size, shuffle=False) 335 | ``` 336 | 337 | ### 6.4 建立预测模型 338 | 339 | 由于PyEPO是基于PyTorch构建的,所以我们可以像平常一样使用PyTorch进行模型的搭建,函数的使用,以及模型的训练等操作。这为使用各种深度学习技术的用户提供了极大的便利。下面,我们将建立一个简单的线性回归模型作为预测模型: 340 | 341 | ```python 342 | import torch 343 | from torch import nn 344 | 345 | # build linear model 346 | class LinearRegression(nn.Module): 347 | def __init__(self): 348 | super(LinearRegression, self).__init__() 349 | self.linear = nn.Linear(num_feat, num_cost) 350 | 351 | def forward(self, x): 352 | out = self.linear(x) 353 | return out 354 | 355 | # init model 356 | reg = LinearRegression() 357 | # cuda 358 | if torch.cuda.is_available(): 359 | reg = reg.cuda() 360 | ``` 361 | 362 | ### 6.5 模型的训练和测试 363 | 364 | PyEPO的核心组件就是它的autograd优化模块,可以方便地调用前文中提到的各种方法,比如: 365 | 366 | #### SPO+ 367 | 368 | ```python 369 | import pyepo 370 | # init SPO+ loss 371 | spop = pyepo.func.SPOPlus(optmodel, processes=2) 372 | ``` 373 | 374 | #### 扰动方法 375 | 376 | ```python 377 | import pyepo 378 | # init perturbed optimizer layer 379 | ptb = pyepo.func.perturbedOpt(optmodel, n_samples=3, sigma=1.0, processes=2) 380 | # init perturbed Fenchel-Younge loss 381 | pfy = pyepo.func.perturbedFenchelYoung(optmodel, n_samples=3, sigma=1.0, processes=2) 382 | ``` 383 | 384 | #### 黑箱方法 385 | 386 | ```python 387 | import pyepo 388 | # init dbb optimizer layer 389 | dbb = pyepo.func.blackboxOpt(optmodel, lambd=20, processes=2) 390 | # init optimizer layer with identity grad 391 | nid = pyepo.func.negativeIdentity(optmodel, processes=2) 392 | ``` 393 | 394 | #### 对比、排序方法 395 | 396 | ```python 397 | import pyepo 398 | # init NCE loss 399 | nce = pyepo.func.NCE(optmodel, processes=2, solve_ratio=0.05, dataset=dataset_train) 400 | # init constrastive MAP loss 401 | cmap = pyepo.func.contrastiveMAP(optmodel, processes=2, solve_ratio=0.05, dataset=dataset_train) 402 | ``` 403 | 404 | ```python 405 | import pyepo 406 | # init pointwise LTR loss 407 | ltr = pyepo.func.pointwiseLTR(optmodel, processes=2, solve_ratio=0.05, dataset=dataset_train) 408 | # init pairwise LTR loss 409 | ltr = pyepo.func.pairwiseLTR(optmodel, processes=2, solve_ratio=0.05, dataset=dataset_train) 410 | # init listwise LTR loss 411 | ltr = pyepo.func.listwiseLTR(optmodel, processes=2, solve_ratio=0.05, dataset=dataset_train) 412 | ``` 413 | 414 | #### 训练 415 | 416 | 417 | 接下来,以SPO+为例,我们可以正常使用PyTorch进行模型训练: 418 | 419 | ```python 420 | # set adam optimizer 421 | optimizer = torch.optim.Adam(reg.parameters(), lr=5e-3) 422 | 423 | # train mode 424 | reg.train() 425 | for epoch in range(5): 426 | # load data 427 | for i, data in enumerate(loader_train): 428 | x, c, w, z = data # feat, cost, sol, obj 429 | # cuda 430 | if torch.cuda.is_available(): 431 | x, c, w, z = x.cuda(), c.cuda(), w.cuda(), z.cuda() 432 | # forward pass 433 | cp = reg(x) 434 | # spo+ loss 435 | loss = spop(cp, c, w, z) 436 | # backward pass 437 | optimizer.zero_grad() 438 | loss.backward() 439 | optimizer.step() 440 | # log 441 | regret = pyepo.metric.regret(reg, optmodel, loader_test) 442 | print("Loss: {:9.4f}, Regret: {:7.4f}%".format(loss.item(), regret*100)) 443 | ``` 444 | 445 | 由于不同的模块可能有不同的输入输出,在使用这些模块时,我们需要特别关注各模块的接口文档,确保我们的输入输出数据与其兼容,避免出现不一致的情况。 446 | 447 | 以扰动优化(perturbedOpt)为例,其训练过程和SPO+有所不同: 448 | 449 | ```python 450 | # set adam optimizer 451 | optimizer = torch.optim.Adam(reg.parameters(), lr=5e-3) 452 | # 
set some loss 453 | l1 = nn.L1Loss() 454 | 455 | # train mode 456 | reg.train() 457 | for epoch in range(5): 458 | # load data 459 | for i, data in enumerate(loader_train): 460 | x, c, w, z = data # feat, cost, sol, obj 461 | # cuda 462 | if torch.cuda.is_available(): 463 | x, c, w, z = x.cuda(), c.cuda(), w.cuda(), z.cuda() 464 | # forward pass 465 | cp = reg(x) 466 | # perturbed optimizer 467 | we = ptb(cp) 468 | # loss 469 | loss = l1(we, w) 470 | # backward pass 471 | optimizer.zero_grad() 472 | loss.backward() 473 | optimizer.step() 474 | # log 475 | regret = pyepo.metric.regret(reg, optmodel, loader_test) 476 | print("Loss: {:9.4f}, Regret: {:7.4f}%".format(loss.item(), regret*100)) 477 | ``` 478 | 479 | ## 7 结语 480 | 481 | 端到端预测后优化是一项有趣的工作,也正是这项工作激发了我对优化和机器学习的深入探索。我无比敬佩在这个领域中工作的研究者们,他们提出的各种方法都有着独特的理论支撑和应用价值。我们明白这只是一个开始,端对端预测后优化这个领域还有有许多新的问题和理论等待我们去探索。我期待在未来的研究中,我们可以继续深化对这个领域的理解,发现更多的可能性。 482 | 483 | 484 | ## *参考文献* 485 | 486 | [1] Elmachtoub, A. N., & Grigas, P. (2021). Smart “predict, then optimize”. Management Science. 487 | 488 | [2] Mandi, J., Stuckey, P. J., & Guns, T. (2020). Smart predict-and-optimize for hard combinatorial optimization problems. In Proceedings of the AAAI Conference on Artificial Intelligence. 489 | 490 | [3] Pogančić, M. V., Paulus, A., Musil, V., Martius, G., & Rolinek, M. (2019, September). Differentiation of blackbox combinatorial solvers. In International Conference on Learning Representations. 491 | 492 | [4] Berthet, Q., Blondel, M., Teboul, O., Cuturi, M., Vert, J. P., & Bach, F. (2020). Learning with differentiable pertubed optimizers. Advances in neural information processing systems, 33, 9508-9519. 493 | 494 | [5] Mulamba, M., Mandi, J., Diligenti, M., Lombardi, M., Bucarey, V., & Guns, T. (2021). Contrastive losses and solution caching for predict-and-optimize. Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence. 495 | 496 | [6] Mandi, J., Bucarey, V., Mulamba, M., & Guns, T. (2022). Decision-focused learning: through the lens of learning to rank. Proceedings of the 39th International Conference on Machine Learning. 497 | 498 | [7] Sahoo, S. S., Paulus, A., Vlastelica, M., Musil, V., Kuleshov, V., & Martius, G. (2022). Backpropagation through combinatorial algorithms: Identity with projection works. arXiv preprint arXiv:2205.15213. 499 | 500 | [8] Kervadec, H., Dolz, J., Yuan, J., Desrosiers, C., Granger, E., & Ayed, I. B. (2022, August). Constrained deep networks: Lagrangian optimization via log-barrier extensions. In 2022 30th European Signal Processing Conference (EUSIPCO) (pp. 962-966). IEEE. 501 | 502 | [9] Elmachtoub, A. N., Liang, J. C. N., & McNellis, R. (2020, November). Decision trees for decision-making under the predict-then-optimize framework. In International Conference on Machine Learning (pp. 2858-2867). PMLR. 503 | 504 | [10] Amos, B., & Kolter, J. Z. (2017, July). Optnet: Differentiable optimization as a layer in neural networks. In International Conference on Machine Learning (pp. 136-145). PMLR. 505 | 506 | [11] Wilder, B., Dilkina, B., & Tambe, M. (2019, July). Melding the data-decisions pipeline: Decision-focused learning for combinatorial optimization. In Proceedings of the AAAI Conference on Artificial Intelligence (Vol. 33, No. 01, pp. 1658-1665). 507 | 508 | [12] Mandi, J., & Guns, T. (2020). Interior point solving for lp-based prediction+ optimisation. Advances in Neural Information Processing Systems, 33, 7272-7282. 509 | 510 | [13] Ferber, A., Wilder, B., Dilkina, B., & Tambe, M. 
(2020, April). Mipaal: Mixed integer program as a layer. In Proceedings of the AAAI Conference on Artificial Intelligence (Vol. 34, No. 02, pp. 1504-1511). 511 | 512 | [14] Dalle, G., Baty, L., Bouvier, L., & Parmentier, A. (2022). Learning with combinatorial optimization layers: a probabilistic approach. arXiv preprint arXiv:2207.13513. 513 | 514 | [15] Liu, T. Y. (2009). Learning to rank for information retrieval. Foundations and Trends® in Information Retrieval, 3(3), 225-331. 515 | 516 | [16] Tang, B., & Khalil, E. B. (2022). PyEPO: A PyTorch-based end-to-end predict-then-optimize library for linear and integer programming. arXiv preprint arXiv:2206.14234. 517 | 518 | [17] Shah, S., Wilder, B., Perrault, A., & Tambe, M. (2022). Learning (local) surrogate loss functions for predict-then-optimize problems. arXiv e-prints, arXiv-2203. 519 | 520 | [18] Donti, P. L., Rolnick, D., & Kolter, J. Z. (2021). DC3: A learning method for optimization with hard constraints. arXiv preprint arXiv:2104.12225. 521 | -------------------------------------------------------------------------------- /media/044f4b46af76df9340c0e21795944381.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LucasBoTang/PyEPO-PredOpt-Chinese-Tutorial/c62cfb8a8a23f828dc2134fea346bc7df0739006/media/044f4b46af76df9340c0e21795944381.png -------------------------------------------------------------------------------- /media/81cbf99d80c180f7e810f50275b94d0f.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LucasBoTang/PyEPO-PredOpt-Chinese-Tutorial/c62cfb8a8a23f828dc2134fea346bc7df0739006/media/81cbf99d80c180f7e810f50275b94d0f.png -------------------------------------------------------------------------------- /media/a31aaba4a573c4e2a74723f5d555bdf2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LucasBoTang/PyEPO-PredOpt-Chinese-Tutorial/c62cfb8a8a23f828dc2134fea346bc7df0739006/media/a31aaba4a573c4e2a74723f5d555bdf2.png -------------------------------------------------------------------------------- /media/df9889a4142bfb2523e1a67d849f72eb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LucasBoTang/PyEPO-PredOpt-Chinese-Tutorial/c62cfb8a8a23f828dc2134fea346bc7df0739006/media/df9889a4142bfb2523e1a67d849f72eb.png -------------------------------------------------------------------------------- /media/ffa7d65c364918231fad932f5c088abe.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LucasBoTang/PyEPO-PredOpt-Chinese-Tutorial/c62cfb8a8a23f828dc2134fea346bc7df0739006/media/ffa7d65c364918231fad932f5c088abe.png -------------------------------------------------------------------------------- /media/je9idvsd97c0vxcj90ezk0e7v823b7asd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LucasBoTang/PyEPO-PredOpt-Chinese-Tutorial/c62cfb8a8a23f828dc2134fea346bc7df0739006/media/je9idvsd97c0vxcj90ezk0e7v823b7asd.png -------------------------------------------------------------------------------- /media/media.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LucasBoTang/PyEPO-PredOpt-Chinese-Tutorial/c62cfb8a8a23f828dc2134fea346bc7df0739006/media/media.pptx 
-------------------------------------------------------------------------------- /当机器学习PyTorch遇到运筹学COPT.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LucasBoTang/PyEPO-PredOpt-Chinese-Tutorial/c62cfb8a8a23f828dc2134fea346bc7df0739006/当机器学习PyTorch遇到运筹学COPT.pdf --------------------------------------------------------------------------------