├── docs ├── Project.toml ├── src │ ├── api.md │ ├── future.md │ ├── index.md │ ├── notes.md │ └── new.md └── make.jl ├── .gitignore ├── test ├── using_JuMP │ ├── data │ │ ├── SalesData.xlsx │ │ ├── Cereal.txt │ │ ├── Soccer.txt │ │ └── StarWars.csv │ ├── problem_modification.jl │ ├── nonlinear_modelling.jl │ └── working_with_data_files.jl ├── introduction │ ├── getting_started_with_JuMP.jl │ ├── solvers_and_solutions.jl │ ├── variables_constraints_objective.jl │ └── an_introduction_to_julia.jl ├── optimization_concepts │ ├── integer_programming.jl │ ├── conic_programming.jl │ └── benders_decomposition.jl ├── modelling │ ├── experiment_design.jl │ ├── network_flows.jl │ ├── sudoku.jl │ ├── finance.jl │ ├── rocket_control.jl │ ├── problems_on_graphs.jl │ ├── geometric_problems.jl │ └── power_systems.jl └── runtests.jl ├── script ├── using_JuMP │ ├── data │ │ ├── SalesData.xlsx │ │ ├── Cereal.txt │ │ ├── Soccer.txt │ │ └── StarWars.csv │ ├── problem_modification.jl │ ├── nonlinear_modelling.jl │ └── working_with_data_files.jl ├── introduction │ ├── getting_started_with_JuMP.jl │ ├── variables_constraints_objective.jl │ └── solvers_and_solutions.jl ├── modelling │ ├── sudoku.jl │ ├── network_flows.jl │ ├── rocket_control.jl │ ├── experiment_design.jl │ ├── problems_on_graphs.jl │ └── geometric_problems.jl └── optimization_concepts │ ├── integer_programming.jl │ └── conic_programming.jl ├── notebook ├── modelling │ ├── img │ │ ├── power_systems.png │ │ ├── g1.gv │ │ ├── g3.gv │ │ ├── g2.gv │ │ ├── g1.svg │ │ ├── g3.svg │ │ └── g2.svg │ └── sudoku.ipynb ├── using_JuMP │ └── data │ │ ├── SalesData.xlsx │ │ ├── Cereal.txt │ │ ├── Soccer.txt │ │ └── StarWars.csv └── introduction │ └── getting_started_with_JuMP.ipynb ├── .travis.yml ├── LICENSE ├── Project.toml ├── src └── JuMPTutorials.jl └── README.md /docs/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" 3 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | /Manifest.toml 3 | /dev/ 4 | /docs/build/ 5 | /docs/site/ 6 | *.ipynb_checkpoints* 7 | -------------------------------------------------------------------------------- /docs/src/api.md: -------------------------------------------------------------------------------- 1 | # JuMPTutorials.jl 2 | 3 | ```@index 4 | ``` 5 | 6 | ```@autodocs 7 | Modules = [JuMPTutorials] 8 | ``` 9 | -------------------------------------------------------------------------------- /test/using_JuMP/data/SalesData.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/JuMPTutorials.jl/master/test/using_JuMP/data/SalesData.xlsx -------------------------------------------------------------------------------- /script/using_JuMP/data/SalesData.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/JuMPTutorials.jl/master/script/using_JuMP/data/SalesData.xlsx -------------------------------------------------------------------------------- /notebook/modelling/img/power_systems.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/JuMPTutorials.jl/master/notebook/modelling/img/power_systems.png -------------------------------------------------------------------------------- /notebook/using_JuMP/data/SalesData.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/JuMPTutorials.jl/master/notebook/using_JuMP/data/SalesData.xlsx -------------------------------------------------------------------------------- /notebook/modelling/img/g1.gv: -------------------------------------------------------------------------------- 1 | 
digraph { 2 | nodesep=1.0 3 | size="10.75,10.25" 4 | 1[rank=source] 5 | 2[rank=sink] 6 | 1 -> 2[label="100",weight="100"]; 7 | 1 -> 3[label="30",weight="30"]; 8 | 2 -> 3[label="20",weight="20"]; 9 | 3 -> 4[label="10",weight="10"]; 10 | 3 -> 5[label="60",weight="60"]; 11 | 4 -> 2[label="15",weight="15"]; 12 | 4 -> 5[label="50",weight="50"]; 13 | } 14 | -------------------------------------------------------------------------------- /notebook/modelling/img/g3.gv: -------------------------------------------------------------------------------- 1 | digraph { 2 | splines=false; 3 | nodesep=1; 4 | //rotate=90 5 | //rankdir=LR; 6 | size="10.75,10.25" 7 | 1 -> 2[label="3"]; 8 | 1 -> 3[label="2"]; 9 | 1 -> 4[label="2"]; 10 | 2 -> 5[label="5"]; 11 | 2 -> 6[label="1"]; 12 | 3 -> 5[label="1"]; 13 | 3 -> 6[label="3"]; 14 | 3 -> 7[label="1"]; 15 | 4 -> 6[label="1"]; 16 | 5 -> 8[label="4"]; 17 | 6 -> 8[label="2"]; 18 | 7 -> 8[label="4"]; 19 | } -------------------------------------------------------------------------------- /docs/make.jl: -------------------------------------------------------------------------------- 1 | using Documenter, JuMPTutorials 2 | 3 | makedocs( 4 | modules = [JuMPTutorials], 5 | format = Documenter.HTML(), 6 | sitename = "JuMPTutorials.jl", 7 | authors = "Arpit Bhatia", 8 | pages = [ 9 | "Home" => "index.md", 10 | "Adding a New Tutorial" => "new.md", 11 | "Notes" => "notes.md", 12 | "Future Work" => "future.md", 13 | "Function Index" => "api.md" 14 | ] 15 | ) 16 | 17 | Documenter.deploydocs(repo = "github.com/JuliaOpt/JuMPTutorials.jl.git") 18 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # Documentation: http://docs.travis-ci.com/user/languages/julia/ 2 | language: julia 3 | os: 4 | - linux 5 | julia: 6 | - 1.0 7 | - 1.4 8 | - nightly 9 | notifications: 10 | email: false 11 | jobs: 12 | allow_failures: 13 | - julia: 
nightly 14 | include: 15 | - stage: "Documentation" 16 | julia: 1.0 17 | os: linux 18 | script: 19 | - julia --project=docs/ -e 'using Pkg; Pkg.instantiate(); Pkg.develop(PackageSpec(path=pwd()))' 20 | - julia --color=yes --project=docs/ docs/make.jl 21 | after_success: skip 22 | -------------------------------------------------------------------------------- /notebook/modelling/img/g2.gv: -------------------------------------------------------------------------------- 1 | digraph { 2 | splines=false; 3 | j1[label=1>]; 4 | j2[label=2>]; 5 | j3[label=3>]; 6 | j4[label=4>]; 7 | 8 | 1 -> j1[label="6", weight="6"]; 9 | 1 -> j2[label="4", weight="4"]; 10 | 1 -> j3[label="5", weight="5"]; 11 | 2 -> j2[label="3", weight="3"]; 12 | 2 -> j3[label="6", weight="6"]; 13 | 3 -> j1[label="5", weight="5"]; 14 | 3 -> j3[label="4", weight="4"]; 15 | 3 -> j4[label="3", weight="3"]; 16 | 4 -> j1[label="7", weight="7"]; 17 | 4 -> j2[label="5", weight="5"]; 18 | 4 -> j3[label="5", weight="5"]; 19 | 4 -> j4[label="5", weight="5"]; 20 | } -------------------------------------------------------------------------------- /docs/src/future.md: -------------------------------------------------------------------------------- 1 | # Future Work 2 | 3 | The following are ongoing dev efforts where things are subject to change and hence should be worked on only after they are finalized. 
4 | 5 | - Callbacks 6 | - Column generation 7 | - Row generation 8 | - Nonlinear Modeling 9 | - JuMP Extensions 10 | 11 | ## Skipped Tutorials 12 | 13 | The following tutorials were discussed and skipped based on the reason specified- 14 | 15 | - Sensitivity Analysis - Solver Attributes not supported by MOI right now 16 | - Duality - Theoretical in nature, add points to other notebooks 17 | - Using Different Julia Data Types - Topic not very relevant, useful packages are covered in different tutorials 18 | - Exploiting Sparsity - Topic not very relevant as JuMP is already based on spare data structure 19 | - Notebooks on Performance - Unsure of exact content -------------------------------------------------------------------------------- /test/introduction/getting_started_with_JuMP.jl: -------------------------------------------------------------------------------- 1 | 2 | using JuMP 3 | using GLPK 4 | 5 | model = Model(with_optimizer(GLPK.Optimizer)) 6 | @variable(model, x >= 0) 7 | @variable(model, y >= 0) 8 | @constraint(model, 6x + 8y >= 100) 9 | @constraint(model, 7x + 12y >= 120) 10 | @objective(model, Min, 12x + 20y) 11 | 12 | optimize!(model) 13 | 14 | @show value(x); 15 | @show value(y); 16 | @show objective_value(model); 17 | 18 | 19 | using JuMP 20 | 21 | 22 | using GLPK 23 | 24 | 25 | model = Model(with_optimizer(GLPK.Optimizer)); 26 | 27 | 28 | @variable(model, x >= 0) 29 | @variable(model, y >= 0); 30 | 31 | 32 | @constraint(model, 6x + 8y >= 100) 33 | @constraint(model, 7x + 12y >= 120); 34 | 35 | 36 | @objective(model, Min, 12x + 20y); 37 | 38 | 39 | optimize!(model) 40 | 41 | 42 | @show value(x); 43 | @show value(y); 44 | @show objective_value(model); 45 | 46 | -------------------------------------------------------------------------------- /test/using_JuMP/data/Cereal.txt: -------------------------------------------------------------------------------- 1 | Name Cups Calories Carbs Fat Fiber Potassium Protein Sodium Sugars 2 | CapnCrunch 0.75 120 
12 2 0 35 1 220 12 3 | CocoaPuffs 1 110 12 1 0 55 1 180 13 4 | Trix 1 110 13 1 0 25 1 140 12 5 | AppleJacks 1 110 11 0 1 30 2 125 14 6 | CornChex 1 110 22 0 0 25 2 280 3 7 | CornFlakes 1 100 21 0 1 35 2 290 2 8 | Nut&Honey 0.67 120 15 1 0 40 2 190 9 9 | Smacks 0.75 110 9 1 1 40 2 70 15 10 | MultiGrain 1 100 15 1 2 90 2 220 6 11 | CracklinOat 0.5 110 10 3 4 160 3 140 7 12 | GrapeNuts 0.25 110 17 0 3 90 3 179 3 13 | HoneyNutCheerios 0.75 110 11.5 1 1.5 90 3 250 10 14 | NutriGrain 0.67 140 21 2 3 130 3 220 7 15 | Product19 1 100 20 0 1 45 3 320 3 16 | TotalRaisinBran 1 140 15 1 4 230 3 190 14 17 | WheatChex 0.67 100 17 1 3 115 3 230 3 18 | Oatmeal 0.5 130 13.5 2 1.5 120 3 170 10 19 | Life 0.67 100 12 2 2 95 4 150 6 20 | Maypo 1 100 16 1 0 95 4 0 3 21 | QuakerOats 0.5 100 14 1 2 110 4 135 6 22 | Muesli 1 150 16 3 3 170 4 150 11 23 | Cheerios 1.25 110 17 2 2 105 6 290 1 24 | SpecialK 1 110 16 0 1 55 6 230 3 25 | -------------------------------------------------------------------------------- /notebook/using_JuMP/data/Cereal.txt: -------------------------------------------------------------------------------- 1 | Name Cups Calories Carbs Fat Fiber Potassium Protein Sodium Sugars 2 | CapnCrunch 0.75 120 12 2 0 35 1 220 12 3 | CocoaPuffs 1 110 12 1 0 55 1 180 13 4 | Trix 1 110 13 1 0 25 1 140 12 5 | AppleJacks 1 110 11 0 1 30 2 125 14 6 | CornChex 1 110 22 0 0 25 2 280 3 7 | CornFlakes 1 100 21 0 1 35 2 290 2 8 | Nut&Honey 0.67 120 15 1 0 40 2 190 9 9 | Smacks 0.75 110 9 1 1 40 2 70 15 10 | MultiGrain 1 100 15 1 2 90 2 220 6 11 | CracklinOat 0.5 110 10 3 4 160 3 140 7 12 | GrapeNuts 0.25 110 17 0 3 90 3 179 3 13 | HoneyNutCheerios 0.75 110 11.5 1 1.5 90 3 250 10 14 | NutriGrain 0.67 140 21 2 3 130 3 220 7 15 | Product19 1 100 20 0 1 45 3 320 3 16 | TotalRaisinBran 1 140 15 1 4 230 3 190 14 17 | WheatChex 0.67 100 17 1 3 115 3 230 3 18 | Oatmeal 0.5 130 13.5 2 1.5 120 3 170 10 19 | Life 0.67 100 12 2 2 95 4 150 6 20 | Maypo 1 100 16 1 0 95 4 0 3 21 | QuakerOats 0.5 100 14 1 
2 110 4 135 6 22 | Muesli 1 150 16 3 3 170 4 150 11 23 | Cheerios 1.25 110 17 2 2 105 6 290 1 24 | SpecialK 1 110 16 0 1 55 6 230 3 25 | -------------------------------------------------------------------------------- /script/using_JuMP/data/Cereal.txt: -------------------------------------------------------------------------------- 1 | Name Cups Calories Carbs Fat Fiber Potassium Protein Sodium Sugars 2 | CapnCrunch 0.75 120 12 2 0 35 1 220 12 3 | CocoaPuffs 1 110 12 1 0 55 1 180 13 4 | Trix 1 110 13 1 0 25 1 140 12 5 | AppleJacks 1 110 11 0 1 30 2 125 14 6 | CornChex 1 110 22 0 0 25 2 280 3 7 | CornFlakes 1 100 21 0 1 35 2 290 2 8 | Nut&Honey 0.67 120 15 1 0 40 2 190 9 9 | Smacks 0.75 110 9 1 1 40 2 70 15 10 | MultiGrain 1 100 15 1 2 90 2 220 6 11 | CracklinOat 0.5 110 10 3 4 160 3 140 7 12 | GrapeNuts 0.25 110 17 0 3 90 3 179 3 13 | HoneyNutCheerios 0.75 110 11.5 1 1.5 90 3 250 10 14 | NutriGrain 0.67 140 21 2 3 130 3 220 7 15 | Product19 1 100 20 0 1 45 3 320 3 16 | TotalRaisinBran 1 140 15 1 4 230 3 190 14 17 | WheatChex 0.67 100 17 1 3 115 3 230 3 18 | Oatmeal 0.5 130 13.5 2 1.5 120 3 170 10 19 | Life 0.67 100 12 2 2 95 4 150 6 20 | Maypo 1 100 16 1 0 95 4 0 3 21 | QuakerOats 0.5 100 14 1 2 110 4 135 6 22 | Muesli 1 150 16 3 3 170 4 150 11 23 | Cheerios 1.25 110 17 2 2 105 6 290 1 24 | SpecialK 1 110 16 0 1 55 6 230 3 25 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2019 Arpit Bhatia 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the 
following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 20 | -------------------------------------------------------------------------------- /script/using_JuMP/data/Soccer.txt: -------------------------------------------------------------------------------- 1 | "Team"::"Played"::"Wins"::"Draws"::"Losses"::"Goals_for"::"Goals_against" 2 | "Barcelona"::38::30::4::4::"110 goals"::"21 goals" 3 | "Real Madrid"::38::30::2::6::"118 goals"::"38 goals" 4 | "Atletico Madrid"::38::23::9::6::"67 goals"::"29 goals" 5 | "Valencia"::38::22::11::5::"70 goals"::"32 goals" 6 | "Seville"::38::23::7::8::"71 goals"::"45 goals" 7 | "Villarreal"::38::16::12::10::"48 goals"::"37 goals" 8 | "Athletic Bilbao"::38::15::10::13::"42 goals"::"41 goals" 9 | "Celta Vigo"::38::13::12::13::"47 goals"::"44 goals" 10 | "Malaga"::38::14::8::16::"42 goals"::"48 goals" 11 | "Espanyol"::38::13::10::15::"47 goals"::"51 goals" 12 | "Rayo Vallecano"::38::15::4::19::"46 goals"::"68 goals" 13 | "Real Sociedad"::38::11::13::14::"44 goals"::"51 goals" 14 | "Elche"::38::11::8::19::"35 goals"::"62 goals" 15 | "Levante"::38::9::10::19::"34 goals"::"67 goals" 16 | "Getafe"::38::10::7::21::"33 goals"::"64 goals" 17 | "Deportivo La Coruna"::38::7::14::17::"35 goals"::"60 goals" 18 | "Granada"::38::7::14::17::"29 goals"::"64 goals" 19 | "Eibar"::38::9::8::21::"34 goals"::"55 goals" 20 | "Almeria"::38::8::8::22::"35 
goals"::"64 goals" 21 | "Cordoba"::38::3::11::24::"22 goals"::"68 goals" 22 | -------------------------------------------------------------------------------- /test/using_JuMP/data/Soccer.txt: -------------------------------------------------------------------------------- 1 | "Team"::"Played"::"Wins"::"Draws"::"Losses"::"Goals_for"::"Goals_against" 2 | "Barcelona"::38::30::4::4::"110 goals"::"21 goals" 3 | "Real Madrid"::38::30::2::6::"118 goals"::"38 goals" 4 | "Atletico Madrid"::38::23::9::6::"67 goals"::"29 goals" 5 | "Valencia"::38::22::11::5::"70 goals"::"32 goals" 6 | "Seville"::38::23::7::8::"71 goals"::"45 goals" 7 | "Villarreal"::38::16::12::10::"48 goals"::"37 goals" 8 | "Athletic Bilbao"::38::15::10::13::"42 goals"::"41 goals" 9 | "Celta Vigo"::38::13::12::13::"47 goals"::"44 goals" 10 | "Malaga"::38::14::8::16::"42 goals"::"48 goals" 11 | "Espanyol"::38::13::10::15::"47 goals"::"51 goals" 12 | "Rayo Vallecano"::38::15::4::19::"46 goals"::"68 goals" 13 | "Real Sociedad"::38::11::13::14::"44 goals"::"51 goals" 14 | "Elche"::38::11::8::19::"35 goals"::"62 goals" 15 | "Levante"::38::9::10::19::"34 goals"::"67 goals" 16 | "Getafe"::38::10::7::21::"33 goals"::"64 goals" 17 | "Deportivo La Coruna"::38::7::14::17::"35 goals"::"60 goals" 18 | "Granada"::38::7::14::17::"29 goals"::"64 goals" 19 | "Eibar"::38::9::8::21::"34 goals"::"55 goals" 20 | "Almeria"::38::8::8::22::"35 goals"::"64 goals" 21 | "Cordoba"::38::3::11::24::"22 goals"::"68 goals" 22 | -------------------------------------------------------------------------------- /notebook/using_JuMP/data/Soccer.txt: -------------------------------------------------------------------------------- 1 | "Team"::"Played"::"Wins"::"Draws"::"Losses"::"Goals_for"::"Goals_against" 2 | "Barcelona"::38::30::4::4::"110 goals"::"21 goals" 3 | "Real Madrid"::38::30::2::6::"118 goals"::"38 goals" 4 | "Atletico Madrid"::38::23::9::6::"67 goals"::"29 goals" 5 | "Valencia"::38::22::11::5::"70 goals"::"32 goals" 6 | 
"Seville"::38::23::7::8::"71 goals"::"45 goals" 7 | "Villarreal"::38::16::12::10::"48 goals"::"37 goals" 8 | "Athletic Bilbao"::38::15::10::13::"42 goals"::"41 goals" 9 | "Celta Vigo"::38::13::12::13::"47 goals"::"44 goals" 10 | "Malaga"::38::14::8::16::"42 goals"::"48 goals" 11 | "Espanyol"::38::13::10::15::"47 goals"::"51 goals" 12 | "Rayo Vallecano"::38::15::4::19::"46 goals"::"68 goals" 13 | "Real Sociedad"::38::11::13::14::"44 goals"::"51 goals" 14 | "Elche"::38::11::8::19::"35 goals"::"62 goals" 15 | "Levante"::38::9::10::19::"34 goals"::"67 goals" 16 | "Getafe"::38::10::7::21::"33 goals"::"64 goals" 17 | "Deportivo La Coruna"::38::7::14::17::"35 goals"::"60 goals" 18 | "Granada"::38::7::14::17::"29 goals"::"64 goals" 19 | "Eibar"::38::9::8::21::"34 goals"::"55 goals" 20 | "Almeria"::38::8::8::22::"35 goals"::"64 goals" 21 | "Cordoba"::38::3::11::24::"22 goals"::"68 goals" 22 | -------------------------------------------------------------------------------- /test/using_JuMP/problem_modification.jl: -------------------------------------------------------------------------------- 1 | 2 | using JuMP 3 | 4 | 5 | model = Model() 6 | @variable(model, x); 7 | 8 | 9 | set_lower_bound(x, 3) 10 | lower_bound(x) 11 | 12 | 13 | set_lower_bound(x, 2) 14 | lower_bound(x) 15 | 16 | 17 | delete_lower_bound(x) 18 | has_lower_bound(x) 19 | 20 | 21 | fix(x, 5) 22 | fix_value(x) 23 | 24 | 25 | @variable(model, y >= 0); 26 | 27 | 28 | fix(y, 2; force = true) 29 | fix_value(y) 30 | 31 | 32 | unfix(x) 33 | is_fixed(x) 34 | 35 | 36 | all_variables(model) 37 | 38 | 39 | delete(model, x) 40 | all_variables(model) 41 | 42 | 43 | is_valid(model, x) 44 | 45 | 46 | model = Model() 47 | @variable(model, x); 48 | 49 | 50 | @constraint(model, con, 2x <= 1); 51 | 52 | 53 | set_normalized_coefficient(con, x, 3) 54 | con 55 | 56 | 57 | delete(model, con) 58 | is_valid(model, con) 59 | 60 | 61 | model = Model() 62 | @variable(model, x) 63 | @objective(model, Min, 7x + 4); 64 | 65 | 66 | 
objective_function(model) 67 | 68 | 69 | objective_sense(model) 70 | 71 | 72 | @objective(model, Max, 8x + 3) 73 | objective_function(model) 74 | 75 | 76 | objective_sense(model) 77 | 78 | 79 | set_objective_function(model, 5x + 11) 80 | objective_function(model) 81 | 82 | 83 | set_objective_sense(model, MOI.MIN_SENSE) 84 | objective_sense(model) 85 | 86 | -------------------------------------------------------------------------------- /test/optimization_concepts/integer_programming.jl: -------------------------------------------------------------------------------- 1 | 2 | using JuMP, Random 3 | 4 | Random.seed!(1234); 5 | 6 | 7 | a = rand(1:100, 5, 5) 8 | c = rand(1:100, 5, 5) 9 | b = rand(1:100, 5) 10 | d = rand(1:100, 5) 11 | 12 | model = Model() 13 | @variable(model, x[1:5]) 14 | @variable(model, y, Bin) 15 | @constraint(model, a * x .>= y .* b) 16 | @constraint(model, c * x .>= (1 - y) .* d); 17 | 18 | 19 | a = rand(1:100, 5, 5) 20 | b = rand(1:100, 5) 21 | m = rand(10000:11000, 5) 22 | 23 | model = Model() 24 | @variable(model, x[1:5]) 25 | @variable(model, z, Bin) 26 | @constraint(model, a * x .<= b .+ (m .* (1 - z))); 27 | # If z was a regular Julia variable, we would not have had to use the vectorized dot operator 28 | 29 | 30 | model = Model() 31 | 32 | @variable(model, x) 33 | @variable(model, y) 34 | @constraint(model, x in MOI.ZeroOne()) 35 | @constraint(model, y in MOI.Integer()); 36 | 37 | 38 | l = 7.45 39 | u = 22.22 40 | @variable(model, a) 41 | @constraint(model, a in MOI.Semicontinuous(l, u)) 42 | 43 | 44 | l = 5 45 | u = 34 46 | @variable(model, b) 47 | @constraint(model, b in MOI.Semiinteger(l, u)) 48 | 49 | 50 | @variable(model, u[1:3]) 51 | @constraint(model, u in MOI.SOS1([1.0, 2.0, 3.0])) 52 | 53 | 54 | @variable(model, v[1:3]) 55 | @constraint(model, v in MOI.SOS2([3.0, 1.0, 2.0])) 56 | 57 | -------------------------------------------------------------------------------- /Project.toml: 
-------------------------------------------------------------------------------- 1 | name = "JuMPTutorials" 2 | uuid = "767a2c38-803a-11e9-191e-47b580bc80c9" 3 | authors = ["Arpit Bhatia "] 4 | version = "0.1.0" 5 | 6 | [deps] 7 | CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b" 8 | Cbc = "9961bab8-2fa3-5c5a-9d89-47fab24efd76" 9 | Colors = "5ae59095-9a9b-59fe-a467-6f913c188581" 10 | DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" 11 | ECOS = "e2685f51-7e38-5353-a97d-a921fd2c8199" 12 | GLPK = "60bf3e95-4087-53dc-ae20-288a0d20c6a6" 13 | Gadfly = "c91e804a-d5a3-530f-b6f0-dfbca275c004" 14 | GraphPlot = "a2cc645c-3eea-5389-862e-a155d0052231" 15 | Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" 16 | JuMP = "4076af6c-e467-56ae-b986-b466b2749572" 17 | LightGraphs = "093fc24a-ae57-5d10-9952-331d41423f4d" 18 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 19 | MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" 20 | Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" 21 | SCS = "c946c3f1-0d1f-5ce8-9dea-7daa1f7e2d13" 22 | Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" 23 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 24 | Weave = "44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9" 25 | XLSX = "fdbf4ff8-1666-58a4-91e7-1b58723a45e0" 26 | 27 | [compat] 28 | CSV = "0.6" 29 | DataFrames = "0.20" 30 | JuMP = "0.20" 31 | LightGraphs = "1" 32 | Weave = "0.9" 33 | XLSX = "0.6" 34 | julia = "1" 35 | -------------------------------------------------------------------------------- /docs/src/index.md: -------------------------------------------------------------------------------- 1 | # JuMPTutorials.jl 2 | This repository contains tutorials on JuMP, a domain-specific modeling language for [mathematical optimization](http://en.wikipedia.org/wiki/Mathematical_optimization) embedded in [Julia](http://julialang.org/). 3 | 4 | ## Structure 5 | 6 | The base file for every tutorial is a regular Julia script 7 | which is converted into a Jupyter Notebook using Weave.jl for ease of access. 
8 | This approach makes it easier to compare diffs and track files in Git compared to entire Jupyter notebooks. 9 | It also allows us to set up CI testing for the tutorials to ensure that they produce the expected output 10 | and don’t suffer from bit rot over time. 11 | 12 | The base files are present in the script folder inside a subfolder of the relevant category. 13 | Jupyter notebooks generated using Weave.jl are found in the notebook folder. 14 | The tests folder contains relevant code extracted from the base files for testing and 15 | the src folder has the Weave.jl utilities used for conversion. 16 | 17 | ## Contributors 18 | 19 | - Arpit Bhatia ([@barpit20](https://github.com/barpit20)) 20 | - Chris Coey ([@chriscoey](https://github.com/chriscoey)) 21 | - Lea Kapelevich ([@lkapelevich](https://github.com/lkapelevich)) 22 | - Joaquim Dias Garcia ([@joaquimg](https://github.com/joaquimg)) 23 | - Juan Pablo Vielma ([@juan-pablo-vielma](https://github.com/juan-pablo-vielma)) 24 | - Iain Dunning ([@IainNZ](https://github.com/IainNZ)) 25 | - Miles Lubin ([@mlubin](https://github.com/mlubin)) 26 | - Shuvomoy Das Gupta ([@Shuvomoy](https://github.com/Shuvomoy)) 27 | - [Yury Dvorkin](https://wp.nyu.edu/dvorkin/about-yury/) -------------------------------------------------------------------------------- /test/modelling/experiment_design.jl: -------------------------------------------------------------------------------- 1 | 2 | using JuMP 3 | using SCS 4 | using LinearAlgebra 5 | using Random 6 | 7 | Random.seed!(1234); 8 | 9 | q = 4 # dimension of estimate space 10 | p = 8 # number of experimental vectors 11 | nmax = 3 # upper bound on lambda 12 | n = 12 13 | 14 | V = randn(q, p) 15 | 16 | eye = Matrix{Float64}(I, q, q); 17 | 18 | 19 | aOpt = Model(with_optimizer(SCS.Optimizer, verbose = 0)) 20 | @variable(aOpt, np[1:p], lower_bound = 0, upper_bound = nmax) 21 | @variable(aOpt, u[1:q], lower_bound = 0) 22 | 23 | @constraint(aOpt, sum(np) <= n) 24 | for i = 1:q 25 | 
@SDconstraint(aOpt, [V * diagm(0 => np ./ n) * V' eye[:, i]; eye[i, :]' u[i]] >= 0) 26 | end 27 | 28 | @objective(aOpt, Min, sum(u)) 29 | 30 | optimize!(aOpt) 31 | 32 | @show objective_value(aOpt); 33 | @show value.(np); 34 | 35 | 36 | eOpt = Model(with_optimizer(SCS.Optimizer, verbose = 0)) 37 | @variable(eOpt, np[1:p], lower_bound = 0, upper_bound = nmax) 38 | @variable(eOpt, t) 39 | 40 | @SDconstraint(eOpt, V * diagm(0 => np ./ n) * V' - (t .* eye) >= 0) 41 | @constraint(eOpt, sum(np) <= n) 42 | 43 | @objective(eOpt, Max, t) 44 | 45 | optimize!(eOpt) 46 | 47 | @show objective_value(eOpt); 48 | @show value.(np); 49 | 50 | 51 | dOpt = Model(with_optimizer(SCS.Optimizer, verbose = 0)) 52 | @variable(dOpt, np[1:p], lower_bound = 0, upper_bound = nmax) 53 | @variable(dOpt, t) 54 | @objective(dOpt, Max, t) 55 | @constraint(dOpt, sum(np) <= n) 56 | E = V * diagm(0 => np ./ n) * V' 57 | @constraint(dOpt, [t, 1, (E[i, j] for i in 1:q for j in 1:i)...] in MOI.LogDetConeTriangle(q)) 58 | 59 | optimize!(dOpt) 60 | 61 | @show objective_value(dOpt); 62 | @show value.(np); 63 | 64 | -------------------------------------------------------------------------------- /test/using_JuMP/nonlinear_modelling.jl: -------------------------------------------------------------------------------- 1 | 2 | using JuMP, Ipopt 3 | model = Model(with_optimizer(Ipopt.Optimizer)); 4 | 5 | 6 | @variable(model, x, start = 4) 7 | @variable(model, y, start = -9.66); 8 | 9 | 10 | @NLparameter(model, p == 0.003); # Providing a starting value is necessary for parameters 11 | @NLparameter(model, l[i = 1:10] == 4 - i); # A collection of parameters 12 | 13 | 14 | value(l[1]) 15 | 16 | 17 | set_value(l[1], -4) 18 | value(l[1]) 19 | 20 | 21 | @NLexpression(model, expr_1, sin(x)) 22 | @NLexpression(model, expr_2, asin(expr_1)); # Inserting one expression into another 23 | 24 | 25 | @NLconstraint(model, exp(x) + y^4 <= 0) 26 | @NLobjective(model, Min, tan(x) + log(y)) 27 | 28 | 29 | my_function(a,b) = (a * b)^-6 
+ (b / a)^3 30 | register(model, :my_function, 2, my_function, autodiff = true) 31 | 32 | 33 | using Random, Statistics 34 | 35 | Random.seed!(1234) 36 | 37 | n = 1_000 38 | data = randn(n) 39 | 40 | mle = Model(with_optimizer(Ipopt.Optimizer, print_level = 0)) 41 | @NLparameter(mle, problem_data[i = 1:n] == data[i]) 42 | @variable(mle, μ, start = 0.0) 43 | @variable(mle, σ >= 0.0, start = 1.0) 44 | @NLexpression(mle, likelihood, 45 | (2 * π * σ^2)^(-n / 2) * exp(-(sum((problem_data[i] - μ)^2 for i in 1:n) / (2 * σ^2))) 46 | ) 47 | 48 | @NLobjective(mle, Max, log(likelihood)) 49 | 50 | optimize!(mle) 51 | 52 | println("μ = ", value(μ)) 53 | println("mean(data) = ", mean(data)) 54 | println("σ^2 = ", value(σ)^2) 55 | println("var(data) = ", var(data)) 56 | println("MLE objective: ", objective_value(mle)) 57 | 58 | 59 | # Changing the data 60 | 61 | data = randn(n) 62 | optimize!(mle) 63 | 64 | println("μ = ", value(μ)) 65 | println("mean(data) = ", mean(data)) 66 | println("σ^2 = ", value(σ)^2) 67 | println("var(data) = ", var(data)) 68 | println("MLE objective: ", objective_value(mle)) 69 | 70 | -------------------------------------------------------------------------------- /test/optimization_concepts/conic_programming.jl: -------------------------------------------------------------------------------- 1 | 2 | using JuMP 3 | using ECOS 4 | using Random 5 | 6 | Random.seed!(1234); 7 | 8 | 9 | u0 = rand(10) 10 | p = rand(10) 11 | q = rand(); 12 | 13 | 14 | model = Model(with_optimizer(ECOS.Optimizer, printlevel = 0)) 15 | @variable(model, u[1:10]) 16 | @variable(model, t) 17 | @objective(model, Min, t) 18 | @constraint(model, [t, (u - u0)...] in SecondOrderCone()) 19 | @constraint(model, u' * p == q) 20 | optimize!(model) 21 | 22 | 23 | @show objective_value(model); 24 | @show value.(u); 25 | 26 | 27 | e1 = [1, zeros(10)...] 
28 | dual_model = Model(with_optimizer(ECOS.Optimizer, printlevel = 0)) 29 | @variable(dual_model, y1 <= 0) 30 | @variable(dual_model, y2[1:11]) 31 | @objective(dual_model, Max, q * y1 + [0, u0...]' * y2) 32 | @constraint(dual_model, e1 - [0, p...] .* y1 - y2 .== 0) 33 | @constraint(dual_model, y2 in SecondOrderCone()) 34 | optimize!(dual_model) 35 | 36 | 37 | @show objective_value(dual_model); 38 | 39 | 40 | model = Model(with_optimizer(ECOS.Optimizer, printlevel = 0)) 41 | @variable(model, u[1:10]) 42 | @variable(model, t) 43 | @objective(model, Min, t) 44 | @constraint(model, [t, 0.5, (u - u0)...] in RotatedSecondOrderCone()) 45 | @constraint(model, u' * p == q) 46 | optimize!(model) 47 | 48 | 49 | @show value.(u); 50 | 51 | 52 | n = 15; 53 | m = 10; 54 | A = randn(m, n); 55 | b = rand(m, 1); 56 | 57 | model = Model(with_optimizer(ECOS.Optimizer, printlevel = 0)) 58 | @variable(model, t[1:n]) 59 | @variable(model, x[1:n]) 60 | @objective(model, Max, sum(t)) 61 | @constraint(model, sum(x) == 1) 62 | @constraint(model, A * x .<= b ) 63 | # Cannot use the exponential cone directly in JuMP, hence we use MOI to specify the set. 
64 | @constraint(model, con[i = 1:n], [1, x[i], t[i]] in MOI.ExponentialCone()) 65 | 66 | optimize!(model); 67 | 68 | 69 | @show objective_value(model); 70 | 71 | -------------------------------------------------------------------------------- /test/using_JuMP/data/StarWars.csv: -------------------------------------------------------------------------------- 1 | Name,Gender,Height,Weight,Eyecolor,Haircolor,Skincolor,Homeland,Born,Died,Jedi,Species,Weapon 2 | Anakin Skywalker,male,1.88,84,blue,blond,fair,Tatooine,41.9BBY,4ABY,jedi,human,lightsaber 3 | Padme Amidala,female,1.65,45,brown,brown,light,Naboo,46BBY,19BBY,no_jedi,human,unarmed 4 | Luke Skywalker,male,1.72,77,blue,blond,fair,Tatooine,19BBY,unk_died,jedi,human,lightsaber 5 | Leia Skywalker,female,1.5,49,brown,brown,light,Alderaan,19BBY,unk_died,no_jedi,human,blaster 6 | Qui-Gon Jinn,male,1.93,88.5,blue,brown,light,unk_planet,92BBY,32BBY,jedi,human,lightsaber 7 | Obi-Wan Kenobi,male,1.82,77,bluegray,auburn,fair,Stewjon,57BBY,0BBY,jedi,human,lightsaber 8 | Han Solo,male,1.8,80,brown,brown,light,Corellia,29BBY,unk_died,no_jedi,human,blaster 9 | Sheev Palpatine,male,1.73,75,blue,red,pale,Naboo,82BBY,10ABY,no_jedi,human,force-lightning 10 | R2-D2,male,0.96,32,NA,NA,NA,Naboo,33BBY,unk_died,no_jedi,droid,unarmed 11 | C-3PO,male,1.67,75,NA,NA,NA,Tatooine,112BBY,3ABY,no_jedi,droid,unarmed 12 | Yoda,male,0.66,17,brown,brown,green,unk_planet,896BBY,4ABY,jedi,yoda,lightsaber 13 | Darth Maul,male,1.75,80,yellow,none,red,Dathomir,54BBY,unk_died,no_jedi,dathomirian,lightsaber 14 | Dooku,male,1.93,86,brown,brown,light,Serenno,102BBY,19BBY,jedi,human,lightsaber 15 | Chewbacca,male,2.28,112,blue,brown,NA,Kashyyyk,200BBY,25ABY,no_jedi,wookiee,bowcaster 16 | Jabba,male,3.9,NA,yellow,none,tan-green,Tatooine,unk_born,4ABY,no_jedi,hutt,unarmed 17 | Lando Calrissian,male,1.78,79,brown,blank,dark,Socorro,31BBY,unk_died,no_jedi,human,blaster 18 | Boba Fett,male,1.83,78,brown,black,brown,Kamino,31.5BBY,unk_died,no_jedi,human,blaster 
19 | Jango Fett,male,1.83,79,brown,black,brown,ConcordDawn,66BBY,22BBY,no_jedi,human,blaster 20 | Grievous,male,2.16,159,gold,black,orange,Kalee,unk_born,19BBY,no_jedi,kaleesh,slugthrower 21 | Chief Chirpa,male,1,50,black,gray,brown,Endor,unk_born,4ABY,no_jedi,ewok,spear 22 | -------------------------------------------------------------------------------- /notebook/using_JuMP/data/StarWars.csv: -------------------------------------------------------------------------------- 1 | Name,Gender,Height,Weight,Eyecolor,Haircolor,Skincolor,Homeland,Born,Died,Jedi,Species,Weapon 2 | Anakin Skywalker,male,1.88,84,blue,blond,fair,Tatooine,41.9BBY,4ABY,jedi,human,lightsaber 3 | Padme Amidala,female,1.65,45,brown,brown,light,Naboo,46BBY,19BBY,no_jedi,human,unarmed 4 | Luke Skywalker,male,1.72,77,blue,blond,fair,Tatooine,19BBY,unk_died,jedi,human,lightsaber 5 | Leia Skywalker,female,1.5,49,brown,brown,light,Alderaan,19BBY,unk_died,no_jedi,human,blaster 6 | Qui-Gon Jinn,male,1.93,88.5,blue,brown,light,unk_planet,92BBY,32BBY,jedi,human,lightsaber 7 | Obi-Wan Kenobi,male,1.82,77,bluegray,auburn,fair,Stewjon,57BBY,0BBY,jedi,human,lightsaber 8 | Han Solo,male,1.8,80,brown,brown,light,Corellia,29BBY,unk_died,no_jedi,human,blaster 9 | Sheev Palpatine,male,1.73,75,blue,red,pale,Naboo,82BBY,10ABY,no_jedi,human,force-lightning 10 | R2-D2,male,0.96,32,NA,NA,NA,Naboo,33BBY,unk_died,no_jedi,droid,unarmed 11 | C-3PO,male,1.67,75,NA,NA,NA,Tatooine,112BBY,3ABY,no_jedi,droid,unarmed 12 | Yoda,male,0.66,17,brown,brown,green,unk_planet,896BBY,4ABY,jedi,yoda,lightsaber 13 | Darth Maul,male,1.75,80,yellow,none,red,Dathomir,54BBY,unk_died,no_jedi,dathomirian,lightsaber 14 | Dooku,male,1.93,86,brown,brown,light,Serenno,102BBY,19BBY,jedi,human,lightsaber 15 | Chewbacca,male,2.28,112,blue,brown,NA,Kashyyyk,200BBY,25ABY,no_jedi,wookiee,bowcaster 16 | Jabba,male,3.9,NA,yellow,none,tan-green,Tatooine,unk_born,4ABY,no_jedi,hutt,unarmed 17 | Lando 
Calrissian,male,1.78,79,brown,blank,dark,Socorro,31BBY,unk_died,no_jedi,human,blaster 18 | Boba Fett,male,1.83,78,brown,black,brown,Kamino,31.5BBY,unk_died,no_jedi,human,blaster 19 | Jango Fett,male,1.83,79,brown,black,brown,ConcordDawn,66BBY,22BBY,no_jedi,human,blaster 20 | Grievous,male,2.16,159,gold,black,orange,Kalee,unk_born,19BBY,no_jedi,kaleesh,slugthrower 21 | Chief Chirpa,male,1,50,black,gray,brown,Endor,unk_born,4ABY,no_jedi,ewok,spear 22 | -------------------------------------------------------------------------------- /script/using_JuMP/data/StarWars.csv: -------------------------------------------------------------------------------- 1 | Name,Gender,Height,Weight,Eyecolor,Haircolor,Skincolor,Homeland,Born,Died,Jedi,Species,Weapon 2 | Anakin Skywalker,male,1.88,84,blue,blond,fair,Tatooine,41.9BBY,4ABY,jedi,human,lightsaber 3 | Padme Amidala,female,1.65,45,brown,brown,light,Naboo,46BBY,19BBY,no_jedi,human,unarmed 4 | Luke Skywalker,male,1.72,77,blue,blond,fair,Tatooine,19BBY,unk_died,jedi,human,lightsaber 5 | Leia Skywalker,female,1.5,49,brown,brown,light,Alderaan,19BBY,unk_died,no_jedi,human,blaster 6 | Qui-Gon Jinn,male,1.93,88.5,blue,brown,light,unk_planet,92BBY,32BBY,jedi,human,lightsaber 7 | Obi-Wan Kenobi,male,1.82,77,bluegray,auburn,fair,Stewjon,57BBY,0BBY,jedi,human,lightsaber 8 | Han Solo,male,1.8,80,brown,brown,light,Corellia,29BBY,unk_died,no_jedi,human,blaster 9 | Sheev Palpatine,male,1.73,75,blue,red,pale,Naboo,82BBY,10ABY,no_jedi,human,force-lightning 10 | R2-D2,male,0.96,32,NA,NA,NA,Naboo,33BBY,unk_died,no_jedi,droid,unarmed 11 | C-3PO,male,1.67,75,NA,NA,NA,Tatooine,112BBY,3ABY,no_jedi,droid,unarmed 12 | Yoda,male,0.66,17,brown,brown,green,unk_planet,896BBY,4ABY,jedi,yoda,lightsaber 13 | Darth Maul,male,1.75,80,yellow,none,red,Dathomir,54BBY,unk_died,no_jedi,dathomirian,lightsaber 14 | Dooku,male,1.93,86,brown,brown,light,Serenno,102BBY,19BBY,jedi,human,lightsaber 15 | 
Chewbacca,male,2.28,112,blue,brown,NA,Kashyyyk,200BBY,25ABY,no_jedi,wookiee,bowcaster 16 | Jabba,male,3.9,NA,yellow,none,tan-green,Tatooine,unk_born,4ABY,no_jedi,hutt,unarmed 17 | Lando Calrissian,male,1.78,79,brown,blank,dark,Socorro,31BBY,unk_died,no_jedi,human,blaster 18 | Boba Fett,male,1.83,78,brown,black,brown,Kamino,31.5BBY,unk_died,no_jedi,human,blaster 19 | Jango Fett,male,1.83,79,brown,black,brown,ConcordDawn,66BBY,22BBY,no_jedi,human,blaster 20 | Grievous,male,2.16,159,gold,black,orange,Kalee,unk_born,19BBY,no_jedi,kaleesh,slugthrower 21 | Chief Chirpa,male,1,50,black,gray,brown,Endor,unk_born,4ABY,no_jedi,ewok,spear 22 | -------------------------------------------------------------------------------- /test/introduction/solvers_and_solutions.jl: -------------------------------------------------------------------------------- 1 | 2 | using JuMP 3 | using GLPK 4 | 5 | 6 | model_auto = Model(with_optimizer(GLPK.Optimizer)) 7 | @variable(model_auto, 0 <= x <= 1) 8 | @variable(model_auto, 0 <= y <= 1) 9 | @constraint(model_auto, x + y <= 1) 10 | @objective(model_auto, Max, x + 2y) 11 | optimize!(model_auto) 12 | objective_value(model_auto) 13 | 14 | 15 | model_auto_no = Model() 16 | @variable(model_auto_no, 0 <= x <= 1) 17 | @variable(model_auto_no, 0 <= y <= 1) 18 | @constraint(model_auto_no, x + y <= 1) 19 | @objective(model_auto_no, Max, x + 2y) 20 | optimize!(model_auto_no, with_optimizer(GLPK.Optimizer)) 21 | objective_value(model_auto_no) 22 | 23 | 24 | model_manual = Model(with_optimizer(GLPK.Optimizer),caching_mode = MOIU.MANUAL) 25 | @variable(model_manual, 0 <= x <= 1) 26 | @variable(model_manual, 0 <= y <= 1) 27 | @constraint(model_manual, x + y <= 1) 28 | @objective(model_manual, Max, x + 2y) 29 | MOIU.attach_optimizer(model_manual) 30 | optimize!(model_manual) 31 | objective_value(model_manual) 32 | 33 | 34 | model_direct = direct_model(GLPK.Optimizer()) 35 | @variable(model_direct, 0 <= x <= 1) 36 | @variable(model_direct, 0 <= y <= 1) 37 | 
@constraint(model_direct, x + y <= 1) 38 | @objective(model_direct, Max, x + 2y) 39 | optimize!(model_direct) 40 | objective_value(model_direct) 41 | 42 | 43 | using Cbc 44 | 45 | 46 | model = Model(with_optimizer(Cbc.Optimizer, logLevel = 0)); 47 | 48 | 49 | model = Model(with_optimizer(Cbc.Optimizer, max_iters = 10000)); 50 | 51 | 52 | model = Model(with_optimizer(Cbc.Optimizer, seconds = 5)); 53 | 54 | 55 | termination_status(model_auto) 56 | 57 | 58 | display(typeof(MOI.OPTIMAL)) 59 | 60 | 61 | primal_status(model_auto) 62 | 63 | 64 | dual_status(model_auto) 65 | 66 | 67 | display(typeof(MOI.FEASIBLE_POINT)) 68 | 69 | 70 | @show value(x) 71 | @show value(y) 72 | @show objective_value(model_auto) 73 | 74 | -------------------------------------------------------------------------------- /docs/src/notes.md: -------------------------------------------------------------------------------- 1 | # Notes 2 | 3 | ## Using Solvers and Other Additional Packages 4 | Certain solvers may require a license or an additional installation. Since we want to test the tutorials using Travis CI and run them in the browser using Binder, we should always try to go for the solver which works out of the box with just a Package install. In case this is not possible, do not add the tutorial to the `runtests.jl` file and add a line that it does not work with Binder. Note that this issue is not specific to solvers and may arise with other Julia packages as well. 5 | 6 | ## Tangle 7 | We use the tangle feature of Weave.jl to generate files for testing as it allows us to extract code from a file. This enables us to skip certain blocks of code we would not want for tests. These include: 8 | ### Deliberate Errors 9 | Tutorials might contain examples of what not to do and hence some code blocks can throw errors on purpose. These blocks cause tests to fail when we run the entire tutorial and hence they are skipped from tangle.
10 | ### Installing Packages 11 | Though code that installs packages will not cause tests to fail, it will slow them down. Since we are already adding all the required packages as dependencies, blocks that install packages are skipped from tangle. 12 | 13 | ## Citations 14 | Citations are added manually using links in markdown and not through tools like BibTeX. This method works for our use case as the number of citations per tutorial is quite small. Boilerplate code you can use is given below: 15 | 16 | ```julia 17 | #' Let's add a citation[[1]](#c1). 18 | #' Here's another one[[2]](#c2). 19 | 20 | #' ### References 21 | #' 22 | #' 1. First citation in plain text. 23 | #' 24 | #' 2. Second citation in plain text. 25 | ``` 26 | 27 | ## Graphics in the Notebooks 28 | The `notebook` function in Weave.jl currently does not support Gadfly plot output([116](https://github.com/mpastell/Weave.jl/issues/116)). In case a tutorial has plots, you'll have to open the generated notebook in Jupyter and from the menu select "Cell > Run All". -------------------------------------------------------------------------------- /test/modelling/network_flows.jl: -------------------------------------------------------------------------------- 1 | 2 | using JuMP 3 | using GLPK 4 | 5 | 6 | G = [ 7 | 0 100 30 0 0; 8 | 0 0 20 0 0; 9 | 0 0 0 10 60; 10 | 0 15 0 0 50; 11 | 0 0 0 0 0 12 | ] 13 | 14 | n = size(G)[1] 15 | 16 | shortest_path = Model(with_optimizer(GLPK.Optimizer)) 17 | 18 | @variable(shortest_path, x[1:n,1:n], Bin) 19 | @constraint(shortest_path, [i = 1:n, j = 1:n; G[i,j] == 0], x[i,j] == 0) # Arcs with zero cost are not a part of the path as they do not exist 20 | @constraint(shortest_path, [i = 1:n; i != 1 && i != 2], sum(x[i,:]) == sum(x[:,i])) # Flow conservation constraint 21 | @constraint(shortest_path, sum(x[1,:]) - sum(x[:,1]) == 1) # Flow coming out of source = 1 22 | @constraint(shortest_path, sum(x[2,:]) - sum(x[:,2]) == -1) # Flow coming out of destination = -1 i.e.
Flow entering destination = 1 23 | @objective(shortest_path, Min, sum(G .* x)) 24 | 25 | optimize!(shortest_path) 26 | @show objective_value(shortest_path); 27 | @show value.(x); 28 | 29 | 30 | G = [ 31 | 6 4 5 0; 32 | 0 3 6 0; 33 | 5 0 4 3; 34 | 7 5 5 5; 35 | ] 36 | 37 | n = size(G)[1] 38 | 39 | assignment = Model(with_optimizer(GLPK.Optimizer)) 40 | @variable(assignment, y[1:n,1:n], Bin) 41 | @constraint(assignment, [i = 1:n], sum(y[:,i]) == 1) # One person can only be assigned to one object 42 | @constraint(assignment, [j = 1:n], sum(y[j,:]) == 1) # One object can only be assigned to one person 43 | @objective(assignment, Max, sum(G .* y)) 44 | 45 | optimize!(assignment) 46 | @show objective_value(assignment); 47 | @show value.(y); 48 | 49 | 50 | G = [ 51 | 0 3 2 2 0 0 0 0 52 | 0 0 0 0 5 1 0 0 53 | 0 0 0 0 1 3 1 0 54 | 0 0 0 0 0 1 0 0 55 | 0 0 0 0 0 0 0 4 56 | 0 0 0 0 0 0 0 2 57 | 0 0 0 0 0 0 0 4 58 | 0 0 0 0 0 0 0 0 59 | ] 60 | 61 | n = size(G)[1] 62 | 63 | max_flow = Model(with_optimizer(GLPK.Optimizer)) 64 | 65 | @variable(max_flow, f[1:n,1:n] >= 0) 66 | @constraint(max_flow, [i = 1:n, j = 1:n], f[i,j] <= G[i,j]) # Capacity constraints 67 | @constraint(max_flow, [i = 1:n; i != 1 && i != 8], sum(f[i,:]) == sum(f[:,i])) # Flow conservation contraints 68 | @objective(max_flow, Max, sum(f[1, :])) 69 | 70 | optimize!(max_flow) 71 | @show objective_value(max_flow); 72 | @show value.(f); 73 | 74 | -------------------------------------------------------------------------------- /test/using_JuMP/working_with_data_files.jl: -------------------------------------------------------------------------------- 1 | 2 | using DataFrames 3 | using XLSX 4 | 5 | 6 | data_dir = joinpath(@__DIR__, "data") 7 | excel_df = DataFrame(XLSX.readtable(joinpath(data_dir, "SalesData.xlsx"), "SalesOrders")...) 
8 | 9 | 10 | using CSV 11 | csv_df = CSV.read(joinpath(data_dir, "StarWars.csv")) 12 | 13 | 14 | ss_df = CSV.read(joinpath(data_dir, "Cereal.txt")) 15 | 16 | 17 | delim_df = CSV.read(joinpath(data_dir, "Soccer.txt"), delim = "::") 18 | 19 | 20 | size(ss_df) 21 | 22 | 23 | nrow(ss_df), ncol(ss_df) 24 | 25 | 26 | describe(ss_df) 27 | 28 | 29 | names(ss_df) 30 | 31 | 32 | eltype.(ss_df) 33 | 34 | 35 | csv_df[1,1] 36 | 37 | 38 | csv_df[!, 1] 39 | 40 | 41 | csv_df[!, :Name] 42 | 43 | 44 | csv_df.Name 45 | 46 | 47 | csv_df[:, 1] # note that this creates a copy 48 | 49 | 50 | csv_df[1:1, :] 51 | 52 | 53 | csv_df[1, :] # this produces a DataFrameRow 54 | 55 | 56 | excel_df[1:3, 5] .= 1 57 | 58 | 59 | excel_df[4:6, 5] = [4, 5, 6] 60 | 61 | 62 | excel_df[1:2, 6:7] = DataFrame([-2 -2; -2 -2], [Symbol("Unit Cost"), :Total]) 63 | 64 | 65 | excel_df 66 | 67 | 68 | passportdata = CSV.read(joinpath(data_dir, "passport-index-matrix.csv"), copycols = true) 69 | 70 | for i in 1:nrow(passportdata) 71 | for j in 2:ncol(passportdata) 72 | if passportdata[i,j] == -1 || passportdata[i,j] == 3 73 | passportdata[i,j] = 1 74 | else 75 | passportdata[i,j] = 0 76 | end 77 | end 78 | end 79 | 80 | 81 | using JuMP, GLPK 82 | 83 | # Finding number of countries 84 | n = ncol(passportdata) - 1 # Subtract 1 for column representing country of passport 85 | 86 | model = Model(with_optimizer(GLPK.Optimizer)) 87 | @variable(model, pass[1:n], Bin) 88 | @constraint(model, [j = 2:n], sum(passportdata[i,j] * pass[i] for i in 1:n) >= 1) 89 | @objective(model, Min, sum(pass)) 90 | optimize!(model) 91 | 92 | println("Minimum number of passports needed: ", objective_value(model)) 93 | 94 | 95 | countryindex = findall(value.(pass) .== 1 ) 96 | 97 | print("Countries: ") 98 | for i in countryindex 99 | print(names(passportdata)[i+1], " ") 100 | end 101 | 102 | -------------------------------------------------------------------------------- /test/modelling/sudoku.jl: 
-------------------------------------------------------------------------------- 1 | 2 | using JuMP 3 | using GLPK 4 | 5 | 6 | # Create a model 7 | sudoku = Model(with_optimizer(GLPK.Optimizer)) 8 | 9 | # Create our variables 10 | @variable(sudoku, x[i=1:9, j=1:9, k=1:9], Bin); 11 | 12 | 13 | for i = 1:9, j = 1:9 # Each row and each column 14 | # Sum across all the possible digits 15 | # One and only one of the digits can be in this cell, 16 | # so the sum must be equal to one 17 | @constraint(sudoku, sum(x[i,j,k] for k in 1:9) == 1) 18 | end 19 | 20 | 21 | for ind = 1:9 # Each row, OR each column 22 | for k = 1:9 # Each digit 23 | # Sum across columns (j) - row constraint 24 | @constraint(sudoku, sum(x[ind,j,k] for j in 1:9) == 1) 25 | # Sum across rows (i) - column constraint 26 | @constraint(sudoku, sum(x[i,ind,k] for i in 1:9) == 1) 27 | end 28 | end 29 | 30 | 31 | for i = 1:3:7, j = 1:3:7, k = 1:9 32 | # i is the top left row, j is the top left column 33 | # We'll sum from i to i+2, e.g. i=4, r=4, 5, 6 34 | @constraint(sudoku, sum(x[r,c,k] for r in i:i+2, c in j:j+2) == 1) 35 | end 36 | 37 | 38 | # The given digits 39 | init_sol = [ 5 3 0 0 7 0 0 0 0; 40 | 6 0 0 1 9 5 0 0 0; 41 | 0 9 8 0 0 0 0 6 0; 42 | 8 0 0 0 6 0 0 0 3; 43 | 4 0 0 8 0 3 0 0 1; 44 | 7 0 0 0 2 0 0 0 6; 45 | 0 6 0 0 0 0 2 8 0; 46 | 0 0 0 4 1 9 0 0 5; 47 | 0 0 0 0 8 0 0 7 9] 48 | for i = 1:9, j = 1:9 49 | # If the space isn't empty 50 | if init_sol[i,j] != 0 51 | # Then the corresponding variable for that digit 52 | # and location must be 1 53 | @constraint(sudoku, x[i,j,init_sol[i,j]] == 1) 54 | end 55 | end 56 | 57 | # solve problem 58 | optimize!(sudoku) 59 | 60 | # Extract the values of x 61 | x_val = value.(x) 62 | # Create a matrix to store the solution 63 | sol = zeros(Int,9,9) # 9x9 matrix of integers 64 | for i in 1:9, j in 1:9, k in 1:9 65 | # Integer programs are solved as a series of linear programs 66 | # so the values might not be precisely 0 and 1. 
We can just 67 | # round them to the nearest integer to make it easier 68 | if round(Int,x_val[i,j,k]) == 1 69 | sol[i,j] = k 70 | end 71 | end 72 | # Display the solution 73 | sol 74 | 75 | -------------------------------------------------------------------------------- /test/introduction/variables_constraints_objective.jl: -------------------------------------------------------------------------------- 1 | 2 | using JuMP 3 | model = Model(); 4 | 5 | 6 | @variable(model, free_x) 7 | 8 | 9 | @variable(model, keyword_x, lower_bound = 1, upper_bound = 2) 10 | 11 | 12 | has_upper_bound(keyword_x) 13 | 14 | 15 | upper_bound(keyword_x) 16 | 17 | 18 | @variable(model, a[1:2, 1:2]) 19 | 20 | 21 | n = 10 22 | l = [1; 2; 3; 4; 5; 6; 7; 8; 9; 10] 23 | u = [10; 11; 12; 13; 14; 15; 16; 17; 18; 19] 24 | 25 | @variable(model, l[i] <= x[i = 1:n] <= u[i]) 26 | 27 | 28 | @variable(model, y[i = 1:2, j = 1:2] >= 2i + j) 29 | 30 | 31 | @variable(model, z[i = 2:3, j = 1:2:3] >= 0) 32 | 33 | 34 | @variable(model, w[1:5,["red", "blue"]] <= 1) 35 | 36 | 37 | @variable(model, u[i = 1:3, j = i:5]) 38 | 39 | 40 | @variable(model, v[i = 1:9; mod(i, 3) == 0]) 41 | 42 | 43 | @variable(model, integer_z, integer = true) 44 | 45 | 46 | @variable(model, binary_z, binary = true) 47 | 48 | 49 | @variable(model, psd_x[1:2, 1:2], PSD) 50 | 51 | 52 | @variable(model, sym_x[1:2, 1:2], Symmetric) 53 | 54 | 55 | model = Model() 56 | @variable(model, x) 57 | @variable(model, y) 58 | @variable(model, z[1:10]); 59 | 60 | 61 | @constraint(model, con, x <= 4) 62 | 63 | 64 | @constraint(model, [i = 1:3], i * x <= i + 1) 65 | 66 | 67 | @constraint(model, [i = 1:2, j = 2:3], i * x <= j + 1) 68 | 69 | 70 | @constraint(model, [i = 1:2, j = 1:2; i != j], i * x <= j + 1) 71 | 72 | 73 | for i in 1:3 74 | @constraint(model, 6x + 4y >= 5i) 75 | end 76 | 77 | 78 | @constraint(model, [i in 1:3], 6x + 4y >= 5i) 79 | 80 | 81 | @constraint(model, sum(z[i] for i in 1:10) <= 1) 82 | 83 | 84 | using GLPK 85 | 86 | model = 
Model(with_optimizer(GLPK.Optimizer)) 87 | @variable(model, x >= 0) 88 | @variable(model, y >= 0) 89 | set_objective_sense(model, MOI.MIN_SENSE) 90 | set_objective_function(model, x + y) 91 | 92 | optimize!(model) 93 | 94 | @show objective_value(model); 95 | 96 | 97 | objective_sense(model) 98 | 99 | 100 | objective_function(model) 101 | 102 | 103 | objective_function_type(model) 104 | 105 | 106 | vector_model = Model(with_optimizer(GLPK.Optimizer)) 107 | 108 | A= [ 1 1 9 5; 109 | 3 5 0 8; 110 | 2 0 6 13] 111 | 112 | b = [7; 3; 5] 113 | 114 | c = [1; 3; 5; 2] 115 | 116 | @variable(vector_model, x[1:4] >= 0) 117 | @constraint(vector_model, A * x .== b) 118 | @objective(vector_model, Min, c' * x) 119 | 120 | optimize!(vector_model) 121 | 122 | @show objective_value(vector_model); 123 | 124 | -------------------------------------------------------------------------------- /test/modelling/finance.jl: -------------------------------------------------------------------------------- 1 | 2 | using JuMP 3 | using GLPK 4 | 5 | 6 | financing = Model(with_optimizer(GLPK.Optimizer)) 7 | 8 | @variables(financing, begin 9 | 0 <= u[1:5] <= 100 10 | 0 <= v[1:3] 11 | 0 <= w[1:5] 12 | m 13 | end) 14 | 15 | @objective(financing, Max, m) # Money at the end of June 16 | 17 | @constraints(financing, begin 18 | u[1] + v[1] - w[1] == 150 # January 19 | u[2] + v[2] - w[2] - 1.01u[1] + 1.003w[1] == 100 # February 20 | u[3] + v[3] - w[3] - 1.01u[2] + 1.003w[2] == -200 # March 21 | u[4] - w[4] - 1.02v[1] - 1.01u[3] + 1.003w[3] == 200 # April 22 | u[5] - w[5] - 1.02v[2] - 1.01u[4] + 1.003w[4] == -50 # May 23 | -m - 1.02v[3] - 1.01u[5] + 1.003w[5] == -300 # June 24 | end) 25 | 26 | optimize!(financing) 27 | @show objective_value(financing); 28 | 29 | 30 | bid_values = [6 3 12 12 8 16] 31 | bid_items = [[1], [2], [3 4], [1 3], [2 4], [1 3 4]] 32 | 33 | auction = Model(with_optimizer(GLPK.Optimizer)) 34 | @variable(auction, y[1:6], Bin) 35 | @objective(auction, Max, sum(y' .* bid_values)) 36 | for 
i in 1:6 37 | @constraint(auction, sum(y[j] for j in 1:6 if i in bid_items[j]) <= 1) 38 | end 39 | 40 | optimize!(auction) 41 | 42 | @show objective_value(auction); 43 | @show value.(y); 44 | 45 | 46 | using Statistics # Useful for calculations 47 | using Ipopt # Ipopt since our objective is quadratic 48 | 49 | stock_data = [ 50 | 93.043 51.826 1.063; 51 | 84.585 52.823 0.938; 52 | 111.453 56.477 1.000; 53 | 99.525 49.805 0.938; 54 | 95.819 50.287 1.438; 55 | 114.708 51.521 1.700; 56 | 111.515 51.531 2.540; 57 | 113.211 48.664 2.390; 58 | 104.942 55.744 3.120; 59 | 99.827 47.916 2.980; 60 | 91.607 49.438 1.900; 61 | 107.937 51.336 1.750; 62 | 115.590 55.081 1.800; 63 | ] 64 | 65 | # Calculating stock returns 66 | 67 | stock_returns = Array{Float64}(undef, 12, 3) 68 | 69 | for i in 1:12 70 | stock_returns[i, :] = (stock_data[i + 1, :] .- stock_data[i, :]) ./ stock_data[i, :] 71 | end 72 | 73 | # Calculating the expected value of monthly return 74 | 75 | r = [Statistics.mean(stock_returns[:,1]) Statistics.mean(stock_returns[:,2]) Statistics.mean(stock_returns[:,3])]' 76 | 77 | # Calculating the covariance matrix Q 78 | 79 | Q = Statistics.cov(stock_returns) 80 | 81 | 82 | # JuMP Model 83 | 84 | portfolio = Model(with_optimizer(Ipopt.Optimizer, print_level=0)) 85 | @variable(portfolio, x[1:3] >= 0) 86 | @objective(portfolio, Min, x' * Q * x) 87 | @constraint(portfolio, sum(x) <= 1000) 88 | @constraint(portfolio, sum(r .* x) >= 50) 89 | 90 | optimize!(portfolio) 91 | 92 | @show objective_value(portfolio); 93 | @show value.(x); 94 | 95 | -------------------------------------------------------------------------------- /docs/src/new.md: -------------------------------------------------------------------------------- 1 | # Adding a New Tutorial 2 | 3 | ## 1. Write the Tutorial 4 | 5 | Tutorials should be written in the form of a `.jl` file and 6 | added to the script folder inside a subfolder of the relevant category. 
7 | 8 | Lines starting with `#'` are used to denote markdown blocks. 9 | For consistency, we always use `#'` even though Weave supports using `#%%` or `# %%`. 10 | For ease of viewing and reviewing diff code, 11 | please use [Semantic Line Breaks](https://sembr.org/) in text. 12 | 13 | We use a YAML header in the beginning of the input document delimited with "---" to set the document title. 14 | The author's name is specified in the next line as "Originally Contributed by". 15 | We use this format as the code in the notebook may be updated in the future based on changes to JuMP or Julia. 16 | 17 | [Weave.jl tangle documentation](http://weavejl.mpastell.com/dev/usage/#Tangle-1) 18 | 19 | Boilerplate code you can use to get started is given below: 20 | 21 | ```julia 22 | #' --- 23 | #' title: Notebook Name 24 | #' --- 25 | 26 | #' **Originally Contributed by**: John Doe 27 | 28 | #' A Markdown Block 29 | 30 | print("Hello World") 31 | ``` 32 | 33 | Do have a look at the notes for handling specific cases. 34 | 35 | ## 2. Handle Additional Files 36 | 37 | In case a tutorial uses additional files (such as for reading data), 38 | it is recommended to download and add the files to this repository to prevent broken links in the future. 39 | The files must also be copied into the corresponding folders inside the `notebook` and `test` directories. 40 | In the case of images that are only used when the file is converted into a Jupyter Notebook, 41 | it is sufficient to only have them in the `notebook` directory. 42 | 43 | ## 3. Use Weave 44 | 45 | To generate the Jupyter Notebook and a separate Julia file for testing, run the `weave_file` function. 46 | 47 | ```julia 48 | using JuMPTutorials 49 | JuMPTutorials.weave_file("subfolder_name","tutorial_name") 50 | ``` 51 | 52 | ## 4. Add Tests 53 | 54 | Add the file generated inside the `test` folder to the `runtests.jl` file using 55 | the [`include`](https://docs.julialang.org/en/v1/base/base/#Base.include) function.
56 | This tests if the notebook runs without any errors. 57 | To check if the results produced are correct, add your own tests below the `include` function using the `@test` macro. 58 | It is recommended to use ≈ (\approx) for equality checks due to differences in numerical accuracies of solvers. You'll also need to add dependencies in case you use any new package. 59 | 60 | ## 5. Add to README 61 | 62 | Finally, add the tutorial to the table of contents in the `README.md` file. 63 | We use nbviewer to render the notebooks as webpages. 64 | Link the name of the tutorial to a URL of the following form: 65 | 66 | `https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/subfolder_name/tutorial_name.ipynb` -------------------------------------------------------------------------------- /test/introduction/an_introduction_to_julia.jl: -------------------------------------------------------------------------------- 1 | 2 | println("Hello, World!") 3 | 4 | 5 | typeof(1 + -2) 6 | 7 | 8 | typeof(1.2 - 2.3) 9 | 10 | 11 | π 12 | 13 | 14 | typeof(π) 15 | 16 | 17 | typeof(2 + 3im) 18 | 19 | 20 | typeof("This is Julia") 21 | 22 | 23 | typeof("π is about 3.1415") 24 | 25 | 26 | :my_id 27 | typeof(:my_id) 28 | 29 | 30 | 1 + 1 31 | 32 | 33 | (2 + 1im) * (1 - 2im) 34 | 35 | 36 | sin(2π/3) == √3/2 37 | 38 | 39 | sin(2π/3) - √3/2 40 | 41 | 42 | sin(2π/3) ≈ √3/2 43 | 44 | 45 | 1 + 1e-16 == 1 46 | 47 | 48 | (1 + 1e-16) - 1e-16 == 1 + (1e-16 - 1e-16) 49 | 50 | 51 | b = [5, 6] 52 | 53 | 54 | A = [1 2; 3 4] 55 | 56 | 57 | x = A \ b 58 | 59 | 60 | A * x 61 | 62 | 63 | A * x == b 64 | 65 | 66 | @show b' * b 67 | @show b * b'; 68 | 69 | 70 | t = ("hello", 1.2, :foo) 71 | 72 | 73 | typeof(t) 74 | 75 | 76 | t[2] 77 | 78 | 79 | a, b, c = t 80 | b 81 | 82 | 83 | t = (word="hello", num=1.2, sym=:foo) 84 | 85 | 86 | t.word 87 | 88 | 89 | d1 = Dict(1 => "A", 2 => "B", 4 => "D") 90 | 91 | 92 | d1[2] 93 | 94 | 95 | Dict("A" => 1, "B" => 2.5, "D" => 2 - 3im) 96 | 97 | 98 | d2 
= Dict("A" => 1, "B" => 2, "D" => Dict(:foo => 3, :bar => 4)) 99 | 100 | 101 | d2["B"] 102 | 103 | 104 | d2["D"][:foo] 105 | 106 | 107 | for i in 1:5 108 | println(i) 109 | end 110 | 111 | 112 | for i in [1.2, 2.3, 3.4, 4.5, 5.6] 113 | println(i) 114 | end 115 | 116 | 117 | for (key, value) in Dict("A" => 1, "B" => 2.5, "D" => 2 - 3im) 118 | println("$key: $value") 119 | end 120 | 121 | 122 | i = 10 123 | for i in 0:3:15 124 | if i < 5 125 | println("$(i) is less than 5") 126 | elseif i < 10 127 | println("$(i) is less than 10") 128 | else 129 | if i == 10 130 | println("the value is 10") 131 | else 132 | println("$(i) is bigger than 10") 133 | end 134 | end 135 | end 136 | 137 | 138 | [i for i in 1:5] 139 | 140 | 141 | [i*j for i in 1:5, j in 5:10] 142 | 143 | 144 | [i for i in 1:10 if i%2 == 1] 145 | 146 | 147 | Dict("$i" => i for i in 1:10 if i%2 == 1) 148 | 149 | 150 | function print_hello() 151 | println("hello") 152 | end 153 | print_hello() 154 | 155 | 156 | function print_it(x) 157 | println(x) 158 | end 159 | print_it("hello") 160 | print_it(1.234) 161 | print_it(:my_id) 162 | 163 | 164 | function print_it(x; prefix="value:") 165 | println("$(prefix) $x") 166 | end 167 | print_it(1.234) 168 | print_it(1.234, prefix="val:") 169 | 170 | 171 | function mult(x; y=2.0) 172 | return x * y 173 | end 174 | mult(4.0) 175 | 176 | 177 | mult(4.0, y=5.0) 178 | 179 | 180 | [1, 5, -2, 7] 181 | 182 | 183 | [1.0, 5.2, -2.1, 7] 184 | 185 | 186 | function mutability_example(mutable_type::Vector{Int}, immutable_type::Int) 187 | mutable_type[1] += 1 188 | immutable_type += 1 189 | return 190 | end 191 | 192 | mutable_type = [1, 2, 3] 193 | immutable_type = 1 194 | 195 | mutability_example(mutable_type, immutable_type) 196 | 197 | println("mutable_type: $(mutable_type)") 198 | println("immutable_type: $(immutable_type)") 199 | 200 | 201 | @show isimmutable([1, 2, 3]) 202 | @show isimmutable(1); 203 | 204 | 205 | using Random 206 | [rand() for i in 1:10] 207 | 208 | 
-------------------------------------------------------------------------------- /script/introduction/getting_started_with_JuMP.jl: -------------------------------------------------------------------------------- 1 | #' --- 2 | #' title: Getting Started with JuMP 3 | #' --- 4 | 5 | #' **Originally Contributed by**: Arpit Bhatia 6 | 7 | #' This tutorial is aimed at providing a quick introduction to writing JuMP code. It assumes familiarity with basic optimization and 8 | #' the notion of an [AML](https://en.wikipedia.org/wiki/Algebraic_modeling_language). 9 | 10 | #' ## What is JuMP? 11 | #' JuMP ("Julia for Mathematical Programming") is an open-source modeling language that is embedded in Julia. It allows users to 12 | #' formulate various classes of optimization problems (linear, mixed-integer, quadratic, conic quadratic, semidefinite, 13 | #' and nonlinear) with easy-to-read code. These problems can then be solved using state-of-the-art open-source and commercial solvers. 14 | #' JuMP also makes advanced optimization techniques easily accessible from a high-level language. 15 | 16 | #' ## Installing JuMP 17 | #' JuMP is a package for Julia. From Julia, JuMP is installed by using the built-in package manager. 18 | #+ tangle = false 19 | 20 | import Pkg 21 | Pkg.add("JuMP") 22 | 23 | #' ## A Complete Example 24 | #' Let's try to solve the following linear programming problem by using JuMP and GLPK (a linear and mixed integer programming 25 | #' solver). We will first look at the complete code to solve the problem and then go through it step by step.
26 | 27 | #' $$ 28 | #' \begin{align*} 29 | #' & \min & 12x + 20y \\ 30 | #' & \;\;\text{s.t.} & 6x + 8y \geq 100 \\ 31 | #' & & 7x + 12y \geq 120 \\ 32 | #' & & x \geq 0 \\ 33 | #' & & y \geq 0 \\ 34 | #' \end{align*} 35 | #' $$ 36 | 37 | using JuMP 38 | using GLPK 39 | 40 | model = Model(with_optimizer(GLPK.Optimizer)) 41 | @variable(model, x >= 0) 42 | @variable(model, y >= 0) 43 | @constraint(model, 6x + 8y >= 100) 44 | @constraint(model, 7x + 12y >= 120) 45 | @objective(model, Min, 12x + 20y) 46 | 47 | optimize!(model) 48 | 49 | @show value(x); 50 | @show value(y); 51 | @show objective_value(model); 52 | 53 | #' ## Step by Step JuMP Code 54 | #' Once JuMP is installed, to use JuMP in your programs, we just need to write- 55 | 56 | using JuMP 57 | 58 | #' We also need to include a Julia package which provides an appropriate solver. We want to use GLPK.Optimizer here which is 59 | #' provided by the GLPK.jl package. 60 | 61 | using GLPK 62 | 63 | #' A model object is a container for variables, constraints, solver options, etc. Models are created with the Model() function. 64 | #' The `with_optimizer` syntax is used to specify the optimizer to be used which is GLPK in this case. 65 | 66 | model = Model(with_optimizer(GLPK.Optimizer)); 67 | 68 | #' A variable is modelled using `@variable(name of the model object, variable name and bound, variable type)`. The bound can be a 69 | #' lower bound, an upper bound or both. If no variable type is defined, then it is treated as real. 70 | 71 | @variable(model, x >= 0) 72 | @variable(model, y >= 0); 73 | 74 | #' A constraint is modelled using `@constraint(name of the model object, constraint)`. 
75 | 76 | @constraint(model, 6x + 8y >= 100) 77 | @constraint(model, 7x + 12y >= 120); 78 | 79 | #' The objective is set in a similar manner using `@objective(name of the model object, Min/Max, function to be optimized)` 80 | 81 | @objective(model, Min, 12x + 20y); 82 | 83 | #' To solve the optimization problem, we call the optimize function. 84 | 85 | optimize!(model) 86 | 87 | #' Let's now check the value of objective and variables. 88 | 89 | @show value(x); 90 | @show value(y); 91 | @show objective_value(model); -------------------------------------------------------------------------------- /test/modelling/rocket_control.jl: -------------------------------------------------------------------------------- 1 | 2 | using JuMP, Ipopt 3 | 4 | # Create JuMP model, using Ipopt as the solver 5 | rocket = Model(with_optimizer(Ipopt.Optimizer, print_level=0)) 6 | 7 | # Constants 8 | # Note that all parameters in the model have been normalized 9 | # to be dimensionless. See the COPS3 paper for more info. 
10 | h_0 = 1 # Initial height 11 | v_0 = 0 # Initial velocity 12 | m_0 = 1 # Initial mass 13 | g_0 = 1 # Gravity at the surface 14 | 15 | T_c = 3.5 # Used for thrust 16 | h_c = 500 # Used for drag 17 | v_c = 620 # Used for drag 18 | m_c = 0.6 # Fraction of initial mass left at end 19 | 20 | c = 0.5 * sqrt(g_0 * h_0) # Thrust-to-fuel mass 21 | m_f = m_c * m_0 # Final mass 22 | D_c = 0.5 * v_c * m_0 / g_0 # Drag scaling 23 | T_max = T_c * g_0 * m_0 # Maximum thrust 24 | 25 | n = 800 # Time steps 26 | 27 | @variables(rocket, begin 28 | Δt ≥ 0, (start = 1/n) # Time step 29 | # State variables 30 | v[1:n] ≥ 0 # Velocity 31 | h[1:n] ≥ h_0 # Height 32 | m_f ≤ m[1:n] ≤ m_0 # Mass 33 | # Control 34 | 0 ≤ T[1:n] ≤ T_max # Thrust 35 | end) 36 | 37 | # Objective: maximize altitude at end of time of flight 38 | @objective(rocket, Max, h[n]) 39 | 40 | # Initial conditions 41 | @constraints(rocket, begin 42 | v[1] == v_0 43 | h[1] == h_0 44 | m[1] == m_0 45 | m[n] == m_f 46 | end) 47 | 48 | # Forces 49 | # Drag(h,v) = Dc v^2 exp( -hc * (h - h0) / h0 ) 50 | @NLexpression(rocket, drag[j = 1:n], D_c * (v[j]^2) * exp(-h_c * (h[j] - h_0) / h_0)) 51 | # Grav(h) = go * (h0 / h)^2 52 | @NLexpression(rocket, grav[j = 1:n], g_0 * (h_0 / h[j])^2) 53 | # Time of flight 54 | @NLexpression(rocket, t_f, Δt * n) 55 | 56 | # Dynamics 57 | for j in 2:n 58 | # h' = v 59 | 60 | # Rectangular integration 61 | # @NLconstraint(rocket, h[j] == h[j - 1] + Δt * v[j - 1]) 62 | 63 | # Trapezoidal integration 64 | @NLconstraint(rocket, 65 | h[j] == h[j - 1] + 0.5 * Δt * (v[j] + v[j - 1])) 66 | 67 | # v' = (T-D(h,v))/m - g(h) 68 | 69 | # Rectangular integration 70 | # @NLconstraint(rocket, v[j] == v[j - 1] + Δt *( 71 | # (T[j - 1] - drag[j - 1]) / m[j - 1] - grav[j - 1])) 72 | 73 | # Trapezoidal integration 74 | @NLconstraint(rocket, 75 | v[j] == v[j-1] + 0.5 * Δt * ( 76 | (T[j] - drag[j] - m[j] * grav[j]) / m[j] + 77 | (T[j - 1] - drag[j - 1] - m[j - 1] * grav[j - 1]) / m[j - 1])) 78 | 79 | # m' = -T/c 80 | 
81 | # Rectangular integration 82 | # @NLconstraint(rocket, m[j] == m[j - 1] - Δt * T[j - 1] / c) 83 | 84 | # Trapezoidal integration 85 | @NLconstraint(rocket, 86 | m[j] == m[j - 1] - 0.5 * Δt * (T[j] + T[j-1]) / c) 87 | end 88 | 89 | # Solve for the control and state 90 | println("Solving...") 91 | status = optimize!(rocket) 92 | 93 | # Display results 94 | # println("Solver status: ", status) 95 | println("Max height: ", objective_value(rocket)) 96 | 97 | 98 | # Can visualize the state and control variables 99 | using Gadfly 100 | 101 | 102 | h_plot = plot(x = (1:n) * value.(Δt), y = value.(h)[:], Geom.line, 103 | Guide.xlabel("Time (s)"), Guide.ylabel("Altitude")) 104 | m_plot = plot(x = (1:n) * value.(Δt), y = value.(m)[:], Geom.line, 105 | Guide.xlabel("Time (s)"), Guide.ylabel("Mass")) 106 | v_plot = plot(x = (1:n) * value.(Δt), y = value.(v)[:], Geom.line, 107 | Guide.xlabel("Time (s)"), Guide.ylabel("Velocity")) 108 | T_plot = plot(x = (1:n) * value.(Δt), y = value.(T)[:], Geom.line, 109 | Guide.xlabel("Time (s)"), Guide.ylabel("Thrust")) 110 | draw(SVG(6inch, 6inch), vstack(hstack(h_plot, m_plot), hstack(v_plot, T_plot))) 111 | 112 | -------------------------------------------------------------------------------- /test/modelling/problems_on_graphs.jl: -------------------------------------------------------------------------------- 1 | 2 | using JuMP 3 | using GLPK 4 | using GraphPlot 5 | using LightGraphs 6 | using Colors 7 | 8 | 9 | G = [ 10 | 0 1 0 0 0 0; 11 | 1 0 1 1 0 0; 12 | 0 1 0 0 1 1; 13 | 0 1 0 0 1 0; 14 | 0 0 1 1 0 0; 15 | 0 0 1 0 0 0 16 | ] 17 | 18 | g = SimpleGraph(G) 19 | 20 | gplot(g) 21 | 22 | 23 | vertex_cover = Model(with_optimizer(GLPK.Optimizer)) 24 | 25 | @variable(vertex_cover, y[1:nv(g)], Bin) 26 | @constraint(vertex_cover, [i = 1:nv(g), j = 1:nv(g); G[i,j] == 1], y[i] + y[j] >= 1) 27 | @objective(vertex_cover, Min, sum(y)) 28 | 29 | optimize!(vertex_cover) 30 | @show value.(y); 31 | 32 | 33 | membership = 
convert(Array{Int},value.(y)) # Change to Int 34 | membership = membership + ones(Int, nv(g)) # Make the color groups one indexed 35 | nodecolor = [colorant"red", colorant"blue"] # Blue to represent vertices in the cover 36 | nodefillc = nodecolor[membership] 37 | gplot(g, nodefillc = nodefillc) 38 | 39 | 40 | G = [ 41 | 0 1 0 0 0 0 0 0 0 1 0 ; 42 | 1 0 1 0 0 0 0 0 0 0 1; 43 | 0 1 0 1 0 1 0 0 0 0 0; 44 | 0 0 1 0 1 0 0 0 0 0 0; 45 | 0 0 0 1 0 1 0 0 0 0 0; 46 | 0 0 1 0 1 0 1 0 0 0 0; 47 | 0 0 0 0 0 1 0 1 0 0 0; 48 | 0 0 0 0 0 0 1 0 1 0 1; 49 | 0 0 0 0 0 0 0 1 0 1 1; 50 | 1 0 0 0 0 0 0 0 1 0 1; 51 | 0 1 0 0 0 0 0 1 1 1 0 52 | ] 53 | 54 | g = SimpleGraph(G) 55 | 56 | gplot(g) 57 | 58 | 59 | dominating_set = Model(with_optimizer(GLPK.Optimizer)) 60 | 61 | @variable(dominating_set, x[1:nv(g)], Bin) 62 | @constraint(dominating_set, [i = 1:nv(g)], sum(G[i,:] .* x) >= 1) 63 | @objective(dominating_set, Min, sum(x)) 64 | 65 | optimize!(dominating_set) 66 | @show value.(x); 67 | 68 | 69 | membership = convert(Array{Int},value.(x)) # Change to Int 70 | membership = membership + ones(Int, nv(g)) # Make the color groups one indexed 71 | nodecolor = [colorant"red", colorant"blue"] # Blue to represent vertices in the set 72 | nodefillc = nodecolor[membership] 73 | gplot(g, nodefillc = nodefillc) 74 | 75 | 76 | G = [ 77 | 0 0 0 0 1 0 0 0; 78 | 0 0 0 0 0 1 0 0; 79 | 0 0 0 0 0 0 1 0; 80 | 0 0 0 0 0 0 0 1; 81 | 1 0 0 0 0 1 0 1; 82 | 0 1 0 0 1 0 1 0; 83 | 0 0 1 0 0 1 0 1; 84 | 0 0 0 1 1 0 1 0; 85 | ] 86 | 87 | g = SimpleGraph(G) 88 | 89 | gplot(g) 90 | 91 | 92 | matching = Model(with_optimizer(GLPK.Optimizer)) 93 | 94 | @variable(matching, m[i = 1:nv(g), j = 1:nv(g)], Bin) 95 | @constraint(matching, [i = 1:nv(g)], sum(m[i,:]) <= 1) 96 | @constraint(matching, [i = 1:nv(g), j = 1:nv(g); G[i,j] == 0], m[i,j] == 0) 97 | @constraint(matching, [i = 1:nv(g), j = 1:nv(g)], m[i,j] == m[j,i]) 98 | @objective(matching, Max, sum(m)) 99 | 100 | optimize!(matching) 101 | @show value.(m); 102 | 103 | 
#' ---
#' title: Problem Modification
#' ---

#' **Originally Contributed by**: Arpit Bhatia

#' This tutorial deals with how to modify models after they have been created and solved.
#' This functionality can be useful, for example,
#' when we are solving many similar models in succession or generating the model dynamically.
#' Additionally it is sometimes desirable for the solver to re-start from the last solution to
#' reduce running times for successive solves (“hot-start”).

using JuMP

#' ## Modifying Variables

model = Model()
@variable(model, x);

#' ### Variable Bounds

#' The `set_lower_bound` and `set_upper_bound` functions can be used to create as well as
#' modify an existing variable bound.

set_lower_bound(x, 3)
lower_bound(x)

#+

set_lower_bound(x, 2)
lower_bound(x)

#' We can delete variable bounds using the `delete_lower_bound` and `delete_upper_bound` functions.

delete_lower_bound(x)
has_lower_bound(x)

#' We can assign a fixed value to a variable using `fix`.

fix(x, 5)
fix_value(x)

#' However, fixing a variable with existing bounds will throw an error.

@variable(model, y >= 0);

#+ tangle = false

fix(y, 2)

#' As we can see in the error message above,
#' we have to specify to JuMP that we wish to forcefully remove the bound.

fix(y, 2; force = true)
fix_value(y)

#' We can also call the `unfix` function to remove the fixed value.

unfix(x)
is_fixed(x)

#' ### Deleting Variables

#' The `all_variables` function returns a list of all variables present in the model.
all_variables(model)

#' To delete variables from a model, we can use the `delete` function.

delete(model, x)
all_variables(model)

#' We can also check whether a variable is a valid JuMP variable in a model using the `is_valid` function.

is_valid(model, x)


#' ## Modifying Constraints

model = Model()
@variable(model, x);

#' ### Modifying a Variable Coefficient
#' It is also possible to modify the scalar coefficients
#' (but notably not yet the quadratic coefficients) using the `set_normalized_coefficient` function.

@constraint(model, con, 2x <= 1);

#+

set_normalized_coefficient(con, x, 3)
con

#' ### Deleting a Constraint
#' Just like for deleting variables, we can use the `delete` function for constraints as well.

delete(model, con)
is_valid(model, con)

#' ## Modifying the Objective

model = Model()
@variable(model, x)
@objective(model, Min, 7x + 4);

#' The function `objective_function` is used to query the objective of a model.

objective_function(model)

#' `objective_sense` is similarly used to query the objective sense of a model.

objective_sense(model)

#' The easiest way to change the objective is to simply call `@objective` again
#' - the previous objective function and sense will be replaced.

@objective(model, Max, 8x + 3)
objective_function(model)

#+

objective_sense(model)

#' Another way to change the objective is to
#' use the low-level functions `set_objective_function` and `set_objective_sense`.

set_objective_function(model, 5x + 11)
objective_function(model)

#+

set_objective_sense(model, MOI.MIN_SENSE)
objective_sense(model)

#' Note that we can't use the Min and Max shortcuts here as `set_objective_sense` is a low level function.
module JuMPTutorials

using Weave

# Repository root (one directory above src/).
const repo_directory = joinpath(@__DIR__,"..")

"""
    weave_file(folder::AbstractString, file::AbstractString)

Checks if the file has been modified since the last Weave and updates the notebook and tests accordingly.

* `folder` = Name of the folder the tutorial is in
* `file` = Name of the tutorial file
"""
function weave_file(folder,file)
    cd(joinpath(repo_directory,"script",folder))

    filename = split(file, ".")[1]
    srcpath = joinpath(repo_directory, "script", folder, file)
    testpath = joinpath(repo_directory, "test", folder, file)
    notebookpath = joinpath(repo_directory, "notebook", folder)

    # mtime returns 0.0 for paths that do not exist yet, so the `== 0`
    # check forces a first-time build of the output.
    if mtime(srcpath) > mtime(testpath) || mtime(testpath) == 0
        @warn "Updating tests for $filename as it has been updated since the last weave."
        tangle(srcpath, out_path=testpath)
    else
        @warn "Skipping tests for $filename as it has not been updated."
    end

    if mtime(srcpath) > mtime(notebookpath) || mtime(notebookpath) == 0
        @warn "Weaving $filename to Jupyter Notebook as it has been updated since the last weave."
        notebook(srcpath, out_path=notebookpath, timeout=-1, nbconvert_options="--allow-errors")
    else
        @warn "Skipping Jupyter Notebook for $filename as it has not been updated."
    end

    cd(joinpath(repo_directory,"src"))
end

"""
    weave_folder(folder::AbstractString)

Checks the files present in the specified folder for modifications and updates the notebook and tests accordingly.

* `folder` = Name of the folder to check
"""
function weave_folder(folder)
    for file in readdir(joinpath(repo_directory,"script",folder))
        # Skip sub-directories (e.g. the `data` folders); they are not tutorials.
        isdir(joinpath(repo_directory,"script",folder,file)) && continue
        println("")
        println("Building $(joinpath(folder,file))")
        try
            weave_file(folder,file)
        catch err
            # Report the failure instead of swallowing it silently, then
            # continue building the remaining tutorials.
            @error "Failed to build $(joinpath(folder,file))" exception=(err, catch_backtrace())
        end
        println("")
    end
end

"""
    weave_all()

Checks every tutorial for modifications and updates the notebook and tests accordingly.
"""
function weave_all()
    for folder in readdir(joinpath(repo_directory,"script"))
        weave_folder(folder)
    end
end

"""
    weave_file_f(folder::AbstractString, file::AbstractString)

Use Weave to convert the given file irrespective of whether it has been modified or not.

* `folder` = Name of the folder the tutorial is in
* `file` = Name of the tutorial file
"""
function weave_file_f(folder,file)
    cd(joinpath(repo_directory,"script",folder))

    srcpath = joinpath(repo_directory, "script", folder, file)
    testpath = joinpath(repo_directory, "test", folder, file)
    notebookpath = joinpath(repo_directory, "notebook", folder)

    tangle(srcpath, out_path = testpath)
    # Use the same keyword-argument form as `weave_file`; passing these
    # options positionally is not part of Weave's documented API.
    notebook(srcpath, out_path = notebookpath, timeout = -1, nbconvert_options = "--allow-errors")

    cd(joinpath(repo_directory,"src"))
end

"""
    weave_folder_f(folder::AbstractString)

Use Weave to convert every file in the specified folder irrespective of whether it has been modified or not.

* `folder` = Name of the folder the tutorials are in
"""
function weave_folder_f(folder)
    for file in readdir(joinpath(repo_directory,"script",folder))
        # Skip sub-directories (e.g. the `data` folders); they are not tutorials.
        isdir(joinpath(repo_directory,"script",folder,file)) && continue
        println("")
        println("Building $(joinpath(folder,file))")
        try
            weave_file_f(folder,file)
        catch err
            @error "Failed to build $(joinpath(folder,file))" exception=(err, catch_backtrace())
        end
        println("")
    end
end

"""
    weave_all_f()

Use Weave to convert every tutorial irrespective of whether it has been modified or not.
"""
function weave_all_f()
    for folder in readdir(joinpath(repo_directory,"script"))
        weave_folder_f(folder)
    end
end

end
optimize!(master_problem_model) 52 | 53 | t_status = termination_status(master_problem_model) 54 | p_status = primal_status(master_problem_model) 55 | 56 | if p_status == MOI.INFEASIBLE_POINT 57 | println("The problem is infeasible :-(") 58 | break 59 | end 60 | 61 | if t_status == MOI.INFEASIBLE_OR_UNBOUNDED 62 | fm_current = M 63 | x_current = M * ones(dim_x) 64 | end 65 | 66 | if p_status == MOI.FEASIBLE_POINT 67 | fm_current = value(t) 68 | x_current = Float64[] 69 | for i in 1:dim_x 70 | push!(x_current, value(x[i])) 71 | end 72 | end 73 | 74 | println("Status of the master problem is", t_status, 75 | "\nwith fm_current = ", fm_current, 76 | "\nx_current = ", x_current) 77 | 78 | sub_problem_model = Model(with_optimizer(GLPK.Optimizer)) 79 | 80 | c_sub = b - A1 * x_current 81 | 82 | @variable(sub_problem_model, u[1:dim_u] >= 0) 83 | 84 | @constraint(sub_problem_model, constr_ref_subproblem[j = 1:size(A2, 2)], sum(A2[i, j] * u[i] for i in 1:size(A2, 1)) >= c2[j]) 85 | # The second argument of @constraint macro, constr_ref_subproblem[j=1:size(A2,2)] means that the j-th constraint is 86 | # referenced by constr_ref_subproblem[j]. 
87 | 88 | @objective(sub_problem_model, Min, dot(c1, x_current) + sum(c_sub[i] * u[i] for i in 1:dim_u)) 89 | 90 | print("\nThe current subproblem model is \n", sub_problem_model) 91 | 92 | optimize!(sub_problem_model) 93 | 94 | t_status_sub = termination_status(sub_problem_model) 95 | p_status_sub = primal_status(sub_problem_model) 96 | 97 | fs_x_current = objective_value(sub_problem_model) 98 | 99 | u_current = Float64[] 100 | 101 | for i in 1:dim_u 102 | push!(u_current, value(u[i])) 103 | end 104 | 105 | γ = dot(b, u_current) 106 | 107 | println("Status of the subproblem is ", t_status_sub, 108 | "\nwith fs_x_current = ", fs_x_current, 109 | "\nand fm_current = ", fm_current) 110 | 111 | if p_status_sub == MOI.FEASIBLE_POINT && fs_x_current == fm_current # we are done 112 | println("\n################################################") 113 | println("Optimal solution of the original problem found") 114 | println("The optimal objective value t is ", fm_current) 115 | println("The optimal x is ", x_current) 116 | println("The optimal v is ", dual.(constr_ref_subproblem)) 117 | println("################################################\n") 118 | break 119 | end 120 | 121 | if p_status_sub == MOI.FEASIBLE_POINT && fs_x_current < fm_current 122 | println("\nThere is a suboptimal vertex, add the corresponding constraint") 123 | cv = A1' * u_current - c1 124 | @constraint(master_problem_model, t + sum(cv[i] * x[i] for i in 1:dim_x) <= γ) 125 | println("t + ", cv, "ᵀ x <= ", γ) 126 | end 127 | 128 | if t_status_sub == MOI.INFEASIBLE_OR_UNBOUNDED 129 | println("\nThere is an extreme ray, adding the corresponding constraint") 130 | ce = A1'* u_current 131 | @constraint(master_problem_model, sum(ce[i] * x[i] for i in 1:dim_x) <= γ) 132 | println(ce, "ᵀ x <= ", γ) 133 | end 134 | 135 | global iter_num += 1 136 | end 137 | 138 | -------------------------------------------------------------------------------- /script/modelling/sudoku.jl: 
#' ---
#' title: Sudoku
#' ---

#' **Originally Contributed by**: Iain Dunning

#' ![A partially solved Sudoku puzzle](http://upload.wikimedia.org/wikipedia/commons/f/ff/Sudoku-by-L2G-20050714.svg)

#'
#' [Sudoku](http://en.wikipedia.org/wiki/Sudoku) is a popular number puzzle. The goal is to place the digits 1,...,9 on a nine-by-nine grid, with some of the digits already filled in. Your solution must satisfy the following rules:
#'
#' * The numbers 1 to 9 must appear in each 3x3 square
#' * The numbers 1 to 9 must appear in each row
#' * The numbers 1 to 9 must appear in each column
#'
#' This isn't an optimization problem, it's actually a *feasibility* problem: we wish to find a feasible solution that satisfies these rules. You can think of it as an optimization problem with an objective of 0.
#'
#' We can model this problem using 0-1 integer programming: a problem where all the decision variables are binary. We'll use JuMP to create the model, and then we can solve it with any integer programming solver.

using JuMP
using GLPK

#' We will define a binary variable (a variable that is either 0 or 1) for each possible number in each possible cell. The meaning of each variable is as follows:
#'
#' x[i,j,k] = 1 if and only if cell (i,j) has number k
#'
#' where `i` is the row and `j` is the column.

# Create a model
sudoku = Model(with_optimizer(GLPK.Optimizer))

# Create our variables
@variable(sudoku, x[i=1:9, j=1:9, k=1:9], Bin);

#' Now we can begin to add our constraints. We'll actually start with something obvious to us as humans, but what we need to enforce: that there can be only one number per cell.

for i = 1:9, j = 1:9 # Each row and each column
    # Sum across all the possible digits
    # One and only one of the digits can be in this cell,
    # so the sum must be equal to one
    @constraint(sudoku, sum(x[i,j,k] for k in 1:9) == 1)
end

#' Next we'll add the constraints for the rows and the columns. These constraints are all very similar, so much so that we can actually add them at the same time.

for ind = 1:9 # Each row, OR each column
    for k = 1:9 # Each digit
        # Sum across columns (j) - row constraint
        @constraint(sudoku, sum(x[ind,j,k] for j in 1:9) == 1)
        # Sum across rows (i) - column constraint
        @constraint(sudoku, sum(x[i,ind,k] for i in 1:9) == 1)
    end
end

#' Finally, we have to enforce the constraint that each digit appears once in each of the nine 3x3 sub-grids. Our strategy will be to index over the top-left corners of each 3x3 square with `for` loops, then sum over the squares.

for i = 1:3:7, j = 1:3:7, k = 1:9
    # i is the top left row, j is the top left column
    # We'll sum from i to i+2, e.g. i=4, r=4, 5, 6
    @constraint(sudoku, sum(x[r,c,k] for r in i:i+2, c in j:j+2) == 1)
end

#' The final step is to add the initial solution as a set of constraints. We'll solve the problem that is in the picture at the start of the notebook. We'll put a `0` if there is no digit in that location.

# The given digits
init_sol = [ 5 3 0 0 7 0 0 0 0;
             6 0 0 1 9 5 0 0 0;
             0 9 8 0 0 0 0 6 0;
             8 0 0 0 6 0 0 0 3;
             4 0 0 8 0 3 0 0 1;
             7 0 0 0 2 0 0 0 6;
             0 6 0 0 0 0 2 8 0;
             0 0 0 4 1 9 0 0 5;
             0 0 0 0 8 0 0 7 9]
for i = 1:9, j = 1:9
    # If the space isn't empty
    if init_sol[i,j] != 0
        # Then the corresponding variable for that digit
        # and location must be 1
        @constraint(sudoku, x[i,j,init_sol[i,j]] == 1)
    end
end

# solve problem
optimize!(sudoku)

# Extract the values of x
x_val = value.(x)
# Create a matrix to store the solution
sol = zeros(Int,9,9) # 9x9 matrix of integers
for i in 1:9, j in 1:9, k in 1:9
    # Integer programs are solved as a series of linear programs
    # so the values might not be precisely 0 and 1. We can just
    # round them to the nearest integer to make it easier
    if round(Int,x_val[i,j,k]) == 1
        sol[i,j] = k
    end
end
# Display the solution
sol

#' Which is the correct solution:
#' ![Fully solved Sudoku](http://upload.wikimedia.org/wikipedia/commons/3/31/Sudoku-by-L2G-20050714_solution.svg)
cd("..") 26 | 27 | cd("optimization_concepts") 28 | @testset "Optimization Concepts" begin 29 | include("optimization_concepts/benders_decomposition.jl") 30 | @test objective_value(master_problem_model) == -4.0 31 | @test value.(x) == [0.0, 1.0] 32 | include("optimization_concepts/integer_programming.jl") 33 | include("optimization_concepts/conic_programming.jl") 34 | end 35 | cd("..") 36 | 37 | cd("modelling") 38 | @testset "Modelling Examples" begin 39 | include("modelling/sudoku.jl") 40 | @test sol == [ 41 | 5 3 4 6 7 8 9 1 2; 42 | 6 7 2 1 9 5 3 4 8; 43 | 1 9 8 3 4 2 5 6 7; 44 | 8 5 9 7 6 1 4 2 3; 45 | 4 2 6 8 5 3 7 9 1; 46 | 7 1 3 9 2 4 8 5 6; 47 | 9 6 1 5 3 7 2 8 4; 48 | 2 8 7 4 1 9 6 3 5; 49 | 3 4 5 2 8 6 1 7 9] 50 | include("modelling/problems_on_graphs.jl") 51 | @test value.(y) == [0.0, 1.0, 1.0, 1.0, 0.0, 0.0] 52 | @test value.(x) == [0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0] 53 | @test value.(m) == [ 54 | 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0; 55 | 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0; 56 | 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0; 57 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0; 58 | 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 59 | 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0; 60 | 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0; 61 | 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0] 62 | @test value.(z) == [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0] 63 | @test c == [ 64 | 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0; 65 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0; 66 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0; 67 | 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0; 68 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0; 69 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0; 70 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0; 71 | 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0; 72 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0; 73 | 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0] 74 | include("modelling/network_flows.jl") 75 | @test objective_value(shortest_path) == 55.0 76 | @test value.(x) == [ 77 | 0.0 0.0 1.0 0.0 0.0; 78 | 0.0 0.0 0.0 0.0 0.0; 79 | 0.0 0.0 0.0 1.0 0.0; 80 | 0.0 
1.0 0.0 0.0 0.0; 81 | 0.0 0.0 0.0 0.0 0.0] 82 | @test objective_value(assignment) == 20.0 83 | @test value.(y) == [0.0 1.0 0.0 0.0; 0.0 0.0 1.0 0.0; 1.0 0.0 0.0 0.0; 0.0 0.0 0.0 1.0] 84 | @test objective_value(max_flow) == 6.0 85 | @test value.(f) == [ 86 | 0.0 3.0 2.0 1.0 0.0 0.0 0.0 0.0; 87 | 0.0 0.0 0.0 0.0 3.0 0.0 0.0 0.0; 88 | 0.0 0.0 0.0 0.0 1.0 0.0 1.0 0.0; 89 | 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0; 90 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 4.0; 91 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0; 92 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0; 93 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0] 94 | include("modelling/finance.jl") 95 | @test objective_value(financing) ≈ 92.49694915254233 96 | @test objective_value(auction) == 21.0 97 | @test value.(y) == [1.0, 1.0, 1.0, 0.0, 0.0, 0.0] 98 | @test objective_value(portfolio) ≈ 22634.41784988414 99 | @test value.(x) ≈ [497.0455298498642, 0.0, 502.95448015948074] 100 | include("modelling/power_systems.jl") 101 | @test g_opt == [1000.0, 300.0] 102 | @test w_opt == 200.0 103 | @test w_f - w_opt == 0 104 | @test obj == 90000.0 105 | include("modelling/geometric_problems.jl") 106 | @test value.(p) ≈ [ 107 | 0.44790964261631827 0.0468981793661497; 108 | -0.03193526635198919 -0.6706136210384356; 109 | 0.404335805799056 -0.45130815913688466; 110 | -0.39429534726904925 -0.13282535401213758; 111 | 0.02532703978422118 0.4124207687120701; 112 | -0.0016520566419420052 -0.43954821308159137; 113 | 1.0 1.0; 114 | 1.0 -1.0; 115 | -1.0 -1.0; 116 | -1.0 1.0; 117 | 1.0 -0.5; 118 | -1.0 -0.2; 119 | -0.2 -1.0; 120 | 0.1 1.0] 121 | include("modelling/rocket_control.jl") 122 | @test objective_value(rocket) ≈ 1.0128340648308058 123 | end 124 | cd("..") -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # JuMPTutorials.jl 2 | 3 | [![Powered by 
NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](http://numfocus.org) 4 | [![Build Status](https://travis-ci.com/JuliaOpt/JuMPTutorials.jl.svg?branch=master)](https://travis-ci.com/JuliaOpt/JuMPTutorials.jl) 5 | [![](https://img.shields.io/badge/docs-dev-blue.svg)](https://www.juliaopt.org/JuMPTutorials.jl/dev/) 6 | 7 | 8 | This repository contains tutorials on JuMP, a domain-specific modeling language for [mathematical optimization](http://en.wikipedia.org/wiki/Mathematical_optimization) embedded in [Julia](http://julialang.org/). Tutorials can be viewed in the form of webpages, and interactive Jupyter notebooks. This set of tutorials is made to complement the documentation by providing practical examples of the concepts. For more details, please consult the [JuMP documentation](https://www.juliaopt.org/JuMP.jl/v0.20/). 9 | 10 | These tutorials are currently under development as a part of a Google Summer of Code [project](https://summerofcode.withgoogle.com/projects/#5903911565656064). The current list of tutorials that are planned can be viewed at the following [issue](https://github.com/JuliaOpt/JuMPTutorials.jl/issues/1). If there is a tutorial you would like to request, please add a comment to the above issue. Any other suggestions are welcome as well. 11 | 12 | There are also some older notebooks available at [juliaopt-notebooks](https://github.com/JuliaOpt/juliaopt-notebooks) repository. Most of these were built using prior versions of JuMP and may not function correctly, but they can assist in implementing some concepts. There are also some code examples available in the main [JuMP repo](https://github.com/JuliaOpt/JuMP.jl/tree/release-0.19/examples). 
13 | 14 | ## Run Notebooks in the Browser 15 | [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/JuliaOpt/JuMPTutorials.jl/master) 16 | 17 | To try out any of the tutorials in the browser without downloading Julia, click on the launch binder button above. Note that this functionality only supports open-source solvers which do not have additional requirements (for e.g. BLAS or MATLAB). This is also very slow and can take several minutes to start as it has to first install Julia and all the dependencies. Thus, you should download and run the notebooks on your PC for the best experience. 18 | 19 | ## Table of Contents 20 | 21 | - Introduction 22 | - [An Introduction to Julia](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/introduction/an_introduction_to_julia.ipynb) 23 | - [Getting Started with JuMP](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/introduction/getting_started_with_JuMP.ipynb) 24 | - [Variables, Constraints and Objective](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/introduction/variables_constraints_objective.ipynb) 25 | - [Solvers and Solutions](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/introduction/solvers_and_solutions.ipynb) 26 | - Using JuMP 27 | - [Working with Data Files](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/using_JuMP/working_with_data_files.ipynb) 28 | - [Problem Modification](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/using_JuMP/problem_modification.ipynb) 29 | - [Nonlinear Modelling](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/using_JuMP/nonlinear_modelling.ipynb) 30 | - Optimization Concepts 31 | - [Integer 
Programming](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/optimization_concepts/integer_programming.ipynb) 32 | - [Conic Programming](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/optimization_concepts/conic_programming.ipynb) 33 | - [Benders Decomposition](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/optimization_concepts/benders_decomposition.ipynb) 34 | - Modelling Examples 35 | - [Sudoku](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/modelling/sudoku.ipynb) 36 | - [Problems on Graphs](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/modelling/problems_on_graphs.ipynb) 37 | - [Network Flows](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/modelling/network_flows.ipynb) 38 | - [Finance](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/modelling/finance.ipynb) 39 | - [Power Systems](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/modelling/power_systems.ipynb) 40 | - [Geometric Problems](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/modelling/geometric_problems.ipynb) 41 | - [Experiment Design](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/modelling/experiment_design.ipynb) 42 | - [Rocket Control](https://nbviewer.jupyter.org/github/JuliaOpt/JuMPTutorials.jl/blob/master/notebook/modelling/rocket_control.ipynb) -------------------------------------------------------------------------------- /script/modelling/network_flows.jl: -------------------------------------------------------------------------------- 1 | #' --- 2 | #' title: Network Flows 3 | #' --- 4 | 5 | #' **Originally Contributed by**: Arpit Bhatia 6 | 7 | #' In graph theory, a flow network (also known as a transportation network) is a directed graph 
where 8 | #' each edge has a capacity and each edge receives a flow. The amount of flow on an edge cannot exceed the capacity of the edge. 9 | #' Often in operations research, a directed graph is called a network, the vertices are called nodes and the edges are called arcs. 10 | #' A flow must satisfy the restriction that the amount of flow into a node equals the amount of flow out of it, 11 | #' unless it is a source, which has only outgoing flow, or sink, which has only incoming flow. 12 | #' A network can be used to model traffic in a computer network, circulation with demands, fluids in pipes, 13 | #' currents in an electrical circuit, or anything similar in which something travels through a network of nodes. 14 | 15 | using JuMP 16 | using GLPK 17 | 18 | #' ## The Shortest Path Problem 19 | #' Suppose that each arc $(i, j)$ of a graph is assigned a scalar cost $a_{i,j}$, and 20 | #' suppose that we define the cost of a forward path to be the sum of the costs of its arcs. 21 | #' Given a pair of nodes, the shortest path problem is to find a forward path that connects these nodes and has minimum cost. 22 | 23 | #' $$ 24 | #' \begin{align*} 25 | #' \min && \sum_{\forall e(i,j) \in E} a_{i,j} \times x_{i,j} \\ 26 | #' s.t. 
&& b(i) = \sum_j x_{ij} - \sum_k x_{ki} = \begin{cases} 27 | #' 1 &\mbox{if $i$ is the starting node,} \\ 28 | #' -1 &\mbox{if $i$ is the ending node,} \\ 29 | #' 0 &\mbox{otherwise.} \end{cases} \\ 30 | #' && x_{e} \in \{0,1\} && \forall e \in E 31 | #' \end{align*} 32 | #' $$ 33 | 34 | #' Flow Network 1 35 | 36 | G = [ 37 | 0 100 30 0 0; 38 | 0 0 20 0 0; 39 | 0 0 0 10 60; 40 | 0 15 0 0 50; 41 | 0 0 0 0 0 42 | ] 43 | 44 | n = size(G)[1] 45 | 46 | shortest_path = Model(with_optimizer(GLPK.Optimizer)) 47 | 48 | @variable(shortest_path, x[1:n,1:n], Bin) 49 | @constraint(shortest_path, [i = 1:n, j = 1:n; G[i,j] == 0], x[i,j] == 0) # Arcs with zero cost are not a part of the path as they do no exist 50 | @constraint(shortest_path, [i = 1:n; i != 1 && i != 2], sum(x[i,:]) == sum(x[:,i])) # Flow conservation constraint 51 | @constraint(shortest_path, sum(x[1,:]) - sum(x[:,1]) == 1) # Flow coming out of source = 1 52 | @constraint(shortest_path, sum(x[2,:]) - sum(x[:,2]) == -1) # Flowing coming out of destination = -1 i.e. Flow entering destination = 1 53 | @objective(shortest_path, Min, sum(G .* x)) 54 | 55 | optimize!(shortest_path) 56 | @show objective_value(shortest_path); 57 | @show value.(x); 58 | 59 | #' ## The Assignment Problem 60 | #' Suppose that there are $n$ persons and $n$ objects that we have to match on a one-to-one basis. 61 | #' There is a benefit or value $a_{i,j}$ for matching person $i$ with object $j$, and 62 | #' we want to assign persons to objects so as to maximize the total benefit. 63 | #' There is also a restriction that person $i$ can be assigned to object $j$ only if $(i, j)$ belongs to a given set of pairs $A$. 64 | #' Mathematically, we want to find a set of person-object pairs $(1, j_{1}),..., (n, j_{n})$ from $A$ such that 65 | #' the objects $j_{1},...,j_{n}$ are all distinct, and the total benefit $\sum_{i=1}^{y} a_{ij_{i}}$ is maximized. 
66 | 67 | #' $$ 68 | #' \begin{align*} 69 | #' \max && \sum_{(i,j) \in A} a_{i,j} \times y_{i,j} \\ 70 | #' s.t. && \sum_{\{j|(i,j) \in A\}} y_{i,j} = 1 && \forall i = \{1,2....n\} \\ 71 | #' && \sum_{\{i|(i,j) \in A\}} y_{i,j} = 1 && \forall j = \{1,2....n\} \\ 72 | #' && y_{i,j} \in \{0,1\} && \forall (i,j) \in \{1,2...k\} 73 | #' \end{align*} 74 | #' $$ 75 | 76 | #' Flow Network 2 77 | 78 | G = [ 79 | 6 4 5 0; 80 | 0 3 6 0; 81 | 5 0 4 3; 82 | 7 5 5 5; 83 | ] 84 | 85 | n = size(G)[1] 86 | 87 | assignment = Model(with_optimizer(GLPK.Optimizer)) 88 | @variable(assignment, y[1:n,1:n], Bin) 89 | @constraint(assignment, [i = 1:n], sum(y[:,i]) == 1) # One person can only be assigned to one object 90 | @constraint(assignment, [j = 1:n], sum(y[j,:]) == 1) # One object can only be assigned to one person 91 | @objective(assignment, Max, sum(G .* y)) 92 | 93 | optimize!(assignment) 94 | @show objective_value(assignment); 95 | @show value.(y); 96 | 97 | #' ## The Max-Flow Problem 98 | #' In the max-flow problem, we have a graph with two special nodes: the $source$, denoted by $s$, and the $sink$, denoted by $t$. 99 | #' The objective is to move as much flow as possible from $s$ into $t$ while observing the capacity constraints. 100 | 101 | #' $$ 102 | #' \begin{align*} 103 | #' \max && \sum_{v:(s,v) \in E} f(s,v) \\ 104 | #' s.t. 
&& \sum_{u:(u,v) \in E} f(u,v) = \sum_{w:(v,w) \in E} f(v,w) && \forall v \in V - \{s,t\} \\ 105 | #' && f(u,v) \leq c(u,v) && \forall (u,v) \in E \\ 106 | #' && f(u,v) \geq 0 && \forall (u,v) \in E 107 | #' \end{align*} 108 | #' $$ 109 | 110 | #' Flow Network 3 111 | 112 | G = [ 113 | 0 3 2 2 0 0 0 0 114 | 0 0 0 0 5 1 0 0 115 | 0 0 0 0 1 3 1 0 116 | 0 0 0 0 0 1 0 0 117 | 0 0 0 0 0 0 0 4 118 | 0 0 0 0 0 0 0 2 119 | 0 0 0 0 0 0 0 4 120 | 0 0 0 0 0 0 0 0 121 | ] 122 | 123 | n = size(G)[1] 124 | 125 | max_flow = Model(with_optimizer(GLPK.Optimizer)) 126 | 127 | @variable(max_flow, f[1:n,1:n] >= 0) 128 | @constraint(max_flow, [i = 1:n, j = 1:n], f[i,j] <= G[i,j]) # Capacity constraints 129 | @constraint(max_flow, [i = 1:n; i != 1 && i != 8], sum(f[i,:]) == sum(f[:,i])) # Flow conservation contraints 130 | @objective(max_flow, Max, sum(f[1, :])) 131 | 132 | optimize!(max_flow) 133 | @show objective_value(max_flow); 134 | @show value.(f); -------------------------------------------------------------------------------- /script/modelling/rocket_control.jl: -------------------------------------------------------------------------------- 1 | #' --- 2 | #' title: Rocket Control 3 | #' --- 4 | 5 | #' **Originally Contributed by**: Iain Dunning 6 | 7 | #' This tutorial shows how to solve a nonlinear rocketry control problem. 8 | #' The problem was drawn from the [COPS3](http://www.mcs.anl.gov/~more/cops/cops3.pdf) benchmark. 9 | 10 | #' Our goal is to maximize the final altitude of a vertically launched rocket. 11 | #' We can control the thrust of the rocket, and must take account of 12 | #' the rocket mass, fuel consumption rate, gravity, and aerodynamic drag. 13 | 14 | #' Let us consider the basic description of the model (for the full description, 15 | #' including parameters for the rocket, see the COPS3 PDF) 16 | 17 | #' ### Overview 18 | #' We will use a discretized model of time, with a fixed number of time steps, $n$. 
19 | #' We will make the time step size $\Delta t$, and thus the final time $t_f = n \cdot \Delta t$, a variable in the problem. 20 | #' To approximate the derivatives in the problem we will use the [trapezoidal rule](http://en.wikipedia.org/wiki/Trapezoidal_rule). 21 | 22 | #' ### State and Control 23 | #' We will have three state variables: 24 | #' 25 | #' * Velocity, $v$ 26 | #' * Altitude, $h$ 27 | #' * Mass of rocket and remaining fuel, $m$ 28 | #' 29 | #' and a single control variable, thrust $T$. 30 | #' Our goal is thus to maximize $h(t_f)$. 31 | #' Each of these corresponds to a JuMP variable indexed by the time step. 32 | 33 | #' ### Dynamics 34 | #' We have three equations that control the dynamics of the rocket: 35 | #' 36 | #' Rate of ascent: $$h^\prime = v$$ 37 | #' Acceleration: $$v^\prime = \frac{T - D(h,v)}{m} - g(h)$$ 38 | #' Rate of mass loss: $$m^\prime = -\frac{T}{c}$$ 39 | #' 40 | #' where drag $D(h,v)$ is a function of altitude and velocity, and gravity $g(h)$ is a function of altitude. 41 | 42 | #' These forces are defined as 43 | #' 44 | #' $$D(h,v) = D_c v^2 exp\left( -h_c \left( \frac{h-h(0)}{h(0)} \right) \right)$$ 45 | #' and 46 | #' $$g(h) = g_0 \left( \frac{h(0)}{h} \right)^2$$ 47 | #' 48 | #' The three rate equations correspond to JuMP constraints, 49 | #' and for convenience we will represent the forces with nonlinear expressions. 50 | 51 | using JuMP, Ipopt 52 | 53 | # Create JuMP model, using Ipopt as the solver 54 | rocket = Model(with_optimizer(Ipopt.Optimizer, print_level=0)) 55 | 56 | # Constants 57 | # Note that all parameters in the model have been normalized 58 | # to be dimensionless. See the COPS3 paper for more info. 
59 | h_0 = 1 # Initial height 60 | v_0 = 0 # Initial velocity 61 | m_0 = 1 # Initial mass 62 | g_0 = 1 # Gravity at the surface 63 | 64 | T_c = 3.5 # Used for thrust 65 | h_c = 500 # Used for drag 66 | v_c = 620 # Used for drag 67 | m_c = 0.6 # Fraction of initial mass left at end 68 | 69 | c = 0.5 * sqrt(g_0 * h_0) # Thrust-to-fuel mass 70 | m_f = m_c * m_0 # Final mass 71 | D_c = 0.5 * v_c * m_0 / g_0 # Drag scaling 72 | T_max = T_c * g_0 * m_0 # Maximum thrust 73 | 74 | n = 800 # Time steps 75 | 76 | @variables(rocket, begin 77 | Δt ≥ 0, (start = 1/n) # Time step 78 | # State variables 79 | v[1:n] ≥ 0 # Velocity 80 | h[1:n] ≥ h_0 # Height 81 | m_f ≤ m[1:n] ≤ m_0 # Mass 82 | # Control 83 | 0 ≤ T[1:n] ≤ T_max # Thrust 84 | end) 85 | 86 | # Objective: maximize altitude at end of time of flight 87 | @objective(rocket, Max, h[n]) 88 | 89 | # Initial conditions 90 | @constraints(rocket, begin 91 | v[1] == v_0 92 | h[1] == h_0 93 | m[1] == m_0 94 | m[n] == m_f 95 | end) 96 | 97 | # Forces 98 | # Drag(h,v) = Dc v^2 exp( -hc * (h - h0) / h0 ) 99 | @NLexpression(rocket, drag[j = 1:n], D_c * (v[j]^2) * exp(-h_c * (h[j] - h_0) / h_0)) 100 | # Grav(h) = go * (h0 / h)^2 101 | @NLexpression(rocket, grav[j = 1:n], g_0 * (h_0 / h[j])^2) 102 | # Time of flight 103 | @NLexpression(rocket, t_f, Δt * n) 104 | 105 | # Dynamics 106 | for j in 2:n 107 | # h' = v 108 | 109 | # Rectangular integration 110 | # @NLconstraint(rocket, h[j] == h[j - 1] + Δt * v[j - 1]) 111 | 112 | # Trapezoidal integration 113 | @NLconstraint(rocket, 114 | h[j] == h[j - 1] + 0.5 * Δt * (v[j] + v[j - 1])) 115 | 116 | # v' = (T-D(h,v))/m - g(h) 117 | 118 | # Rectangular integration 119 | # @NLconstraint(rocket, v[j] == v[j - 1] + Δt *( 120 | # (T[j - 1] - drag[j - 1]) / m[j - 1] - grav[j - 1])) 121 | 122 | # Trapezoidal integration 123 | @NLconstraint(rocket, 124 | v[j] == v[j-1] + 0.5 * Δt * ( 125 | (T[j] - drag[j] - m[j] * grav[j]) / m[j] + 126 | (T[j - 1] - drag[j - 1] - m[j - 1] * grav[j - 1]) / m[j - 1])) 
127 | 128 | # m' = -T/c 129 | 130 | # Rectangular integration 131 | # @NLconstraint(rocket, m[j] == m[j - 1] - Δt * T[j - 1] / c) 132 | 133 | # Trapezoidal integration 134 | @NLconstraint(rocket, 135 | m[j] == m[j - 1] - 0.5 * Δt * (T[j] + T[j-1]) / c) 136 | end 137 | 138 | # Solve for the control and state 139 | println("Solving...") 140 | status = optimize!(rocket) 141 | 142 | # Display results 143 | # println("Solver status: ", status) 144 | println("Max height: ", objective_value(rocket)) 145 | 146 | #+ 147 | 148 | # Can visualize the state and control variables 149 | using Gadfly 150 | 151 | #+ 152 | 153 | h_plot = plot(x = (1:n) * value.(Δt), y = value.(h)[:], Geom.line, 154 | Guide.xlabel("Time (s)"), Guide.ylabel("Altitude")) 155 | m_plot = plot(x = (1:n) * value.(Δt), y = value.(m)[:], Geom.line, 156 | Guide.xlabel("Time (s)"), Guide.ylabel("Mass")) 157 | v_plot = plot(x = (1:n) * value.(Δt), y = value.(v)[:], Geom.line, 158 | Guide.xlabel("Time (s)"), Guide.ylabel("Velocity")) 159 | T_plot = plot(x = (1:n) * value.(Δt), y = value.(T)[:], Geom.line, 160 | Guide.xlabel("Time (s)"), Guide.ylabel("Thrust")) 161 | draw(SVG(6inch, 6inch), vstack(hstack(h_plot, m_plot), hstack(v_plot, T_plot))) -------------------------------------------------------------------------------- /script/optimization_concepts/integer_programming.jl: -------------------------------------------------------------------------------- 1 | #' --- 2 | #' title: Integer Programming 3 | #' --- 4 | 5 | #' **Originally Contributed by**: Arpit Bhatia 6 | 7 | #' While we already know how to set a variable as integer or binary in the `@variable` macro, 8 | #' this tutorial covers other JuMP features for integer programming along with some modelling techniques. 9 | 10 | using JuMP, Random 11 | 12 | Random.seed!(1234); 13 | 14 | #' ## Modelling Logical Conditions 15 | #' Generally, in a mathematical programming problem, all constraints must hold. 
16 | #' However, we might want to have conditions where we have some logical conditions between constraints. 17 | #' In such cases, we can use binary variables for modelling logical conditions between constraints. 18 | 19 | #' ### Disjunctive Constraints (OR) 20 | #' Suppose that we are given two constraints $a'x \geq b$ and $c' x \geq d$, 21 | #' in which all components of $a$ and $c$ are non-negative. 22 | #' We would like to model a requirement that at least one of the two constraints is satisfied. 23 | #' For this, we defined a binary variable $y$ and impose the constraints: 24 | 25 | #' $$ 26 | #' \begin{align*} 27 | #' a' x \geq y b \\ 28 | #' c' x \geq (1 - y) d \\ 29 | #' y \in \{0,1\} 30 | #' \end{align*} 31 | #' $$ 32 | 33 | a = rand(1:100, 5, 5) 34 | c = rand(1:100, 5, 5) 35 | b = rand(1:100, 5) 36 | d = rand(1:100, 5) 37 | 38 | model = Model() 39 | @variable(model, x[1:5]) 40 | @variable(model, y, Bin) 41 | @constraint(model, a * x .>= y .* b) 42 | @constraint(model, c * x .>= (1 - y) .* d); 43 | 44 | #' ### Conditional Constraints ($\implies$) 45 | #' Suppose we want to model that a certain linear inequality must be satisfied when some other event occurs. 46 | #' i.e. for a binary variable $z$, we want to model the implication 47 | 48 | #' $$ 49 | #' \begin{align*} 50 | #' z = 1 \implies a^Tx\leq b 51 | #' \end{align*} 52 | #' $$ 53 | 54 | #' If we know in advance an upper bound $a^Tx\leq b$. 
Then we can write the above as a linear inequality 55 | 56 | #' $$ 57 | #' \begin{align*} 58 | #' a^Tx\leq b + M(1-z) 59 | #' \end{align*} 60 | #' $$ 61 | 62 | a = rand(1:100, 5, 5) 63 | b = rand(1:100, 5) 64 | m = rand(10000:11000, 5) 65 | 66 | model = Model() 67 | @variable(model, x[1:5]) 68 | @variable(model, z, Bin) 69 | @constraint(model, a * x .<= b .+ (m .* (1 - z))); 70 | # If z was a regular Julia variable, we would not have had to use the vectorized dot operator 71 | 72 | #' ### Boolean Operators on Binary Variables 73 | #' The following table is useful when we want to model boolean operators in the form of 74 | #' linear inequalities that can be given to a solver. 75 | 76 | #' | Boolean Expression | Constraint | 77 | #' |:---------- | ----------:| 78 | #' | $z=x \lor y$ | $x \leq z, y \leq z, z \leq x+y$ | 79 | #' | $z=x \land y$ | $x \geq z, y \geq z, z+1 \geq x+y$ | 80 | #' | $z= \neg x$ | $z = 1 − x$ | 81 | #' | $x \implies y$ | $x \leq y$ | 82 | #' | $x \iff y$ | $x = y$ | 83 | 84 | #' ## Modelling Integer Variables 85 | 86 | #' ### Integer Variables using Constraints 87 | #' We can add binary and integer restrictions to the domain of each variable using the `@constraint` macro as well. 88 | 89 | model = Model() 90 | 91 | @variable(model, x) 92 | @variable(model, y) 93 | @constraint(model, x in MOI.ZeroOne()) 94 | @constraint(model, y in MOI.Integer()); 95 | 96 | #' ### Semi-Continuous Variables 97 | #' A semi-continuous variable is a continuous variable 98 | #' between bounds $[l,u]$ that also can assume the value zero. ie. 99 | #' $$ 100 | #' x \in \{0\} \cup \{l,u\} 101 | #' $$ 102 | 103 | l = 7.45 104 | u = 22.22 105 | @variable(model, a) 106 | @constraint(model, a in MOI.Semicontinuous(l, u)) 107 | 108 | #' ### Semi-Integer Variables 109 | #' A semi-integer variable is a variable which asummes integer values 110 | #' between bounds $[l,u]$ and can also assume the value zero. ie. 
111 | 112 | #' $$ 113 | #' x \in \{0\} \cup (\{l,u\} \cap \mathbb{Z}) 114 | #' $$ 115 | 116 | l = 5 117 | u = 34 118 | @variable(model, b) 119 | @constraint(model, b in MOI.Semiinteger(l, u)) 120 | 121 | #' Note that the bounds specified in `MOI.Semiinteger` must be integral otherwise it would throw an error. 122 | 123 | #' ## Special Ordered Sets 124 | 125 | #' ### Special Ordered Sets of type 1 (SOS1) 126 | #' A Special Ordered Set of type 1 is a set of variables, 127 | #' at most one of which can take a non-zero value, all others being at 0. 128 | #' They most frequently apply where a set of variables are actually 0-1 variables: 129 | #' in other words, we have to choose at most one from a set of possibilities. 130 | 131 | @variable(model, u[1:3]) 132 | @constraint(model, u in MOI.SOS1([1.0, 2.0, 3.0])) 133 | 134 | #' Note that we have to pass MOI.SOS1 a weight vector which is essentially an ordering on the variables. 135 | #' If the decision variables are related and have a physical ordering, then the weight vector, 136 | #' although not used directly in the constraint, can help the solver make a better decision in the solution process. 137 | 138 | #' ### Special Ordered Sets of type 2 (SOS2) 139 | 140 | #' A Special Ordered Set of type 2 is a set of non-negative variables, 141 | #' of which at most two can be non-zero, and if two are non-zero these must be consecutive in their ordering. 142 | 143 | @variable(model, v[1:3]) 144 | @constraint(model, v in MOI.SOS2([3.0, 1.0, 2.0])) 145 | 146 | #' The ordering provided by the weight vector is more important in this case as 147 | #' the variables need to be consecutive according to the ordering. 148 | #' For example, in the above constraint, the possible pairs are: 149 | #' * Consecutive 150 | #' * (`x[1]` and `x[3]`) as they correspond to 3 and 2 resp. and thus can be non-zero 151 | #' * (`x[2]` and `x[3]`) as they correspond to 1 and 2 resp. 
and thus can be non-zero 152 | #' * Non-consecutive 153 | #' * (`x[1]` and `x[2]`) as they correspond to 3 and 1 resp. and thus cannot be non-zero -------------------------------------------------------------------------------- /test/modelling/geometric_problems.jl: -------------------------------------------------------------------------------- 1 | 2 | using JuMP 3 | using Ipopt 4 | using Random 5 | # for plots 6 | using Gadfly 7 | using DataFrames 8 | 9 | Random.seed!(1234); 10 | 11 | 12 | x = rand(10) 13 | a = rand(10) 14 | b = rand() 15 | 16 | projection = Model(with_optimizer(Ipopt.Optimizer, print_level=0)) 17 | @variable(projection, x0[1:10]) 18 | @objective(projection, Min, sum((x - x0) .* (x - x0))) # We minimize the square of the distance here 19 | @constraint(projection, x0' * a == b) # Point must lie on the hyperplane 20 | 21 | optimize!(projection) 22 | @show objective_value(projection); 23 | @show value.(x0); 24 | 25 | 26 | A_1 = rand(10, 10) 27 | A_2 = rand(10, 10) 28 | b_1 = rand(10) 29 | b_2 = rand(10) 30 | 31 | polyhedra_distance = Model(with_optimizer(Ipopt.Optimizer, print_level=0)) 32 | @variable(polyhedra_distance, x[1:10]) # Point closest on the first polyhedron 33 | @variable(polyhedra_distance, y[1:10]) # Point closest on the second polyhedron 34 | @objective(polyhedra_distance, Min, sum((x - y) .* (x - y))) # We minimize the square of the distance here as above 35 | @constraint(polyhedra_distance, A_1 * x .<= b_1) # Point x must lie on the first polyhedron 36 | @constraint(polyhedra_distance, A_2 * y .<= b_2) # Point y must lie on the second polyhedron 37 | 38 | optimize!(polyhedra_distance) 39 | @show objective_value(polyhedra_distance); 40 | 41 | 42 | fixed = [ 1 1 -1 -1 1 -1 -0.2 0.1; # coordinates of fixed points 43 | 1 -1 -1 1 -0.5 -0.2 -1 1] 44 | 45 | M = size(fixed,2) # number of fixed points 46 | N = 6 # number of free points 47 | 48 | A = [ 1 0 0 -1 0 0 0 0 0 0 0 0 0 0; # Matrix on links 49 | 1 0 -1 0 0 0 0 0 0 0 0 0 0 0; 50 | 1 0 
0 0 -1 0 0 0 0 0 0 0 0 0; 51 | 1 0 0 0 0 0 -1 0 0 0 0 0 0 0; 52 | 1 0 0 0 0 0 0 -1 0 0 0 0 0 0; 53 | 1 0 0 0 0 0 0 0 0 0 -1 0 0 0; 54 | 1 0 0 0 0 0 0 0 0 0 0 0 0 -1; 55 | 0 1 -1 0 0 0 0 0 0 0 0 0 0 0; 56 | 0 1 0 -1 0 0 0 0 0 0 0 0 0 0; 57 | 0 1 0 0 0 -1 0 0 0 0 0 0 0 0; 58 | 0 1 0 0 0 0 0 -1 0 0 0 0 0 0; 59 | 0 1 0 0 0 0 0 0 -1 0 0 0 0 0; 60 | 0 1 0 0 0 0 0 0 0 0 0 0 -1 0; 61 | 0 0 1 -1 0 0 0 0 0 0 0 0 0 0; 62 | 0 0 1 0 0 0 0 -1 0 0 0 0 0 0; 63 | 0 0 1 0 0 0 0 0 0 0 -1 0 0 0; 64 | 0 0 0 1 -1 0 0 0 0 0 0 0 0 0; 65 | 0 0 0 1 0 0 0 0 -1 0 0 0 0 0; 66 | 0 0 0 1 0 0 0 0 0 -1 0 0 0 0; 67 | 0 0 0 1 0 0 0 0 0 0 0 -1 0 0; 68 | 0 0 0 1 0 0 0 0 0 0 0 -1 0 0; 69 | 0 0 0 0 1 -1 0 0 0 0 0 0 0 0; 70 | 0 0 0 0 1 0 -1 0 0 0 0 0 0 0; 71 | 0 0 0 0 1 0 0 0 0 -1 0 0 0 0; 72 | 0 0 0 0 1 0 0 0 0 0 0 0 0 -1; 73 | 0 0 0 0 0 1 0 0 -1 0 0 0 0 0; 74 | 0 0 0 0 0 1 0 0 0 0 -1 0 0 0;] 75 | 76 | placement = Model(with_optimizer(Ipopt.Optimizer, print_level=0)) 77 | @variable(placement, p[1:M + N, 1:2]) # A variable array for the coordinates of each point 78 | @constraint(placement, p[N + 1:N + M, :] .== fixed') # We had a constraint for the fixed points 79 | dist = A * p # Matrix of differences between coordinates of 2 points with a link 80 | @objective(placement, Min, sum(dist .* dist)) # We minimize the sum of the square of the distances 81 | 82 | optimize!(placement) 83 | @show value.(p); 84 | @show objective_value(placement); 85 | 86 | 87 | # Plotting the points 88 | df = DataFrame() 89 | df.x = value.(p)[:,1] 90 | df.y = value.(p)[:,2] 91 | df.type = vcat(fill("Free points", N), fill("Fixed points", M)) 92 | plt = plot(df, x = "x", y = "y", color = "type", Geom.point) 93 | draw(SVG(6inch, 6inch), plt) 94 | 95 | 96 | n = 5; 97 | 98 | Amin = [ # We'll try this problem with 4 times with different minimum area constraints 99 | 100 100 100 100 100; 100 | 20 50 80 150 200; 101 | 180 80 80 80 80; 102 | 20 150 20 200 110] 103 | 104 | r = 1 105 | 106 | figs=[] 107 | 108 | for i = 1:4 109 | A = 
Amin[i, :] 110 | 111 | floor_planning = Model(with_optimizer(Ipopt.Optimizer, print_level=0)) 112 | 113 | @variables(floor_planning, begin 114 | x[1:n] >= r 115 | y[1:n] >= r 116 | w[1:n] >= 0 117 | h[1:n] >= 0 118 | W 119 | H 120 | end) 121 | 122 | @constraints(floor_planning, begin 123 | x[5] + w[5] + r <= W # No rectangles at the right of Rectangle 5 124 | x[1] + w[1] + r <= x[3] # Rectangle 1 is at the left of Rectangle 3 125 | x[2] + w[2] + r <= x[3] # Rectangle 2 is at the left of Rectangle 3 126 | x[3] + w[3] + r <= x[5] # Rectangle 3 is at the left of Rectangle 5 127 | x[4] + w[4] + r <= x[5] # Rectangle 4 is at the left of Rectangle 5 128 | y[4] + h[4] + r <= H # No rectangles on top of Rectangle 4 129 | y[5] + h[5] + r <= H # No rectangles on top of Rectangle 5 130 | y[2] + h[2] + r <= y[1] # Rectangle 2 is below Rectangle 1 131 | y[1] + h[1] + r <= y[4] # Rectangle 1 is below Rectangle 4 132 | y[3] + h[3] + r <= y[4] # Rectangle 3 is below Rectangle 4 133 | w .<= 5*h # Aspect ratio constraint 134 | h .<= 5*w # Aspect ratio constraint 135 | A .<= h .* w # Area constraint 136 | end) 137 | 138 | @objective(floor_planning, Min, W + H) 139 | 140 | optimize!(floor_planning) 141 | 142 | @show objective_value(floor_planning); 143 | 144 | D = DataFrame(x = value.(x), y = value.(y), x2 = value.(x) .+ value.(w), y2 = value.(y) .+ value.(h)) 145 | plt = plot(D, xmin = :x, ymin = :y, xmax = :x2, ymax = :y2, Geom.rect) 146 | push!(figs, plt) 147 | end 148 | 149 | 150 | draw(SVG(6inch, 6inch), vstack(hstack(figs[1], figs[2]), hstack(figs[3], figs[4]))) 151 | 152 | -------------------------------------------------------------------------------- /script/modelling/experiment_design.jl: -------------------------------------------------------------------------------- 1 | #' --- 2 | #' title: Experiment Design 3 | #' --- 4 | 5 | #' **Originally Contributed by**: Arpit Bhatia, Chris Coey 6 | 7 | #' This tutorial covers experiment design examples (D-optimal, A-optimal, and 
E-optimal) 8 | #' from section 7.5 of the book Convex Optimization by Boyd and Vandenberghe[[1]](#c1) 9 | 10 | #' ## Relaxed Experiment Design Problem 11 | 12 | #' The basic experiment design problem is as follows. 13 | #' Given the menu of possible choices for experiments, $v_{1}, \ldots, v_{p}$, 14 | #' and the total number $m$ of experiments to be carried out, choose the numbers of each type of experiment, 15 | #' $i . e ., m_{1}, \ldots, m_{p}$ to make the error covariance $E$ small (in some sense). 16 | #' The variables $m_{1}, \ldots, m_{p}$ must, of course, be integers and sum to $m,$ the given total number of experiments. 17 | #' This leads to the optimization problem 18 | 19 | #' $$ 20 | #' \begin{array}{cl}{\operatorname{minimize}\left(\mathrm{w.r.t.} \mathbf{S}_{+}^{n}\right)} & {E=\left(\sum_{j=1}^{p} m_{j} v_{j} v_{j}^{T}\right)^{-1}} \\ {\text { subject to }} & {m_{i} \geq 0, \quad m_{1}+\cdots+m_{p}=m} \\ {} & {m_{i} \in \mathbf{Z}}\end{array} 21 | #' $$ 22 | 23 | #' The basic experiment design problem can be a hard combinatorial problem when $m,$ the total number of experiments, 24 | #' is comparable to $n$ , since in this case the $m_{i}$ are all small integers. 25 | #' In the case when $m$ is large compared to $n$ , however, a good approximate solution can be found by ignoring, 26 | #' or relaxing, the constraint that the $m_{i}$ are integers. 27 | #' Let $\lambda_{i}=m_{i} / m,$ which is the fraction of the total number of experiments for which 28 | #' $a_{j}=v_{i},$ or the relative frequency of experiment $i$. 29 | #' We can express the error covariance in terms of $\lambda_{i}$ as 30 | 31 | #' $$ 32 | #' E=\frac{1}{m}\left(\sum_{i=1}^{p} \lambda_{i} v_{i} v_{i}^{T}\right)^{-1} 33 | #' $$ 34 | 35 | #' The vector $\lambda \in \mathbf{R}^{p}$ satisfies $\lambda \succeq 0, 36 | #' \mathbf{1}^{T} \lambda=1,$ and also, each $\lambda_{i}$ is an integer multiple of $1 / m$. 
37 | #' By ignoring this last constraint, we arrive at the problem 38 | 39 | #' $$ 40 | #' \begin{array}{ll}{\operatorname{minimize}\left(\mathrm{w.r.t.} \mathbf{S}_{+}^{n}\right)} & {E=(1 / m)\left(\sum_{i=1}^{p} \lambda_{i} v_{i} v_{i}^{T}\right)^{-1}} \\ {\text { subject to }} & {\lambda \succeq 0, \quad \mathbf{1}^{T} \lambda=1}\end{array} 41 | #' $$ 42 | 43 | #' ## Types of Experiment Design Problems 44 | 45 | #' Several scalarizations have been proposed for the experiment design problem, 46 | #' which is a vector optimization problem over the positive semidefinite cone. 47 | 48 | using JuMP 49 | using SCS 50 | using LinearAlgebra 51 | using Random 52 | 53 | Random.seed!(1234); 54 | 55 | q = 4 # dimension of estimate space 56 | p = 8 # number of experimental vectors 57 | nmax = 3 # upper bound on lambda 58 | n = 12 59 | 60 | V = randn(q, p) 61 | 62 | eye = Matrix{Float64}(I, q, q); 63 | 64 | #' ### A-optimal design 65 | 66 | #' In A-optimal experiment design, we minimize tr $E$, the trace of the covariance matrix. 
67 | #' This objective is simply the mean of the norm of the error squared: 68 | 69 | #' $$ 70 | #' \mathbf{E}\|e\|_{2}^{2}=\mathbf{E} \operatorname{tr}\left(e e^{T}\right)=\operatorname{tr} E 71 | #' $$ 72 | 73 | #' The A-optimal experiment design problem in SDP form is 74 | 75 | #' $$ 76 | #' \begin{array}{ll}{\operatorname{minimize}} & {\mathbf{1}^{T} u} \\ {\text { subject to }} & {\left[\begin{array}{cc}{\sum_{i=1}^{p} \lambda_{i} v_{i} v_{i}^{T}} & {e_{k}} \\ {e_{k}^{T}} & {u_{k}}\end{array}\right] \succeq 0, \quad k=1, \ldots, n} \\ {} & {\lambda \succeq 0, \quad \mathbf{1}^{T} \lambda=1}\end{array} 77 | #' $$ 78 | 79 | aOpt = Model(with_optimizer(SCS.Optimizer, verbose = 0)) 80 | @variable(aOpt, np[1:p], lower_bound = 0, upper_bound = nmax) 81 | @variable(aOpt, u[1:q], lower_bound = 0) 82 | 83 | @constraint(aOpt, sum(np) <= n) 84 | for i = 1:q 85 | @SDconstraint(aOpt, [V * diagm(0 => np ./ n) * V' eye[:, i]; eye[i, :]' u[i]] >= 0) 86 | end 87 | 88 | @objective(aOpt, Min, sum(u)) 89 | 90 | optimize!(aOpt) 91 | 92 | @show objective_value(aOpt); 93 | @show value.(np); 94 | 95 | #' ### E-optimal design 96 | 97 | #' In $E$ -optimal design, we minimize the norm of the error covariance matrix, i.e. the maximum eigenvalue of $E$. 98 | #' Since the diameter (twice the longest semi-axis) of the confidence ellipsoid $\mathcal{E}$ 99 | #' is proportional to $\|E\|_{2}^{1 / 2}$, 100 | #' minimizing $\|E\|_{2}$ can be interpreted geometrically as minimizing the diameter of the confidence ellipsoid. 101 | #' E-optimal design can also be interpreted as minimizing the maximum variance of $q^{T} e$, 102 | #' over all $q$ with $\|q\|_{2}=1$. 
103 | #' The E-optimal experiment design problem in SDP form is 104 | 105 | #' $$ 106 | #' \begin{array}{cl}{\operatorname{maximize}} & {t} \\ {\text { subject to }} & {\sum_{i=1}^{p} \lambda_{i} v_{i} v_{i}^{T} \succeq t I} \\ {} & {\lambda \succeq 0, \quad \mathbf{1}^{T} \lambda=1}\end{array} 107 | #' $$ 108 | 109 | eOpt = Model(with_optimizer(SCS.Optimizer, verbose = 0)) 110 | @variable(eOpt, np[1:p], lower_bound = 0, upper_bound = nmax) 111 | @variable(eOpt, t) 112 | 113 | @SDconstraint(eOpt, V * diagm(0 => np ./ n) * V' - (t .* eye) >= 0) 114 | @constraint(eOpt, sum(np) <= n) 115 | 116 | @objective(eOpt, Max, t) 117 | 118 | optimize!(eOpt) 119 | 120 | @show objective_value(eOpt); 121 | @show value.(np); 122 | 123 | #' ### D-optimal design 124 | #' The most widely used scalarization is called $D$ -optimal design, 125 | #' in which we minimize the determinant of the error covariance matrix $E$. 126 | #' This corresponds to designing the experiment to minimize the volume of the resulting confidence ellipsoid 127 | #' (for a fixed confidence level). 128 | #' Ignoring the constant factor 1$/ m$ in $E$, and taking the logarithm of the objective, 129 | #' we can pose this problem as convex optimization problem 130 | 131 | #' $$ 132 | #' \begin{array}{ll}{\operatorname{minimize}} & {\log \operatorname{det}\left(\sum_{i=1}^{p} \lambda_{i} v_{i} v_{i}^{T}\right)^{-1}} \\ {\text { subject to }} & {\lambda \succeq 0, \quad \mathbf{1}^{T} \lambda=1}\end{array} 133 | #' $$ 134 | 135 | dOpt = Model(with_optimizer(SCS.Optimizer, verbose = 0)) 136 | @variable(dOpt, np[1:p], lower_bound = 0, upper_bound = nmax) 137 | @variable(dOpt, t) 138 | @objective(dOpt, Max, t) 139 | @constraint(dOpt, sum(np) <= n) 140 | E = V * diagm(0 => np ./ n) * V' 141 | @constraint(dOpt, [t, 1, (E[i, j] for i in 1:q for j in 1:i)...] 
in MOI.LogDetConeTriangle(q)) 142 | 143 | optimize!(dOpt) 144 | 145 | @show objective_value(dOpt); 146 | @show value.(np); 147 | 148 | #' ### References 149 | #' 150 | #' 1. Boyd, S., & Vandenberghe, L. (2004). Convex Optimization. Cambridge: Cambridge University Press. doi:10.1017/CBO9780511804441 -------------------------------------------------------------------------------- /script/using_JuMP/nonlinear_modelling.jl: -------------------------------------------------------------------------------- 1 | #' --- 2 | #' title: Nonlinear Modelling 3 | #' --- 4 | 5 | #' **Originally Contributed by**: Arpit Bhatia 6 | 7 | #' This tutorial provides a breif introduction to nonlinear modelling in JuMP. 8 | #' For more details and specifics, visit the [JuMP docs](http://www.juliaopt.org/JuMP.jl/stable/nlp/). 9 | 10 | #' ## Nonlinear Programs 11 | #' While we have already seen examples of linear, quadratic and conic programs, 12 | #' JuMP also supports other general smooth nonlinear (convex and nonconvex) optimization problems. 13 | 14 | #' A JuMP model object can contain a mix of linear, quadratic, and nonlinear contraints or objective functions. 15 | #' Thus, a model object for a nonlinear program is constructed in the same way as before. 16 | 17 | using JuMP, Ipopt 18 | model = Model(with_optimizer(Ipopt.Optimizer)); 19 | 20 | #' ### Variables 21 | #' Variables are modelled using the `@variable` macro as usual and 22 | #' a starting point may be provided by using the `start` keyword argument 23 | 24 | @variable(model, x, start = 4) 25 | @variable(model, y, start = -9.66); 26 | 27 | #' ### Parameters 28 | #' Only in the case of nonlinear models, JuMP offers a syntax for "parameter" objects 29 | #' which can refer to a numerical value. 
30 | 31 | @NLparameter(model, p == 0.003); # Providing a starting value is necessary for parameters 32 | @NLparameter(model, l[i = 1:10] == 4 - i); # A collection of parameters 33 | 34 | #' The `value` and `set_value` functions are used to query and update the value of a parameter respectively. 35 | 36 | value(l[1]) 37 | 38 | #+ 39 | 40 | set_value(l[1], -4) 41 | value(l[1]) 42 | 43 | #' Parameters are useful since it's faster to modify a model in-place by changing the value of the parameter 44 | #' compared to creating an entirely new model object. 45 | 46 | #' ### Expressions 47 | #' JuMP also supports the creation of arithmetic expressions which can then be inserted into 48 | #' constraints, the objective and other expressions. 49 | 50 | @NLexpression(model, expr_1, sin(x)) 51 | @NLexpression(model, expr_2, asin(expr_1)); # Inserting one expression into another 52 | 53 | #' There are some [syntax rules](https://pkg.julialang.org/docs/JuMP/DmXqY/0.19.2/nlp/#Syntax-notes-1) 54 | #' which must be followed while writing a nonlinear expression. 55 | 56 | #' Note that JuMP also supports linear and quadratic expression. 57 | #' You can find out more about this functionality in the [docs](https://pkg.julialang.org/docs/JuMP/DmXqY/0.19.2/expressions/). 58 | 59 | #' ### Nonlinear Objectives and Constraints 60 | #' Nonlinear objectives and constraints are specified by using the `@NLobjective` and `@NLconstraint` macros. 61 | 62 | @NLconstraint(model, exp(x) + y^4 <= 0) 63 | @NLobjective(model, Min, tan(x) + log(y)) 64 | 65 | #' ### User-defined Functions 66 | #' In addition to supporting a library of built-in functions, 67 | #' JuMP supports the creation of user-defined nonlinear functions to use within nonlinear expressions. 68 | #' The `register` function is used to enable this functionality. 
are an iid sample
This substitution is
the local optimum is also the global optimum,
power output of generators 7 | g_max = [1000, 1000]; 8 | # Minimum power output of generators 9 | g_min = [0, 300]; 10 | # Incremental cost of generators 11 | c_g = [50, 100]; 12 | # Fixed cost of generators 13 | c_g0 = [1000, 0] 14 | # Incremental cost of wind generators 15 | c_w = 50; 16 | # Total demand 17 | d = 1500; 18 | # Wind forecast 19 | w_f = 200; 20 | 21 | 22 | # In this cell we create function solve_ed, which solves the economic dispatch problem for a given set of input parameters. 23 | function solve_ed(g_max, g_min, c_g, c_w, d, w_f) 24 | #Define the economic dispatch (ED) model 25 | ed = Model(with_optimizer(GLPK.Optimizer)) 26 | 27 | # Define decision variables 28 | @variable(ed, 0 <= g[i = 1:2] <= g_max[i]) # power output of generators 29 | @variable(ed, 0 <= w <= w_f) # wind power injection 30 | 31 | # Define the objective function 32 | @objective(ed, Min, dot(c_g, g) + c_w * w) 33 | 34 | # Define the constraint on the maximum and minimum power output of each generator 35 | @constraint(ed, [i = 1:2], g[i] <= g_max[i]) #maximum 36 | @constraint(ed, [i = 1:2], g[i] >= g_min[i]) #minimum 37 | 38 | # Define the constraint on the wind power injection 39 | @constraint(ed, w <= w_f) 40 | 41 | # Define the power balance constraint 42 | @constraint(ed, sum(g) + w == d) 43 | 44 | # Solve statement 45 | optimize!(ed) 46 | 47 | # return the optimal value of the objective function and its minimizers 48 | return value.(g), value(w), w_f - value(w), objective_value(ed) 49 | end 50 | 51 | # Solve the economic dispatch problem 52 | (g_opt, w_opt, ws_opt, obj) = solve_ed(g_max, g_min, c_g, c_w, d, w_f); 53 | 54 | println("\n") 55 | println("Dispatch of Generators: ", g_opt, " MW") 56 | println("Dispatch of Wind: ", w_opt, " MW") 57 | println("Wind spillage: ", w_f - w_opt, " MW") 58 | println("\n") 59 | println("Total cost: ", obj, "\$") 60 | 61 | 62 | c_g_scale_df = DataFrame(Symbol("Dispatch of Generator 1(MW)") => Float64[], 63 | Symbol("Dispatch of Generator 
2(MW)") => Float64[], 64 | Symbol("Dispatch of Wind(MW)") => Float64[], 65 | Symbol("Spillage of Wind(MW)") => Float64[], 66 | Symbol("Total cost(\$)") => Float64[]) 67 | for c_g1_scale = 0.5:0.1:3.0 68 | c_g_scale = [c_g[1] * c_g1_scale, c_g[2]] # update the incremental cost of the first generator at every iteration 69 | g_opt, w_opt, ws_opt, obj = solve_ed(g_max, g_min, c_g_scale, c_w, d, w_f) # solve the ed problem with the updated incremental cost 70 | push!(c_g_scale_df, (g_opt[1], g_opt[2], w_opt, ws_opt, obj)) 71 | end 72 | 73 | 74 | ENV["COLUMNS"]=250 # Helps us display the complete table 75 | c_g_scale_df 76 | 77 | 78 | function solve_ed_inplace(c_w_scale) 79 | start = time() 80 | obj_out = Float64[] 81 | w_out = Float64[] 82 | g1_out = Float64[] 83 | g2_out = Float64[] 84 | 85 | ed = Model(with_optimizer(GLPK.Optimizer)) 86 | 87 | # Define decision variables 88 | @variable(ed, 0 <= g[i = 1:2] <= g_max[i]) # power output of generators 89 | @variable(ed, 0 <= w <= w_f ) # wind power injection 90 | 91 | # Define the objective function 92 | @objective(ed, Min, dot(c_g, g) + c_w * w) 93 | 94 | # Define the constraint on the maximum and minimum power output of each generator 95 | @constraint(ed, [i = 1:2], g[i] <= g_max[i]) #maximum 96 | @constraint(ed, [i = 1:2], g[i] >= g_min[i]) #minimum 97 | 98 | # Define the constraint on the wind power injection 99 | @constraint(ed, w <= w_f) 100 | 101 | # Define the power balance constraint 102 | @constraint(ed, sum(g) + w == d) 103 | 104 | optimize!(ed) 105 | 106 | for c_g1_scale = 0.5:0.01:3.0 107 | @objective(ed, Min, c_g1_scale*c_g[1]*g[1] + c_g[2]*g[2] + c_w_scale*c_w*w) 108 | optimize!(ed) 109 | push!(obj_out, objective_value(ed)) 110 | push!(w_out, value(w)) 111 | push!(g1_out, value(g[1])) 112 | push!(g2_out, value(g[2])) 113 | end 114 | elapsed = time() - start 115 | print(string("elapsed time: ", elapsed, " seconds")) 116 | return obj_out, w_out, g1_out, g2_out 117 | end 118 | 119 | solve_ed_inplace(2.0); 120 | 
121 | 122 | demandscale_df = DataFrame(Symbol("Dispatch of Generators(MW)") => Float64[], 123 | Symbol("Dispatch of Generator 2(MW)") => Float64[], 124 | Symbol("Dispatch of Wind(MW)") => Float64[], 125 | Symbol("Spillage of Wind(MW)") => Float64[], 126 | Symbol("Total cost(\$)") => Float64[]) 127 | 128 | for demandscale = 0.2:0.1:1.5 129 | g_opt,w_opt,ws_opt,obj = solve_ed(g_max, g_min, c_g, c_w, demandscale*d, w_f) 130 | 131 | push!(demandscale_df, (g_opt[1], g_opt[2], w_opt, ws_opt, obj)) 132 | end 133 | 134 | 135 | demandscale_df 136 | 137 | 138 | # In this cell we introduce binary decision u to the economic dispatch problem (function solve_ed) 139 | function solve_uc(g_max, g_min, c_g, c_w, d, w_f) 140 | #Define the unit commitment (UC) model 141 | uc = Model(with_optimizer(GLPK.Optimizer)) 142 | 143 | # Define decision variables 144 | @variable(uc, 0 <= g[i=1:2] <= g_max[i]) # power output of generators 145 | @variable(uc, u[i = 1:2], Bin) # Binary status of generators 146 | @variable(uc, 0 <= w <= w_f ) # wind power injection 147 | 148 | # Define the objective function 149 | @objective(uc, Min, dot(c_g, g) + c_w * w) 150 | 151 | # Define the constraint on the maximum and minimum power output of each generator 152 | @constraint(uc, [i = 1:2], g[i] <= g_max[i]) #maximum 153 | @constraint(uc, [i = 1:2], g[i] >= g_min[i]) #minimum 154 | 155 | # Define the constraint on the wind power injection 156 | @constraint(uc, w <= w_f) 157 | 158 | # Define the power balance constraint 159 | @constraint(uc, sum(g) + w == d) 160 | 161 | # Solve statement 162 | optimize!(uc) 163 | 164 | status = termination_status(uc) 165 | if status != MOI.OPTIMAL 166 | return status, zeros(length(g)), 0.0, 0.0, zeros(length(u)), Inf 167 | end 168 | return status, value.(g), value(w), w_f - value(w), value.(u), objective_value(uc) 169 | end 170 | 171 | # Solve the economic dispatch problem 172 | status, g_opt, w_opt, ws_opt, u_opt, obj = solve_uc(g_max, g_min, c_g, c_w, d, w_f); 173 | 174 | 
to the basics of JuMP code
are referred to
The last argument to the `@variable` macro is usually the variable type. Here we'll look at how to specify the variable type.
Such a reference is
158 | 159 | @constraint(model, [i in 1:3], 6x + 4y >= 5i) 160 | 161 | #' We can also create constraints such as $\sum _{i = 1}^{10} z_i \leq 1$ 162 | 163 | @constraint(model, sum(z[i] for i in 1:10) <= 1) 164 | 165 | #' ## Objective 166 | #' While the recommended way to set the objective is with the @objective macro, the functions `set_objective_sense` and 167 | #' `set_objective_function` provide an equivalent lower-level interface. 168 | 169 | using GLPK 170 | 171 | model = Model(with_optimizer(GLPK.Optimizer)) 172 | @variable(model, x >= 0) 173 | @variable(model, y >= 0) 174 | set_objective_sense(model, MOI.MIN_SENSE) 175 | set_objective_function(model, x + y) 176 | 177 | optimize!(model) 178 | 179 | @show objective_value(model); 180 | 181 | #' To query the objective function from a model, we use the `objective_sense`, `objective_function`, and `objective_function_type` 182 | #' functions. 183 | 184 | objective_sense(model) 185 | 186 | #+ 187 | 188 | objective_function(model) 189 | 190 | #+ 191 | 192 | objective_function_type(model) 193 | 194 | #' # Vectorized Constraints and Objective 195 | #' We can also add constraints and objective to JuMP using vectorized linear algebra. We'll illustrate this by solving an LP in 196 | #' standard form i.e. 
197 | 198 | #' $$ 199 | #' \begin{align*} 200 | #' & \min & c^T x \\ 201 | #' & \;\;\text{s.t.} & A x = b \\ 202 | #' & & x \succeq 0 \\ 203 | #' & & x \in \mathbb{R}^n 204 | #' \end{align*} 205 | #' $$ 206 | 207 | vector_model = Model(with_optimizer(GLPK.Optimizer)) 208 | 209 | A= [ 1 1 9 5; 210 | 3 5 0 8; 211 | 2 0 6 13] 212 | 213 | b = [7; 3; 5] 214 | 215 | c = [1; 3; 5; 2] 216 | 217 | @variable(vector_model, x[1:4] >= 0) 218 | @constraint(vector_model, A * x .== b) 219 | @objective(vector_model, Min, c' * x) 220 | 221 | optimize!(vector_model) 222 | 223 | @show objective_value(vector_model); -------------------------------------------------------------------------------- /script/introduction/solvers_and_solutions.jl: -------------------------------------------------------------------------------- 1 | #' --- 2 | #' title: Solvers and Solutions 3 | #' --- 4 | 5 | #' **Originally Contributed by**: Arpit Bhatia 6 | 7 | #' The purpose of this part of the tutorial is to introduce you to solvers and how to use them with JuMP. We'll also learn 8 | #' what to do with a problem after the solver has finished optimizing it. 9 | 10 | #' ## What is a Solver? 11 | #' A solver is a software package that incorporates algorithms for finding solutions to one or more classes of problem. 12 | #' For example, GLPK, which we used in the previous tutorials is a solver for linear programming (LP) and mixed integer 13 | #' programming (MIP) problems. It incorporates algorithms such as the simplex method, interior-point method etc. JuMP 14 | #' currently supports a number of open-source and commercial solvers which can be viewed 15 | #' [here](http://www.juliaopt.org/JuMP.jl/v0.19.1/installation/#Getting-Solvers-1). 16 | 17 | #' ## What is MathOptInterface? 18 | #' Each mathematical optimization solver API has its own concepts and data structures for representing optimization models 19 | #' and obtaining results. 
However, it is often desirable to represent an instance of an optimization problem at a higher 20 | #' level so that it is easy to try using different solvers. MathOptInterface (MOI) is an abstraction layer designed to provide 21 | #' an interface to mathematical optimization solvers so that users do not need to understand multiple solver-specific 22 | #' APIs. MOI can be used directly, or through a higher-level modeling interface like JuMP. 23 | 24 | #' Note that JuMP reexports MathOptInterface and 25 | #' you can use the shortcut MOI to refer to MathOptInterface in your code. 26 | 27 | #' ## Interacting with solvers 28 | #' JuMP models can be created in three different modes: `AUTOMATIC`, `MANUAL` and `DIRECT`. 29 | #' We'll use the following LP to illustrate them. 30 | 31 | #' $$ 32 | #' \begin{align*} 33 | #' & \max_{x,y} & x + 2y \\ 34 | #' & \;\;\text{s.t.} & x + y &\leq 1 \\ 35 | #' & & 0\leq x, y &\leq 1 \\ 36 | #' \end{align*} 37 | #' $$ 38 | 39 | using JuMP 40 | using GLPK 41 | 42 | #' ### `AUTOMATIC` Mode 43 | #' #### With Optimizer 44 | #' This is the easiest method to use a solver in JuMP. In order to do so, we simply set the solver inside the Model constructor. 45 | 46 | model_auto = Model(with_optimizer(GLPK.Optimizer)) 47 | @variable(model_auto, 0 <= x <= 1) 48 | @variable(model_auto, 0 <= y <= 1) 49 | @constraint(model_auto, x + y <= 1) 50 | @objective(model_auto, Max, x + 2y) 51 | optimize!(model_auto) 52 | objective_value(model_auto) 53 | 54 | #' #### No Optimizer (at first) 55 | #' It is also possible to create a JuMP model with no optimizer attached. After the model object is initialized empty 56 | #' and all its variables, constraints and objective are set, then we can attach the solver at `optimize!` time. 
we do not set an optimizer
To find out the various 96 | #' options available, please check out the individual solver packages. Some examples for the CBC solver are given below. 97 | 98 | using Cbc 99 | 100 | #' To turn off printing (i.e. silence the solver), 101 | 102 | model = Model(with_optimizer(Cbc.Optimizer, logLevel = 0)); 103 | 104 | #' To increase the maximum number of iterations 105 | 106 | model = Model(with_optimizer(Cbc.Optimizer, max_iters = 10000)); 107 | 108 | #' To set the solution timeout limit 109 | 110 | model = Model(with_optimizer(Cbc.Optimizer, seconds = 5)); 111 | 112 | #' ## Querying Solutions 113 | #' So far we have seen all the elements and constructs related to writing a JuMP optimization model. In this section we 114 | #' reach the point of what to do with a solved problem. JuMP follows closely the concepts defined in MathOptInterface to 115 | #' answer user questions about a finished call to `optimize!(model)`. The three main steps in querying a solution are 116 | #' given below. We'll use the model we created in `AUTOMATIC` mode with an optimizer attached in this section. 117 | #' ### Termination Statuses 118 | #' Termination statuses are meant to explain the reason why the optimizer stopped executing in the most recent call 119 | #' to `optimize!`. 120 | 121 | termination_status(model_auto) 122 | 123 | #' You can view the different termination status codes by referring to the docs or though checking the possible types using 124 | #' the below command. 125 | 126 | display(typeof(MOI.OPTIMAL)) 127 | 128 | #' ### Solution Statuses 129 | #' These statuses indicate what kind of result is available to be queried from the model. It's possible that no result is 130 | #' available to be queried. We shall discuss more on the dual status and solutions in the Duality tutorial. 131 | 132 | primal_status(model_auto) 133 | 134 | #+ 135 | 136 | dual_status(model_auto) 137 | 138 | #' As we saw before, the result (solution) status codes can be viewed directly from Julia. 
139 | 140 | display(typeof(MOI.FEASIBLE_POINT)) 141 | 142 | #' ### Obtaining Solutions 143 | #' Provided the primal status is not `MOI.NO_SOLUTION`, we can inspect the solution values and optimal cost. 144 | 145 | @show value(x) 146 | @show value(y) 147 | @show objective_value(model_auto) 148 | 149 | #' Since it is possible that no solution is available to be queried from the model, calls to `value` may throw errors. 150 | #' Hence, it is recommended to check for the presence of solutions. 151 | #+ tangle = false 152 | 153 | model_no_solution = Model(with_optimizer(GLPK.Optimizer)) 154 | @variable(model_no_solution, 0 <= x <= 1) 155 | @variable(model_no_solution, 0 <= y <= 1) 156 | @constraint(model_no_solution, x + y >= 3) 157 | @objective(model_no_solution, Max, x + 2y) 158 | 159 | optimize!(model_no_solution) 160 | 161 | if termination_status(model_no_solution) == MOI.OPTIMAL 162 | optimal_solution = value(x) 163 | optimal_objective = objective_value(model_no_solution) 164 | elseif termination_status(model_no_solution) == MOI.TIME_LIMIT && has_values(model_no_solution) 165 | suboptimal_solution = value(x) 166 | suboptimal_objective = objective_value(model_no_solution) 167 | else 168 | error("The model was not solved correctly.") 169 | end -------------------------------------------------------------------------------- /notebook/modelling/img/g2.svg: -------------------------------------------------------------------------------- 1 | 2 | 4 | 6 | 7 | 9 | 10 | %3 11 | 12 | 13 | j1 14 | 15 | j 16 | 1 17 | 18 | 19 | j2 20 | 21 | j 22 | 2 23 | 24 | 25 | j3 26 | 27 | j 28 | 3 29 | 30 | 31 | j4 32 | 33 | j 34 | 4 35 | 36 | 37 | 1 38 | 39 | 1 40 | 41 | 42 | 1->j1 43 | 44 | 45 | 6 46 | 47 | 48 | 1->j2 49 | 50 | 51 | 4 52 | 53 | 54 | 1->j3 55 | 56 | 57 | 5 58 | 59 | 60 | 2 61 | 62 | 2 63 | 64 | 65 | 2->j2 66 | 67 | 68 | 3 69 | 70 | 71 | 2->j3 72 | 73 | 74 | 6 75 | 76 | 77 | 3 78 | 79 | 3 80 | 81 | 82 | 3->j1 83 | 84 | 85 | 5 86 | 87 | 88 | 3->j3 89 | 90 | 91 | 4 92 | 93 | 
94 | 3->j4 95 | 96 | 97 | 3 98 | 99 | 100 | 4 101 | 102 | 4 103 | 104 | 105 | 4->j1 106 | 107 | 108 | 7 109 | 110 | 111 | 4->j2 112 | 113 | 114 | 5 115 | 116 | 117 | 4->j3 118 | 119 | 120 | 5 121 | 122 | 123 | 4->j4 124 | 125 | 126 | 5 127 | 128 | 129 | 130 | -------------------------------------------------------------------------------- /notebook/modelling/sudoku.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Sudoku" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "**Originally Contributed by**: Iain Dunning" 15 | ] 16 | }, 17 | { 18 | "cell_type": "markdown", 19 | "metadata": {}, 20 | "source": [ 21 | "\"Partially\n", 22 | "

A partially solved Sudoku puzzle

it's actually a *feasibility* problem: we wish to find a feasible solution that satisfies these rules.
We'll actually start with something obvious to us as humans, but what we need to enforce: that there can be only one number per cell." 79 | ] 80 | }, 81 | { 82 | "cell_type": "code", 83 | "execution_count": 3, 84 | "metadata": {}, 85 | "outputs": [], 86 | "source": [ 87 | "for i = 1:9, j = 1:9 # Each row and each column\n", 88 | " # Sum across all the possible digits\n", 89 | " # One and only one of the digits can be in this cell, \n", 90 | " # so the sum must be equal to one\n", 91 | " @constraint(sudoku, sum(x[i,j,k] for k in 1:9) == 1)\n", 92 | "end" 93 | ] 94 | }, 95 | { 96 | "cell_type": "markdown", 97 | "metadata": {}, 98 | "source": [ 99 | "Next we'll add the constraints for the rows and the columns. These constraints are all very similar, so much so that we can actually add them at the same time." 100 | ] 101 | }, 102 | { 103 | "cell_type": "code", 104 | "execution_count": 4, 105 | "metadata": {}, 106 | "outputs": [], 107 | "source": [ 108 | "for ind = 1:9 # Each row, OR each column\n", 109 | " for k = 1:9 # Each digit\n", 110 | " # Sum across columns (j) - row constraint\n", 111 | " @constraint(sudoku, sum(x[ind,j,k] for j in 1:9) == 1)\n", 112 | " # Sum across rows (i) - column constraint\n", 113 | " @constraint(sudoku, sum(x[i,ind,k] for i in 1:9) == 1)\n", 114 | " end\n", 115 | "end" 116 | ] 117 | }, 118 | { 119 | "cell_type": "markdown", 120 | "metadata": {}, 121 | "source": [ 122 | "Finally, we have the to enforce the constraint that each digit appears once in each of the nine 3x3 sub-grids. Our strategy will be to index over the top-left corners of each 3x3 square with `for` loops, then sum over the squares." 123 | ] 124 | }, 125 | { 126 | "cell_type": "code", 127 | "execution_count": 5, 128 | "metadata": {}, 129 | "outputs": [], 130 | "source": [ 131 | "for i = 1:3:7, j = 1:3:7, k = 1:9\n", 132 | " # i is the top left row, j is the top left column\n", 133 | " # We'll sum from i to i+2, e.g. 
i=4, r=4, 5, 6\n", 134 | " @constraint(sudoku, sum(x[r,c,k] for r in i:i+2, c in j:j+2) == 1)\n", 135 | "end" 136 | ] 137 | }, 138 | { 139 | "cell_type": "markdown", 140 | "metadata": {}, 141 | "source": [ 142 | "The final step is to add the initial solution as a set of constraints. We'll solve the problem that is in the picture at the start of the notebook. We'll put a `0` if there is no digit in that location." 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": 6, 148 | "metadata": {}, 149 | "outputs": [ 150 | { 151 | "data": { 152 | "text/plain": [ 153 | "9×9 Array{Int64,2}:\n", 154 | " 5 3 4 6 7 8 9 1 2\n", 155 | " 6 7 2 1 9 5 3 4 8\n", 156 | " 1 9 8 3 4 2 5 6 7\n", 157 | " 8 5 9 7 6 1 4 2 3\n", 158 | " 4 2 6 8 5 3 7 9 1\n", 159 | " 7 1 3 9 2 4 8 5 6\n", 160 | " 9 6 1 5 3 7 2 8 4\n", 161 | " 2 8 7 4 1 9 6 3 5\n", 162 | " 3 4 5 2 8 6 1 7 9" 163 | ] 164 | }, 165 | "execution_count": 6, 166 | "metadata": {}, 167 | "output_type": "execute_result" 168 | } 169 | ], 170 | "source": [ 171 | "# The given digits\n", 172 | "init_sol = [ 5 3 0 0 7 0 0 0 0;\n", 173 | " 6 0 0 1 9 5 0 0 0;\n", 174 | " 0 9 8 0 0 0 0 6 0;\n", 175 | " 8 0 0 0 6 0 0 0 3;\n", 176 | " 4 0 0 8 0 3 0 0 1;\n", 177 | " 7 0 0 0 2 0 0 0 6;\n", 178 | " 0 6 0 0 0 0 2 8 0;\n", 179 | " 0 0 0 4 1 9 0 0 5;\n", 180 | " 0 0 0 0 8 0 0 7 9]\n", 181 | "for i = 1:9, j = 1:9\n", 182 | " # If the space isn't empty\n", 183 | " if init_sol[i,j] != 0\n", 184 | " # Then the corresponding variable for that digit\n", 185 | " # and location must be 1\n", 186 | " @constraint(sudoku, x[i,j,init_sol[i,j]] == 1)\n", 187 | " end\n", 188 | "end\n", 189 | "\n", 190 | "# solve problem\n", 191 | "optimize!(sudoku)\n", 192 | "\n", 193 | "# Extract the values of x\n", 194 | "x_val = value.(x)\n", 195 | "# Create a matrix to store the solution\n", 196 | "sol = zeros(Int,9,9) # 9x9 matrix of integers\n", 197 | "for i in 1:9, j in 1:9, k in 1:9\n", 198 | " # Integer programs are solved as a series of linear 
programs\n", 199 | " # so the values might not be precisely 0 and 1. We can just\n", 200 | " # round them to the nearest integer to make it easier\n", 201 | " if round(Int,x_val[i,j,k]) == 1\n", 202 | " sol[i,j] = k\n", 203 | " end\n", 204 | "end\n", 205 | "# Display the solution\n", 206 | "sol" 207 | ] 208 | }, 209 | { 210 | "cell_type": "markdown", 211 | "metadata": {}, 212 | "source": [ 213 | "Which is the correct solution:\n", 214 | "\"Fully" 215 | ] 216 | } 217 | ], 218 | "metadata": { 219 | "kernelspec": { 220 | "display_name": "Julia 1.0.3", 221 | "language": "julia", 222 | "name": "julia-1.0" 223 | }, 224 | "language_info": { 225 | "file_extension": ".jl", 226 | "mimetype": "application/julia", 227 | "name": "julia", 228 | "version": "1.0.3" 229 | } 230 | }, 231 | "nbformat": 4, 232 | "nbformat_minor": 2 233 | } 234 | -------------------------------------------------------------------------------- /script/using_JuMP/working_with_data_files.jl: -------------------------------------------------------------------------------- 1 | #' --- 2 | #' title: Working with Data Files 3 | #' --- 4 | 5 | #' **Originally Contributed by**: Arpit Bhatia 6 | 7 | #' In many cases we might need to read data available in an external file rather than type it into Julia ourselves. 8 | #' This tutorial is concerned with reading tabular data into Julia and using it for a JuMP model. 9 | #' We'll be reading data using the DataFrames.jl package and some other packages specific to file types. 10 | 11 | #' Note: There are multiple ways to read the same kind of data intto Julia. 12 | #' However, this tutorial only focuses on DataFrames.jl as 13 | #' it provides the ecosystem to work with most of the required file types in a straightforward manner. 14 | 15 | #' ### DataFrames.jl 16 | 17 | #' The DataFrames package provides a set of tools for working with tabular data. 18 | #' It is available through the Julia package system. 
#+ tangle = false

using Pkg
Pkg.add("DataFrames")

#' ### What is a DataFrame?

#' A DataFrame is a data structure like a table or spreadsheet. You can use it for storing and exploring a set of related data values.
#' Think of it as a smarter array for holding tabular data.

#' ## Reading Tabular Data into a DataFrame
#' We will begin by reading data from different file formats into a DataFrame object.
#' The example files that we will be reading are present in the data folder.

#' ### Excel Sheets
#' Excel files can be read using the XLSX.jl package.
#+ tangle = false

Pkg.add("XLSX")

#' To read an Excel file into a DataFrame, we use the following Julia code.
#' The first argument to the `readtable` function is the file to be read and the second argument is the name of the sheet.

using DataFrames
using XLSX

#+

data_dir = joinpath(@__DIR__, "data")
excel_df = DataFrame(XLSX.readtable(joinpath(data_dir, "SalesData.xlsx"), "SalesOrders")...)

#' ### CSV Files
#' CSV and other delimited text files can be read using the CSV.jl package.
#+ tangle = false

Pkg.add("CSV")

#' To read a CSV file into a DataFrame, we use the `CSV.read` function.

using CSV
csv_df = CSV.read(joinpath(data_dir, "StarWars.csv"))

#' ### Other Delimited Files
#' We can also use the CSV.jl package to read any other delimited text file format.
#' By default, CSV.File will try to detect a file's delimiter from the first 10 lines of the file.
#' Candidate delimiters include `','`, `'\t'`, `' '`, `'|'`, `';'`, and `':'`. If it can't auto-detect the delimiter, it will assume `','`.
#' Let's take the example of space separated data.

ss_df = CSV.read(joinpath(data_dir, "Cereal.txt"))

#' We can also specify the delimiter by passing the `delim` argument.

delim_df = CSV.read(joinpath(data_dir, "Soccer.txt"), delim = "::")

#' Note that, by default, the columns of the DataFrame returned are read-only. If we wish to make changes to the data read, we pass the `copycols = true` argument in the function call.
#+ tangle = false

ss_df = CSV.read(joinpath(data_dir, "Cereal.txt"), copycols = true)


#' ## Working with DataFrames
#' Now that we have read the required data into a DataFrame, let us look at some basic operations we can perform on it.

#' ### Querying Basic Information
#' The `size` function gets us the dimensions of the DataFrame.

size(ss_df)

#' We can also use the `nrow` and `ncol` functions to get the number of rows and columns respectively.

nrow(ss_df), ncol(ss_df)

#' The `describe` function gives basic summary statistics of data in a DataFrame.

describe(ss_df)

#' Names of every column can be obtained by the `names` function.

names(ss_df)

#' Corresponding data types are obtained using the broadcasted `eltype` function.

eltype.(ss_df)

#' ### Accessing the Data
#' Similar to regular arrays, we use numerical indexing to access elements of a DataFrame.

csv_df[1,1]

#' The following are different ways to access a column.

csv_df[!, 1]

#+

csv_df[!, :Name]

#+

csv_df.Name

#+

csv_df[:, 1] # note that this creates a copy

#' The following are different ways to access a row.

csv_df[1:1, :]

#+

csv_df[1, :] # this produces a DataFrameRow

#' We can change the values just as we normally assign values.

#' Assign a range to scalar.

excel_df[1:3, 5] .= 1

#' Vector to equal length vector.

excel_df[4:6, 5] = [4, 5, 6]

#' Subset of the DataFrame to another data frame of matching size.

excel_df[1:2, 6:7] = DataFrame([-2 -2; -2 -2], [Symbol("Unit Cost"), :Total])

#+

excel_df

#' There are a lot more things which can be done with a DataFrame.
#' See the [docs](https://juliadata.github.io/DataFrames.jl/stable/) for more information.

#' ## A Complete Modelling Example - Passport Problem

#' Let's now apply what we have learnt to solve a real modelling problem.

#' The [Passport Index Dataset](https://github.com/ilyankou/passport-index-dataset)
#' lists travel visa requirements for 199 countries, in .csv format.
#' Our task is to find out the minimum number of passports required to visit all countries.

#' In this dataset, the first column represents a passport (=from) and each remaining column represents a foreign country (=to).
#' The values in each cell are as follows:
#' * 3 = visa-free travel
#' * 2 = eTA is required
#' * 1 = visa can be obtained on arrival
#' * 0 = visa is required
#' * -1 is for all instances where passport and destination are the same

#' Our task is to find out the minimum number of passports needed to visit every country without requiring a visa.
#' Thus, the values we are interested in are -1 and 3. Let us modify the data in the following manner -

passportdata = CSV.read(joinpath(data_dir, "passport-index-matrix.csv"), copycols = true)

for i in 1:nrow(passportdata)
    for j in 2:ncol(passportdata)
        if passportdata[i,j] == -1 || passportdata[i,j] == 3
            passportdata[i,j] = 1
        else
            passportdata[i,j] = 0
        end
    end
end

#' The values in the cells now represent:
#' * 1 = no visa required for travel
#' * 0 = visa required for travel

#' Let us associate each passport with a decision variable $pass_{cntr}$ for each country.
#' We want to minimize the sum $\sum pass_{cntr}$ over all countries.

#' Since we wish to visit all the countries, for every country,
#' we should own at least one passport that lets us travel to that country visa free.
#' For one destination, this can be mathematically represented as $\sum_{cntr \in world} passportdata_{cntr,dest} \cdot pass_{cntr} \geq 1$.

#' Thus, we can represent this problem using the following model:

#' $$
#' \begin{align*}
#' \min && \sum_{cntr \in World} pass_{cntr} \\
#' \text{s.t.} && \sum_{cntr \in World} passportdata_{cntr,dest} \cdot pass_{cntr} \geq 1 && \forall dest \in World \\
#' && pass_{cntr} \in \{0,1\} && \forall cntr \in World
#' \end{align*}
#' $$

#' We'll now solve the problem using JuMP.
206 | 207 | using JuMP, GLPK 208 | 209 | # Finding number of countries 210 | n = ncol(passportdata) - 1 # Subtract 1 for column representing country of passport 211 | 212 | model = Model(with_optimizer(GLPK.Optimizer)) 213 | @variable(model, pass[1:n], Bin) 214 | @constraint(model, [j = 2:n], sum(passportdata[i,j] * pass[i] for i in 1:n) >= 1) 215 | @objective(model, Min, sum(pass)) 216 | optimize!(model) 217 | 218 | println("Minimum number of passports needed: ", objective_value(model)) 219 | 220 | #+ 221 | 222 | countryindex = findall(value.(pass) .== 1 ) 223 | 224 | print("Countries: ") 225 | for i in countryindex 226 | print(names(passportdata)[i+1], " ") 227 | end -------------------------------------------------------------------------------- /notebook/introduction/getting_started_with_JuMP.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Getting Started with JuMP" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "**Originally Contributed by**: Arpit Bhatia" 15 | ] 16 | }, 17 | { 18 | "cell_type": "markdown", 19 | "metadata": {}, 20 | "source": [ 21 | "This tutorial is aimed at providing a quick introduction to writing JuMP code. It assumes familiar with basic optimization and \n", 22 | "the notion of an [AML](https://en.wikipedia.org/wiki/Algebraic_modeling_language)." 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "## What is JuMP?\n", 30 | "JuMP (\"Julia for Mathematical Programming\") is an open-source modeling language that is embedded in Julia. It allows users to \n", 31 | "users formulate various classes of optimization problems (linear, mixed-integer, quadratic, conic quadratic, semidefinite, \n", 32 | "and nonlinear) with easy-to-read code. 
These problems can then be solved using state-of-the-art open-source and commercial solvers.\n", 33 | "JuMP also makes advanced optimization techniques easily accessible from a high-level language." 34 | ] 35 | }, 36 | { 37 | "cell_type": "markdown", 38 | "metadata": {}, 39 | "source": [ 40 | "## Installing JuMP\n", 41 | "JuMP is a package for Julia. From Julia, JuMP is installed by using the built-in package manager." 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": 1, 47 | "metadata": {}, 48 | "outputs": [ 49 | { 50 | "name": "stdout", 51 | "output_type": "stream", 52 | "text": [ 53 | "\u001b[32m\u001b[1m Updating\u001b[22m\u001b[39m registry at `~/.julia/registries/General`\n", 54 | "\u001b[32m\u001b[1m Updating\u001b[22m\u001b[39m git-repo `https://github.com/JuliaRegistries/General.git`\n", 55 | "\u001b[?25l\u001b[2K\u001b[?25h\u001b[32m\u001b[1m Resolving\u001b[22m\u001b[39m package versions...\n", 56 | "\u001b[32m\u001b[1m Updating\u001b[22m\u001b[39m `~/Desktop/Untitled Folder/JuMPTutorials.jl/Project.toml`\n", 57 | "\u001b[90m [no changes]\u001b[39m\n", 58 | "\u001b[32m\u001b[1m Updating\u001b[22m\u001b[39m `~/Desktop/Untitled Folder/JuMPTutorials.jl/Manifest.toml`\n", 59 | "\u001b[90m [no changes]\u001b[39m\n" 60 | ] 61 | } 62 | ], 63 | "source": [ 64 | "import Pkg\n", 65 | "Pkg.add(\"JuMP\")" 66 | ] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "metadata": {}, 71 | "source": [ 72 | "## A Complete Example\n", 73 | "Let's try to solve the following linear programming problem by using JuMP and GLPK (a linear and mixed integer programming \n", 74 | "solver). We will first look at the complete code to solve the problem and then go through it step by step." 
75 | ] 76 | }, 77 | { 78 | "cell_type": "markdown", 79 | "metadata": {}, 80 | "source": [ 81 | "$$\n", 82 | "\\begin{align*}\n", 83 | "& \\min & 12x + 20y \\\\\n", 84 | "& \\;\\;\\text{s.t.} & 6x + 8y \\geq 100 \\\\\n", 85 | "& & 7x + 12y \\geq 120 \\\\ \n", 86 | "& & x \\geq 0 \\\\ \n", 87 | "& & y \\geq 0 \\\\ \n", 88 | "\\end{align*}\n", 89 | "$$" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 2, 95 | "metadata": {}, 96 | "outputs": [ 97 | { 98 | "name": "stdout", 99 | "output_type": "stream", 100 | "text": [ 101 | "value(x) = 14.999999999999993\n", 102 | "value(y) = 1.2500000000000047\n", 103 | "objective_value(model) = 205.0\n" 104 | ] 105 | } 106 | ], 107 | "source": [ 108 | "using JuMP\n", 109 | "using GLPK\n", 110 | "\n", 111 | "model = Model(with_optimizer(GLPK.Optimizer))\n", 112 | "@variable(model, x >= 0)\n", 113 | "@variable(model, y >= 0)\n", 114 | "@constraint(model, 6x + 8y >= 100)\n", 115 | "@constraint(model, 7x + 12y >= 120)\n", 116 | "@objective(model, Min, 12x + 20y)\n", 117 | "\n", 118 | "optimize!(model)\n", 119 | "\n", 120 | "@show value(x);\n", 121 | "@show value(y);\n", 122 | "@show objective_value(model);" 123 | ] 124 | }, 125 | { 126 | "cell_type": "markdown", 127 | "metadata": {}, 128 | "source": [ 129 | "## Step by Step JuMP Code\n", 130 | "Once JuMP is installed, to use JuMP in your programs, we just need to write-" 131 | ] 132 | }, 133 | { 134 | "cell_type": "code", 135 | "execution_count": 3, 136 | "metadata": {}, 137 | "outputs": [], 138 | "source": [ 139 | "using JuMP" 140 | ] 141 | }, 142 | { 143 | "cell_type": "markdown", 144 | "metadata": {}, 145 | "source": [ 146 | "We also need to include a Julia package which provides an appropriate solver. We want to use GLPK.Optimizer here which is \n", 147 | "provided by the GLPK.jl package." 
148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": 4, 153 | "metadata": {}, 154 | "outputs": [], 155 | "source": [ 156 | "using GLPK" 157 | ] 158 | }, 159 | { 160 | "cell_type": "markdown", 161 | "metadata": {}, 162 | "source": [ 163 | "A model object is a container for variables, constraints, solver options, etc. Models are created with the Model() function. \n", 164 | "The `with_optimizer` syntax is used to specify the optimizer to be used which is GLPK in this case." 165 | ] 166 | }, 167 | { 168 | "cell_type": "code", 169 | "execution_count": 5, 170 | "metadata": {}, 171 | "outputs": [], 172 | "source": [ 173 | "model = Model(with_optimizer(GLPK.Optimizer));" 174 | ] 175 | }, 176 | { 177 | "cell_type": "markdown", 178 | "metadata": {}, 179 | "source": [ 180 | "A variable is modelled using `@variable(name of the model object, variable name and bound, variable type)`. The bound can be a\n", 181 | "lower bound, an upper bound or both. If no variable type is defined, then it is treated as real." 182 | ] 183 | }, 184 | { 185 | "cell_type": "code", 186 | "execution_count": 6, 187 | "metadata": {}, 188 | "outputs": [], 189 | "source": [ 190 | "@variable(model, x >= 0)\n", 191 | "@variable(model, y >= 0);" 192 | ] 193 | }, 194 | { 195 | "cell_type": "markdown", 196 | "metadata": {}, 197 | "source": [ 198 | "A constraint is modelled using `@constraint(name of the model object, constraint)`." 
199 | ] 200 | }, 201 | { 202 | "cell_type": "code", 203 | "execution_count": 7, 204 | "metadata": {}, 205 | "outputs": [], 206 | "source": [ 207 | "@constraint(model, 6x + 8y >= 100)\n", 208 | "@constraint(model, 7x + 12y >= 120);" 209 | ] 210 | }, 211 | { 212 | "cell_type": "markdown", 213 | "metadata": {}, 214 | "source": [ 215 | "The objective is set in a similar manner using `@objective(name of the model object, Min/Max, function to be optimized)`" 216 | ] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "execution_count": 8, 221 | "metadata": {}, 222 | "outputs": [], 223 | "source": [ 224 | "@objective(model, Min, 12x + 20y);" 225 | ] 226 | }, 227 | { 228 | "cell_type": "markdown", 229 | "metadata": {}, 230 | "source": [ 231 | "To solve the optimization problem, we call the optimize function." 232 | ] 233 | }, 234 | { 235 | "cell_type": "code", 236 | "execution_count": 9, 237 | "metadata": {}, 238 | "outputs": [], 239 | "source": [ 240 | "optimize!(model)" 241 | ] 242 | }, 243 | { 244 | "cell_type": "markdown", 245 | "metadata": {}, 246 | "source": [ 247 | "Let's now check the value of objective and variables." 
248 | ] 249 | }, 250 | { 251 | "cell_type": "code", 252 | "execution_count": 10, 253 | "metadata": {}, 254 | "outputs": [ 255 | { 256 | "name": "stdout", 257 | "output_type": "stream", 258 | "text": [ 259 | "value(x) = 14.999999999999993\n", 260 | "value(y) = 1.2500000000000047\n", 261 | "objective_value(model) = 205.0\n" 262 | ] 263 | } 264 | ], 265 | "source": [ 266 | "@show value(x);\n", 267 | "@show value(y);\n", 268 | "@show objective_value(model);" 269 | ] 270 | } 271 | ], 272 | "metadata": { 273 | "kernelspec": { 274 | "display_name": "Julia 1.0.3", 275 | "language": "julia", 276 | "name": "julia-1.0" 277 | }, 278 | "language_info": { 279 | "file_extension": ".jl", 280 | "mimetype": "application/julia", 281 | "name": "julia", 282 | "version": "1.0.3" 283 | } 284 | }, 285 | "nbformat": 4, 286 | "nbformat_minor": 2 287 | } 288 | -------------------------------------------------------------------------------- /script/modelling/problems_on_graphs.jl: -------------------------------------------------------------------------------- 1 | #' --- 2 | #' title: Problems on Graphs 3 | #' --- 4 | 5 | #' **Originally Contributed by**: Arpit Bhatia 6 | 7 | #' In the mathematical discipline of graph theory, a number of problems can be solved by modelling them as optimization problems. 8 | #' We will see some examples of such problems in the tutorial. 9 | #' These problems are also sometimes referred to as combinatorial optimization problems as 10 | #' they consist of finding an optimal object from a finite set of objects. 11 | 12 | #' Let's first import all the packages we will be using for this tutorial. 13 | #' We'll be plotting all the graphs we work with in this tutorial for which we will need some additional packages. 14 | 15 | using JuMP 16 | using GLPK 17 | using GraphPlot 18 | using LightGraphs 19 | using Colors 20 | 21 | #' ## Representing Graphs 22 | #' For the purpose of this tutorial, we will represent graphs using adjacency matrices. 
23 | #' An adjacency matrix, sometimes also called the connection matrix, is a square matrix used to represent a finite graph. 24 | #' Its rows and columns are labeled by the graph vertices, 25 | #' with a 1 or 0 in position ($v_{i}$,$v_{j}$) according to whether $v_{i}$ and $v_{j}$ are adjacent or not. 26 | 27 | #' ## Minimum Vertex Cover 28 | #' Given a graph $G = (V, E)$, a vertex-cover $V' \subset V$ of $G$ is a collection of vertices such that 29 | #' each edge in $E$ is incident to at least one of the vertices in $V'$. 30 | #' The size of a vertex-cover $|V'|$ is the number of vertices present in the cover. 31 | #' We wish to find the minimum vertex cover of $G$ i.e. a minimum size vertex cover. 32 | #' We model this problem as an ILP by defining a decision variable $y_{v}$ for each vertex $v \in V$ and 33 | #' a constraint for each edge $e \in E$ as follows: 34 | 35 | #' $$ 36 | #' \begin{align*} 37 | #' \min && \sum_{v \in V} y_{v} \\ 38 | #' s.t. && y_{u} + y_{v} \geq 1 && \forall \{u,v\} \in E \\ 39 | #' && y_{v} \in \{0,1\} && \forall v \in V 40 | #' \end{align*} 41 | #' $$ 42 | 43 | G = [ 44 | 0 1 0 0 0 0; 45 | 1 0 1 1 0 0; 46 | 0 1 0 0 1 1; 47 | 0 1 0 0 1 0; 48 | 0 0 1 1 0 0; 49 | 0 0 1 0 0 0 50 | ] 51 | 52 | g = SimpleGraph(G) 53 | 54 | gplot(g) 55 | 56 | #+ 57 | 58 | vertex_cover = Model(with_optimizer(GLPK.Optimizer)) 59 | 60 | @variable(vertex_cover, y[1:nv(g)], Bin) 61 | @constraint(vertex_cover, [i = 1:nv(g), j = 1:nv(g); G[i,j] == 1], y[i] + y[j] >= 1) 62 | @objective(vertex_cover, Min, sum(y)) 63 | 64 | optimize!(vertex_cover) 65 | @show value.(y); 66 | 67 | #+ 68 | 69 | membership = convert(Array{Int},value.(y)) # Change to Int 70 | membership = membership + ones(Int, nv(g)) # Make the color groups one indexed 71 | nodecolor = [colorant"red", colorant"blue"] # Blue to represent vertices in the cover 72 | nodefillc = nodecolor[membership] 73 | gplot(g, nodefillc = nodefillc) 74 | 75 | #' ## Dominating Set 76 | #' A dominating set in a graph $G 
= (V, E)$ is a set $S \subset V$ such that
#' for each vertex $v \in V$ either $v$ or one of its neighbours should be in $S$.
#' Note that for some vertex $u$, $u$ and its neighbour both can be present in $S$.
#' We wish to find the smallest dominating set for a graph.
#' We model this problem as an ILP by defining a decision variable $x_{v}$ for each vertex $v \in V$ along with
#' a constraint for its closed neighbourhood (the vertex itself together with its neighbours).

#' $$
#' \begin{align*}
#' \min && \sum_{v \in V} x_{v} \\
#' s.t. && x_{v} + \sum_{u \in N(v)}x_{u} \geq 1 && \forall v \in V \\
#' && x_{v} \in \{0,1\} && \forall v \in V
#' \end{align*}
#' $$

G = [
    0 1 0 0 0 0 0 0 0 1 0;
    1 0 1 0 0 0 0 0 0 0 1;
    0 1 0 1 0 1 0 0 0 0 0;
    0 0 1 0 1 0 0 0 0 0 0;
    0 0 0 1 0 1 0 0 0 0 0;
    0 0 1 0 1 0 1 0 0 0 0;
    0 0 0 0 0 1 0 1 0 0 0;
    0 0 0 0 0 0 1 0 1 0 1;
    0 0 0 0 0 0 0 1 0 1 1;
    1 0 0 0 0 0 0 0 1 0 1;
    0 1 0 0 0 0 0 1 1 1 0
]

g = SimpleGraph(G)

gplot(g)

#+

dominating_set = Model(with_optimizer(GLPK.Optimizer))

@variable(dominating_set, x[1:nv(g)], Bin)
# Each vertex must be covered by its *closed* neighbourhood: x[i] itself can
# satisfy the requirement. (G has a zero diagonal, so sum(G[i,:] .* x) alone
# would only count the neighbours, which models a total dominating set instead.)
@constraint(dominating_set, [i = 1:nv(g)], x[i] + sum(G[i,:] .* x) >= 1)
@objective(dominating_set, Min, sum(x))

optimize!(dominating_set)
@show value.(x);

#+

membership = convert(Array{Int},value.(x)) # Change to Int
membership = membership + ones(Int, nv(g)) # Make the color groups one indexed
nodecolor = [colorant"red", colorant"blue"] # Blue to represent vertices in the set
nodefillc = nodecolor[membership]
gplot(g, nodefillc = nodefillc)


#' ## Maximum Matching Problem
#' Given a graph $G = (V, E)$, a matching $M \subset E$ of $G$ is a collection of vertex disjoint edges.
#' The size of the matching $M$ is the number of edges present in $M$ i.e. $|M|$.
#' We wish to find the maximum matching of $G$ i.e. a matching of maximum size.
#' We can solve this problem by modelling it as an integer linear program (ILP).
#' We define a decision variable $m_{e}$ for each edge $e \in E$ and a constraint for each vertex $u \in V$ as follows:

#' $$
#' \begin{align*}
#' \max && \sum_{e \in E} m_{e} \\
#' s.t. && \sum_{e \sim u} m_{e} \leq 1 && \forall u \in V \\
#' && m_{e} \in \{0,1\} && \forall e \in E
#' \end{align*}
#' $$

#' Let's now use JuMP to solve this problem for a sample graph.

G = [
    0 0 0 0 1 0 0 0;
    0 0 0 0 0 1 0 0;
    0 0 0 0 0 0 1 0;
    0 0 0 0 0 0 0 1;
    1 0 0 0 0 1 0 1;
    0 1 0 0 1 0 1 0;
    0 0 1 0 0 1 0 1;
    0 0 0 1 1 0 1 0;
]

g = SimpleGraph(G)

gplot(g)

#+

matching = Model(with_optimizer(GLPK.Optimizer))

# m[i,j] = 1 iff edge (i,j) is in the matching; the symmetry constraint below
# makes m[i,j] and m[j,i] represent the same undirected edge.
@variable(matching, m[i = 1:nv(g), j = 1:nv(g)], Bin)
# At most one matched edge may touch each vertex
@constraint(matching, [i = 1:nv(g)], sum(m[i,:]) <= 1)
# Only actual edges of the graph may be selected
@constraint(matching, [i = 1:nv(g), j = 1:nv(g); G[i,j] == 0], m[i,j] == 0)
@constraint(matching, [i = 1:nv(g), j = 1:nv(g)], m[i,j] == m[j,i])
@objective(matching, Max, sum(m))

optimize!(matching)
@show value.(m);

#' The edges corresponding to the Matching are marked as one in the above matrix.

#' ## k-Coloring Problem
#' A k-coloring of a graph $G=(V,E)$ is a function $c: V \rightarrow \{1,2...k\}$ such that
#' $c(u) \neq c(v)$ for every edge $(u,v) \in E$. In other words, the numbers 1,2...k represent k colors,
#' and adjacent vertices must have different colours.
#' The goal of a graph coloring problem is to find a minimum number of colours needed to colour a graph.

#' We model this problem as an ILP by defining a decision variable $z_{i}$ for each colour we have available.
#' Given an upper bound $k$ on the number of colors needed,
#' we use $|V| \times k$ decision variables $c_{v,k}$ denoting if vertex $v$ is assigned color $k$,
#' together with a binary variable $z_{i}$ indicating whether colour $i$ is used at all.
#' Our model will become:

#' $$
#' \begin{align*}
#' \min && \sum_{i=1}^{k} z_{i} \\
#' s.t. && \sum_{i=1}^{k} c_{v,i} = 1 && \forall v \in V \\
#' && c_{u,i} + c_{v,i} \leq 1 && \forall (u,v) \in E, i \in \{1,2...k\} \\
#' && c_{v,i} \leq z_{i} && \forall v \in V, i \in \{1,2...k\} \\
#' && c_{v,i} \in \{0,1\} && \forall v \in V, i \in \{1,2...k\} \\
#' && z_{i} \in \{0,1\} && \forall i \in \{1,2...k\}
#' \end{align*}
#' $$

G = [
    0 1 0 0 1 1 0 0 0 0;
    1 0 1 0 0 0 1 0 0 0;
    0 1 0 1 0 0 0 1 0 0;
    0 0 1 0 1 0 0 0 1 0;
    1 0 0 1 0 0 0 0 0 1;
    1 0 0 0 0 0 1 0 0 1;
    0 1 0 0 0 1 0 1 0 0;
    0 0 1 0 0 0 1 0 1 0;
    0 0 0 1 0 0 0 1 0 1;
    0 0 0 0 1 1 0 0 1 0;
]

g = SimpleGraph(G)

gplot(g)

#+

k = nv(g)

k_colouring = Model(with_optimizer(GLPK.Optimizer))

@variable(k_colouring, z[1:k], Bin)
@variable(k_colouring, c[1:nv(g),1:k], Bin)
# Each vertex receives exactly one colour
@constraint(k_colouring, [i = 1:nv(g)], sum(c[i,:]) == 1)
# Adjacent vertices may not share a colour
@constraint(k_colouring, [i = 1:nv(g), j = 1:nv(g), l = 1:k; G[i,j] == 1], c[i,l] + c[j,l] <= 1)
# A colour counts as used (z[l] = 1) as soon as any vertex is assigned it
@constraint(k_colouring, [i = 1:nv(g), l = 1:k], c[i,l] <= z[l])

@objective(k_colouring, Min, sum(z))

optimize!(k_colouring)
@show value.(z);
@show value.(c);

#+

c = value.(c)
# Recover each vertex's colour index from the 0/1 assignment matrix
membership = zeros(nv(g))
for i in 1:nv(g)
    for j in 1:k
        if c[i,j] == 1
            membership[i] = j
            break
        end
    end
end
membership = convert(Array{Int},membership)

nodecolor = distinguishable_colors(nv(g), colorant"green")
nodefillc = nodecolor[membership]
gplot(g, nodefillc = nodefillc)
--------------------------------------------------------------------------------
/script/modelling/geometric_problems.jl: -------------------------------------------------------------------------------- 1 | #' --- 2 | #' title: Geometric Problems 3 | #' --- 4 | 5 | #' **Originally Contributed by**: Arpit Bhatia 6 | 7 | #' These problems in this tutorial are drawn from Chapter 8 of the book 8 | #' Convex Optimization by Boyd and Vandenberghe[[1]](#c1) 9 | 10 | using JuMP 11 | using Ipopt 12 | using Random 13 | # for plots 14 | using Gadfly 15 | using DataFrames 16 | 17 | Random.seed!(1234); 18 | 19 | #' ## Euclidean Projection on a Hyperplane 20 | #' For a given point $x_{0}$ and a set $C$, we refer to any point $z \in C$ 21 | #' which is closest to $x_{0}$ as a projection of $x_{0}$ on $C$. 22 | #' The projection of a point $x_{0}$ on a hyperplane $C = \{x | a' \cdot x = b\}$ is given by 23 | 24 | #' $$ 25 | #' \begin{align*} 26 | #' \min && ||x - x_{0}|| \\ 27 | #' s.t. && a' \cdot x = b 28 | #' \end{align*} 29 | #' $$ 30 | 31 | x = rand(10) 32 | a = rand(10) 33 | b = rand() 34 | 35 | projection = Model(with_optimizer(Ipopt.Optimizer, print_level=0)) 36 | @variable(projection, x0[1:10]) 37 | @objective(projection, Min, sum((x - x0) .* (x - x0))) # We minimize the square of the distance here 38 | @constraint(projection, x0' * a == b) # Point must lie on the hyperplane 39 | 40 | optimize!(projection) 41 | @show objective_value(projection); 42 | @show value.(x0); 43 | 44 | #' ## Euclidean Distance Between Polyhedra 45 | #' Given two polyhedra $C = \{x | A_{1} \cdot x \leq b1\}$ and $D = \{x | A_{2} \cdot x \leq b_{2}\}$, 46 | #' the distance between them is the optimal value of the problem: 47 | 48 | #' $$ 49 | #' \begin{align*} 50 | #' \min && ||x - y|| \\ 51 | #' s.t. 
&& A_{1} \cdot x \leq b_{1} \\ 52 | #' && A_{2} \cdot y \leq b_{2} 53 | #' \end{align*} 54 | #' $$ 55 | 56 | A_1 = rand(10, 10) 57 | A_2 = rand(10, 10) 58 | b_1 = rand(10) 59 | b_2 = rand(10) 60 | 61 | polyhedra_distance = Model(with_optimizer(Ipopt.Optimizer, print_level=0)) 62 | @variable(polyhedra_distance, x[1:10]) # Point closest on the first polyhedron 63 | @variable(polyhedra_distance, y[1:10]) # Point closest on the second polyhedron 64 | @objective(polyhedra_distance, Min, sum((x - y) .* (x - y))) # We minimize the square of the distance here as above 65 | @constraint(polyhedra_distance, A_1 * x .<= b_1) # Point x must lie on the first polyhedron 66 | @constraint(polyhedra_distance, A_2 * y .<= b_2) # Point y must lie on the second polyhedron 67 | 68 | optimize!(polyhedra_distance) 69 | @show objective_value(polyhedra_distance); 70 | 71 | #' ## Linear Placement Problem 72 | #' We have $N$ points in $\mathbb{R}^2$, and a list of pairs of points that must be connected by links. 73 | #' The positions of some of the $N$ points are fixed; our task is to determine the positions of the remaining points, 74 | #' i.e., to place the remaining points. The objective is to place the points so that the distance between the links is minimized, 75 | #' i.e. 
our objective is: 76 | 77 | #' $$ 78 | #' \begin{align*} 79 | #' \min && \sum_{(i,j) \in A}||p_{i} - p_{j}|| 80 | #' \end{align*} 81 | #' $$ 82 | 83 | fixed = [ 1 1 -1 -1 1 -1 -0.2 0.1; # coordinates of fixed points 84 | 1 -1 -1 1 -0.5 -0.2 -1 1] 85 | 86 | M = size(fixed,2) # number of fixed points 87 | N = 6 # number of free points 88 | 89 | A = [ 1 0 0 -1 0 0 0 0 0 0 0 0 0 0; # Matrix on links 90 | 1 0 -1 0 0 0 0 0 0 0 0 0 0 0; 91 | 1 0 0 0 -1 0 0 0 0 0 0 0 0 0; 92 | 1 0 0 0 0 0 -1 0 0 0 0 0 0 0; 93 | 1 0 0 0 0 0 0 -1 0 0 0 0 0 0; 94 | 1 0 0 0 0 0 0 0 0 0 -1 0 0 0; 95 | 1 0 0 0 0 0 0 0 0 0 0 0 0 -1; 96 | 0 1 -1 0 0 0 0 0 0 0 0 0 0 0; 97 | 0 1 0 -1 0 0 0 0 0 0 0 0 0 0; 98 | 0 1 0 0 0 -1 0 0 0 0 0 0 0 0; 99 | 0 1 0 0 0 0 0 -1 0 0 0 0 0 0; 100 | 0 1 0 0 0 0 0 0 -1 0 0 0 0 0; 101 | 0 1 0 0 0 0 0 0 0 0 0 0 -1 0; 102 | 0 0 1 -1 0 0 0 0 0 0 0 0 0 0; 103 | 0 0 1 0 0 0 0 -1 0 0 0 0 0 0; 104 | 0 0 1 0 0 0 0 0 0 0 -1 0 0 0; 105 | 0 0 0 1 -1 0 0 0 0 0 0 0 0 0; 106 | 0 0 0 1 0 0 0 0 -1 0 0 0 0 0; 107 | 0 0 0 1 0 0 0 0 0 -1 0 0 0 0; 108 | 0 0 0 1 0 0 0 0 0 0 0 -1 0 0; 109 | 0 0 0 1 0 0 0 0 0 0 0 -1 0 0; 110 | 0 0 0 0 1 -1 0 0 0 0 0 0 0 0; 111 | 0 0 0 0 1 0 -1 0 0 0 0 0 0 0; 112 | 0 0 0 0 1 0 0 0 0 -1 0 0 0 0; 113 | 0 0 0 0 1 0 0 0 0 0 0 0 0 -1; 114 | 0 0 0 0 0 1 0 0 -1 0 0 0 0 0; 115 | 0 0 0 0 0 1 0 0 0 0 -1 0 0 0;] 116 | 117 | placement = Model(with_optimizer(Ipopt.Optimizer, print_level=0)) 118 | @variable(placement, p[1:M + N, 1:2]) # A variable array for the coordinates of each point 119 | @constraint(placement, p[N + 1:N + M, :] .== fixed') # We had a constraint for the fixed points 120 | dist = A * p # Matrix of differences between coordinates of 2 points with a link 121 | @objective(placement, Min, sum(dist .* dist)) # We minimize the sum of the square of the distances 122 | 123 | optimize!(placement) 124 | @show value.(p); 125 | @show objective_value(placement); 126 | 127 | #+ 128 | 129 | # Plotting the points 130 | df = DataFrame() 131 | df.x = value.(p)[:,1] 132 | 
df.y = value.(p)[:,2] 133 | df.type = vcat(fill("Free points", N), fill("Fixed points", M)) 134 | plt = plot(df, x = "x", y = "y", color = "type", Geom.point) 135 | draw(SVG(6inch, 6inch), plt) 136 | 137 | #' ## Floor Planning 138 | #' A floor planning problem consists of rectangles or boxes aligned with the axes which must be placed, 139 | #' within some limits such that they do not overlap. The objective is usually to minimize the size 140 | #' (e.g., area, volume, perimeter) of the bounding box, which is the smallest box that contains the boxes to be configured and placed. 141 | #' We model this problem as follows: 142 | 143 | #' We have N boxes $B_{1}, . . . , B_{N}$ that are to be configured and placed in a rectangle with width $W$ and height $H$, 144 | #' and lower left corner at the position $(0, 0)$. The geometry and position of the $i$th box is specified by 145 | #' its width $w_{i}$ and height $h_{i}$, and the coordinates $(x_{i}, y_{i})$ of its lower left corner. 146 | 147 | #' The variables in the problem are $x_{i}, y_{i}, w_{i}, h_{i}$ for $i=1, \ldots, 148 | #' N,$ and the width $W$ and height $H$ of the bounding rectangle. In all floor planning problems, 149 | #' we require that the cells lie inside the bounding rectangle, $i . e .$ 150 | 151 | #' $$ 152 | #' x_{i} \geq 0, \quad y_{i} \geq 0, \quad x_{i}+w_{i} \leq W, \quad y_{i}+h_{i} \leq H, \quad i=1, \ldots, N 153 | #' $$ 154 | 155 | #' We also require that the cells do not overlap, except possibly on their boundaries, i.e. 
x_{i}+w_{i} \leq x_{j} \quad \text{or} \quad x_{j}+w_{j} \leq x_{i} \quad \text{or} \quad y_{i}+h_{i} \leq y_{j} \quad \text{or} \quad y_{j}+h_{j} \leq y_{i}
draw(SVG(6inch, 6inch), vstack(hstack(figs[1], figs[2]), hstack(figs[3], figs[4]))) 217 | 218 | #' ### References 219 | #' 220 | #' 1. Boyd, S., & Vandenberghe, L. (2004). Convex Optimization. Cambridge: Cambridge University Press. doi:10.1017/CBO9780511804441 -------------------------------------------------------------------------------- /script/optimization_concepts/conic_programming.jl: -------------------------------------------------------------------------------- 1 | #' --- 2 | #' title: Conic Programming 3 | #' --- 4 | 5 | #' **Originally Contributed by**: Arpit Bhatia 6 | 7 | #' This tutorial is aimed at providing a simplistic introduction to conic programming using JuMP. 8 | 9 | #' ## What is a Cone? 10 | #' A subset $C$ of a vector space $V$ is a cone if $\forall x \in C$ and positive scalars $\alpha$, 11 | #' the product $\alpha x \in C$. A cone C is a convex cone if $\alpha x + \beta y \in C$, 12 | #' for any positive scalars $\alpha, \beta$, and any $x, y \in C$. 13 | 14 | #' ## Conic Programming 15 | #' Conic programming problems are convex optimization problems in which a convex function is minimized 16 | #' over the intersection of an affine subspace and a convex cone. 17 | #' An example of a conic-form minimization problems, in the primal form is: 18 | 19 | #' $$ 20 | #' \begin{align} 21 | #' & \min_{x \in \mathbb{R}^n} & a_0^T x + b_0 \\ 22 | #' & \;\;\text{s.t.} & A_i x + b_i & \in \mathcal{C}_i & i = 1 \ldots m 23 | #' \end{align} 24 | #' $$ 25 | 26 | #' The corresponding dual problem is: 27 | 28 | #' $$ 29 | #' \begin{align} 30 | #' & \max_{y_1, \ldots, y_m} & -\sum_{i=1}^m b_i^T y_i + b_0 \\ 31 | #' & \;\;\text{s.t.} & a_0 - \sum_{i=1}^m A_i^T y_i & = 0 \\ 32 | #' & & y_i & \in \mathcal{C}_i^* & i = 1 \ldots m 33 | #' \end{align} 34 | #' $$ 35 | 36 | #' where each $\mathcal{C}_i$ is a closed convex cone and $\mathcal{C}_i^*$ is its dual cone. 
37 | 38 | #' ## Some of the Types of Cones Supported by JuMP 39 | 40 | using JuMP 41 | using ECOS 42 | using Random 43 | 44 | Random.seed!(1234); 45 | 46 | #' By this point we have used quite a few different solvers. 47 | #' To find out all the different solvers and their supported problem types, check out the 48 | #' [solver table](http://www.juliaopt.org/JuMP.jl/v0.19.0/installation/#Getting-Solvers-1) in the docs. 49 | 50 | #' ### Second-Order Cone 51 | #' The Second-Order Cone (or Lorenz Cone) of dimension $n$ is of the form: 52 | 53 | #' $$ 54 | #' Q^n = \{ (t,x) \in \mathbb{R}^\mbox{n} : t \ge ||x||_2 \} 55 | #' $$ 56 | 57 | #' A Second-Order Cone rotated by $\pi/4$ in the $(x_1,x_2)$ plane is called a Rotated Second-Order Cone. 58 | #' It is of the form: 59 | 60 | #' $$ 61 | #' Q_r^n = \{ (t,u,x) \in \mathbb{R}^\mbox{n} : 2tu \ge ||x||_2^2, t,u \ge 0 \} 62 | #' $$ 63 | 64 | #' These cones are represented in JuMP using the MOI sets `SecondOrderCone` and `RotatedSecondOrderCone`. 65 | 66 | #' #### Example: Euclidean Projection on a Hyperplane 67 | #' For a given point $u_{0}$ and a set $K$, we refer to any point $u \in K$ 68 | #' which is closest to $u_{0}$ as a projection of $u_{0}$ on $K$. 
69 | #' The projection of a point $u_{0}$ on a hyperplane $K = \{u | p' \cdot u = q\}$ is given by 70 | 71 | #' $$ 72 | #' \begin{align*} 73 | #' & \min & ||u - u_{0}|| \\ 74 | #' & \;\;\text{s.t.} & p' \cdot u = q \\ 75 | #' \end{align*} 76 | #' $$ 77 | 78 | u0 = rand(10) 79 | p = rand(10) 80 | q = rand(); 81 | 82 | #' We can model the above problem as the following conic program: 83 | 84 | #' $$ 85 | #' \begin{align*} 86 | #' & \min & t \\ 87 | #' & \;\;\text{s.t.} & p' \cdot u = q \\ 88 | #' & & (t, u - u_{0}) \in Q^{n+1} 89 | #' \end{align*} 90 | #' $$ 91 | 92 | #' On comparing this with the primal form of a conic problem we saw above, 93 | 94 | #' $$ 95 | #' \begin{align*} 96 | #' & x = (t , u) &\\ 97 | #' & a_0 = e_1 &\\ 98 | #' & b_0 = 0 &\\ 99 | #' & A_1 = (0, p) &\\ 100 | #' & b_1 = -q &\\ 101 | #' & C_1 = \mathbb{R}_- &\\ 102 | #' & A_2 = 1 &\\ 103 | #' & b_2 = -(0, u_0) &\\ 104 | #' & C_2 = Q^{n+1} & 105 | #' \end{align*} 106 | #' $$ 107 | 108 | #' Thus, we can obtain the dual problem as: 109 | 110 | #' $$ 111 | #' \begin{align*} 112 | #' & \max & y_1 + (0, u_0)^T y_2 \\ 113 | #' & \;\;\text{s.t.} & e_1 - (0,p)^T y_1 - y_2 = 0 \\ 114 | #' & & y_1 \in \mathbb{R}_- \\ 115 | #' & & y_2 \in Q^{n+1} 116 | #' \end{align*} 117 | #' $$ 118 | 119 | model = Model(with_optimizer(ECOS.Optimizer, printlevel = 0)) 120 | @variable(model, u[1:10]) 121 | @variable(model, t) 122 | @objective(model, Min, t) 123 | @constraint(model, [t, (u - u0)...] in SecondOrderCone()) 124 | @constraint(model, u' * p == q) 125 | optimize!(model) 126 | 127 | #+ 128 | 129 | @show objective_value(model); 130 | @show value.(u); 131 | 132 | #+ 133 | 134 | e1 = [1, zeros(10)...] 135 | dual_model = Model(with_optimizer(ECOS.Optimizer, printlevel = 0)) 136 | @variable(dual_model, y1 <= 0) 137 | @variable(dual_model, y2[1:11]) 138 | @objective(dual_model, Max, q * y1 + [0, u0...]' * y2) 139 | @constraint(dual_model, e1 - [0, p...] 
.* y1 - y2 .== 0) 140 | @constraint(dual_model, y2 in SecondOrderCone()) 141 | optimize!(dual_model) 142 | 143 | #+ 144 | 145 | @show objective_value(dual_model); 146 | 147 | 148 | #' We can also have an equivalent formulation using a Rotated Second-Order Cone: 149 | 150 | #' $$ 151 | #' \begin{align*} 152 | #' & \min & t \\ 153 | #' & \;\;\text{s.t.} & p' \cdot u = q \\ 154 | #' & & (t, 1/2, u - u_{0})\in Q_r^{n+2} 155 | #' \end{align*} 156 | #' $$ 157 | 158 | model = Model(with_optimizer(ECOS.Optimizer, printlevel = 0)) 159 | @variable(model, u[1:10]) 160 | @variable(model, t) 161 | @objective(model, Min, t) 162 | @constraint(model, [t, 0.5, (u - u0)...] in RotatedSecondOrderCone()) 163 | @constraint(model, u' * p == q) 164 | optimize!(model) 165 | 166 | #+ 167 | 168 | @show value.(u); 169 | 170 | #' The difference here is that the objective in the case of the Second-Order Cone is $||u - u_{0}||_2$, 171 | #' while in the case of a Rotated Second-Order Cone is $||u - u_{0}||_2^2$. 172 | #' However, the value of x is the same for both. 173 | 174 | #' ### Exponential Cone 175 | 176 | #' An Exponential Cone is a set of the form: 177 | 178 | #' $$ 179 | #' K_{exp} = \{ (x,y,z) \in \mathbb{R}^3 : y \exp (x/y) \le z, y > 0 \} 180 | #' $$ 181 | 182 | #' It is represented in JuMP using the MOI set `ExponentialCone`. 183 | 184 | #' #### Example: Entropy Maximization 185 | #' As the name suggests, the entropy maximization problem consists of maximizing the entropy function, 186 | #' $H(x) = -x\log{x}$ subject to linear inequality constraints. 
187 | 188 | #' $$ 189 | #' \begin{align*} 190 | #' & \max & - \sum_{i=1}^n x_i \log x_i \\ 191 | #' & \;\;\text{s.t.} & \mathbf{1}' x = 1 \\ 192 | #' & & Ax \leq b 193 | #' \end{align*} 194 | #' $$ 195 | 196 | #' We can model this problem using an exponential cone by using the following transformation: 197 | 198 | #' $$ 199 | #' t\leq -x\log{x} \iff t\leq x\log(1/x) \iff (1, x, t) \in K_{exp} 200 | #' $$ 201 | 202 | #' Thus, our problem becomes, 203 | 204 | #' $$ 205 | #' \begin{align*} 206 | #' & \max & 1^Tt \\ 207 | #' & \;\;\text{s.t.} & Ax \leq b \\ 208 | #' & & 1^T x = 1 \\ 209 | #' & & (1, x_i, t_i) \in K_{exp} && \forall i = 1 \ldots n \\ 210 | #' \end{align*} 211 | #' $$ 212 | 213 | n = 15; 214 | m = 10; 215 | A = randn(m, n); 216 | b = rand(m, 1); 217 | 218 | model = Model(with_optimizer(ECOS.Optimizer, printlevel = 0)) 219 | @variable(model, t[1:n]) 220 | @variable(model, x[1:n]) 221 | @objective(model, Max, sum(t)) 222 | @constraint(model, sum(x) == 1) 223 | @constraint(model, A * x .<= b ) 224 | # Cannot use the exponential cone directly in JuMP, hence we use MOI to specify the set. 225 | @constraint(model, con[i = 1:n], [1, x[i], t[i]] in MOI.ExponentialCone()) 226 | 227 | optimize!(model); 228 | 229 | #+ 230 | 231 | @show objective_value(model); 232 | 233 | #' ### Positive Semidefinite Cone 234 | #' The set of Positive Semidefinite Matrices of dimension $n$ form a cone in $\mathbb{R}^n$. 235 | #' We write this set mathematically as 236 | 237 | #' $$ 238 | #' \mathcal{S}_{+}^n = \{ X \in \mathcal{S}^n \mid z^T X z \geq 0, \: \forall z\in \mathbb{R}^n \}. 239 | #' $$ 240 | 241 | #' A PSD cone is represented in JuMP using the MOI sets 242 | #' `PositiveSemidefiniteConeTriangle` (for upper triangle of a PSD matrix) and 243 | #' `PositiveSemidefiniteConeSquare` (for a complete PSD matrix). 244 | #' However, it is prefferable to use the `PSDCone` shortcut as illustrated below. 
245 | 246 | #' #### Example: Largest Eigenvalue of a Symmetric Matrix 247 | #' Suppose $A$ has eigenvalues $\lambda_{1} \geq \lambda_{2} \ldots \geq \lambda_{n}$. 248 | #' Then the matrix $t I-A$ has eigenvalues $t-\lambda_{1}, t-\lambda_{2}, \ldots, t-\lambda_{n}$. 249 | #' Note that $t I-A$ is PSD exactly when all these eigenvalues are non-negative, 250 | #' and this happens for values $t \geq \lambda_{1} .$ 251 | #' Thus, we can model the problem of finding the largest eigenvalue of a symmetric matrix as: 252 | 253 | #' $$ 254 | #' \begin{align*} 255 | #' \lambda_{1} = \max t \\ 256 | #' \text { s.t. } t I-A \succeq 0 257 | #' \end{align*} 258 | #' $$ 259 | 260 | #+ tangle = false 261 | 262 | using LinearAlgebra 263 | using CSDP 264 | 265 | A = [3 2 4; 266 | 2 0 2; 267 | 4 2 3] 268 | 269 | model = Model(with_optimizer(CSDP.Optimizer, printlevel = 0)) 270 | @variable(model, t) 271 | @objective(model, Min, t) 272 | @constraint(model, t .* Matrix{Float64}(I, 3, 3) - A in PSDCone()) 273 | 274 | optimize!(model) 275 | 276 | #+ tangle = false 277 | 278 | @show objective_value(model); 279 | 280 | #' ## Other Cones and Functions 281 | #' For other cones supported by JuMP, check out the 282 | #' [MathOptInterface Manual](http://www.juliaopt.org/MathOptInterface.jl/dev/apimanual/#Standard-form-problem-1). 283 | #' A good resource for learning more about functions which can be modelled using cones is the 284 | #' MOSEK Modeling Cookbook[[1]](#c1). 285 | 286 | #' ### References 287 | #' 288 | #' 1. MOSEK Modeling Cookbook — MOSEK Modeling Cookbook 3.1. Available at: https://docs.mosek.com/modeling-cookbook/index.html. --------------------------------------------------------------------------------