├── .gitignore ├── Setup.hs ├── ChangeLog.md ├── app ├── Main.o ├── Main.hi └── Main.hs ├── test ├── Spec.hi ├── Spec.o └── Spec.hs ├── src ├── Control.hs ├── Limpy.hs ├── Mixed.hs ├── Lib.hs ├── Cone.hs └── LinRel.hs ├── package.yaml ├── LICENSE ├── stack.yaml.lock ├── stack.yaml └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .stack-work/ 2 | lqr.cabal 3 | *~ -------------------------------------------------------------------------------- /Setup.hs: -------------------------------------------------------------------------------- 1 | import Distribution.Simple 2 | main = defaultMain 3 | -------------------------------------------------------------------------------- /ChangeLog.md: -------------------------------------------------------------------------------- 1 | # Changelog for lqr 2 | 3 | ## Unreleased changes 4 | -------------------------------------------------------------------------------- /app/Main.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philzook58/ConvexCat/master/app/Main.o -------------------------------------------------------------------------------- /app/Main.hi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philzook58/ConvexCat/master/app/Main.hi -------------------------------------------------------------------------------- /test/Spec.hi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philzook58/ConvexCat/master/test/Spec.hi -------------------------------------------------------------------------------- /test/Spec.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philzook58/ConvexCat/master/test/Spec.o -------------------------------------------------------------------------------- /app/Main.hs: 
-------------------------------------------------------------------------------- 1 | module Main where 2 | 3 | import Lib 4 | 5 | main :: IO () 6 | main = someFunc 7 | -------------------------------------------------------------------------------- /src/Control.hs: -------------------------------------------------------------------------------- 1 | module Control where 2 | import Optimization.Constrained.Penalty 3 | import Data.Functor.Compose 4 | import Data.Functor.Product 5 | import Linear.V 6 | 7 | {- 8 | 9 | Dynobud had a flavor like this right? 10 | -} 11 | 12 | -- control problem with state control vectors lasting for t time steps. 13 | type ControlProb state control t a = (Compose (Product state control) (V t)) a 14 | 15 | -- simpleDiscrete :: (forall s. Mode s => f (AD s a) -> AD s a) -> Opt f a -> Opt f a 16 | --simpleDiscrete :: (forall s. Mode s => f (AD s a) -> AD s a) -> Opt f a -> Opt f a 17 | --simpleDiscrete xdot = constrainEQ 18 | 19 | -- give lens 20 | -------------------------------------------------------------------------------- /test/Spec.hs: -------------------------------------------------------------------------------- 1 | import LinRel 2 | 3 | r20 :: HLinRel IV IV 4 | r20 = resistor 20 5 | 6 | main :: IO () 7 | main = do putStrLn "Test suite not yet implemented" 8 | print (r20 == (hid <<< r20)) 9 | print (r20 == r20 <<< hid) 10 | print (r20 == (hmeet r20 r20)) 11 | print $ resistor 50 == r20 <<< (resistor 30) 12 | print $ (bridge 10) <<< (bridge 10) == (bridge 5) 13 | print $ v2h (h2v r20) == r20 14 | print $ r20 <= htop 15 | print $ hconverse (hconverse r20) == r20 16 | print $ (open <<< r20) == open 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /src/Limpy.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE FlexibleContexts #-} 3 | module Limpy where 4 | 5 | import Data.Functor.Rep 6 | -- import Cone 7 | -- 
import qualified Numeric.Limp.Rep as R 8 | -- import Numeric.Limp.Program 9 | -- import Numeric.Limp.Canon 10 | -- import Numeric.Limp.Solvers.Cbc 11 | import Linear.V3 12 | 13 | -- not happy. Let's try glpk 14 | 15 | 16 | listulate :: (Bounded (Rep f), Enum (Rep f), Representable f, Num a, Eq a) => f a -> [(Rep f, a)] 17 | listulate x = [ (i, index x i) | i <- allIndices, index x i /= 0] where allIndices = [minBound .. maxBound] 18 | 19 | 20 | type HRep f a = [f a] 21 | 22 | -- findPoint :: HRep f Double -> f Double 23 | -- findPoint hrep = solve $ minimise (r1 minBound) constraints [] where 24 | -- constraints = (foldr1 (:&&) (map (\f -> let table = [(Right x, R.R n) | (x, n) <- listulate f] in (LR table (0)) :>= conR 0) hrep)) :&& (r1 minBound :== con 1) 25 | 26 | -- Rep V3 is not Ord. Sigh. 27 | ex1 :: HRep V3 Double 28 | ex1 = [V3 0 0 1, V3 1 0 0, V3 1 1 1] 29 | 30 | -- unlistulate result = tabulate (\j -> lookup 0 j result) -------------------------------------------------------------------------------- /package.yaml: -------------------------------------------------------------------------------- 1 | name: lqr 2 | version: 0.1.0.0 3 | github: "githubuser/lqr" 4 | license: BSD3 5 | author: "Author name here" 6 | maintainer: "example@example.com" 7 | copyright: "2019 Author name here" 8 | 9 | extra-source-files: 10 | - README.md 11 | - ChangeLog.md 12 | 13 | # Metadata used when publishing your package 14 | # synopsis: Short description of your package 15 | # category: Web 16 | 17 | # To avoid duplicated efforts in documentation and dealing with the 18 | # complications of embedding Haddock markup inside cabal files, it is 19 | # common to point users to the README.md file. 
20 | description: Please see the README on GitHub at 21 | 22 | dependencies: 23 | - base >= 4.7 && < 5 24 | - linear 25 | - optimization 26 | - ad 27 | #- limp-cbc 28 | - adjunctions 29 | - distributive 30 | #- limp 31 | - glpk-hs 32 | 33 | library: 34 | source-dirs: src 35 | dependencies: 36 | - hmatrix 37 | extra-libraries: 38 | - iconv 39 | 40 | executables: 41 | lqr-exe: 42 | main: Main.hs 43 | source-dirs: app 44 | ghc-options: 45 | - -threaded 46 | - -rtsopts 47 | - -with-rtsopts=-N 48 | dependencies: 49 | - lqr 50 | - hmatrix 51 | 52 | tests: 53 | lqr-test: 54 | main: Spec.hs 55 | source-dirs: test 56 | ghc-options: 57 | - -threaded 58 | - -rtsopts 59 | - -with-rtsopts=-N 60 | dependencies: 61 | - lqr 62 | - hmatrix 63 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Author name here (c) 2019 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | * Neither the name of Author name here nor the names of other 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /stack.yaml.lock: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by Stack. 2 | # You should not edit this file by hand. 3 | # For more information, please see the documentation at: 4 | # https://docs.haskellstack.org/en/stable/lock_files 5 | 6 | packages: 7 | - completed: 8 | hackage: optimization-0.1.9@sha256:2f1b6f029411456056860005e76884e82b037129397090c1b4eef208bc95e559,2643 9 | pantry-tree: 10 | size: 1533 11 | sha256: 4abadcfccecbe39c032aac20a82b82641157c5c32e73ffb0f34215ce90dd01e6 12 | original: 13 | hackage: optimization-0.1.9 14 | - completed: 15 | hackage: distributive-0.5.3@sha256:f367b4841d923133f979dd9acb78d76db408ea25242794fcded987e1321a6d19,2787 16 | pantry-tree: 17 | size: 909 18 | sha256: f1597c5c9f885e13bb635c63df044223f2f57a666c010eff74bba7d0ce1f494f 19 | original: 20 | hackage: distributive-0.5.3 21 | - completed: 22 | hackage: gasp-1.1.0.0@sha256:14b3cd6f541ff3a4169147c9f461ca30a4c75b78da5c7292f88d241cb35892c0,633 23 | pantry-tree: 24 | size: 208 25 | sha256: 6cb716fc7e0be126f1e1f854172c4068077479d2f8282bb58d3b8411663039de 26 | original: 27 | hackage: gasp-1.1.0.0 28 | - completed: 29 | cabal-file: 30 | size: 2582 31 | sha256: e70c66ece7ab22ea30ce6e8663e4e7de15befe82e87e69e0cc71330016e63de7 32 | name: glpk-hs 33 | version: 0.6.1 34 | git: 
https://github.com/marlls1989/glpk-hs.git 35 | pantry-tree: 36 | size: 1609 37 | sha256: 62f9769cef79828a57e6df36e6ca065537ba4738b646f29b397aee27749a35f5 38 | commit: 3be20e60344c1751629a57dbb45927ee33ee35c9 39 | original: 40 | git: https://github.com/marlls1989/glpk-hs.git 41 | commit: 3be20e60344c1751629a57dbb45927ee33ee35c9 42 | snapshots: 43 | - completed: 44 | size: 498186 45 | url: https://raw.githubusercontent.com/commercialhaskell/stackage-snapshots/master/lts/13/22.yaml 46 | sha256: d4f07dc3d5658260c2fe34266ad7618f6c84d34decf559c9c786ac1cfccf4e7b 47 | original: lts-13.22 48 | -------------------------------------------------------------------------------- /stack.yaml: -------------------------------------------------------------------------------- 1 | # This file was automatically generated by 'stack init' 2 | # 3 | # Some commonly used options have been documented as comments in this file. 4 | # For advanced use and comprehensive documentation of the format, please see: 5 | # https://docs.haskellstack.org/en/stable/yaml_configuration/ 6 | 7 | # Resolver to choose a 'specific' stackage snapshot or a compiler version. 8 | # A snapshot resolver dictates the compiler version and the set of packages 9 | # to be used for project dependencies. For example: 10 | # 11 | # resolver: lts-3.5 12 | # resolver: nightly-2015-09-21 13 | # resolver: ghc-7.10.2 14 | # 15 | # The location of a snapshot can be provided as a file or url. Stack assumes 16 | # a snapshot provided as a file might change, whereas a url resource does not. 17 | # 18 | # resolver: ./custom-snapshot.yaml 19 | # resolver: https://example.com/snapshots/2018-01-01.yaml 20 | resolver: lts-13.22 21 | 22 | # User packages to be built. 23 | # Various formats can be used as shown in the example below. 
24 | # 25 | # packages: 26 | # - some-directory 27 | # - https://example.com/foo/bar/baz-0.0.2.tar.gz 28 | # - location: 29 | # git: https://github.com/commercialhaskell/stack.git 30 | # commit: e7b331f14bcffb8367cd58fbfc8b40ec7642100a 31 | # - location: https://github.com/commercialhaskell/stack/commit/e7b331f14bcffb8367cd58fbfc8b40ec7642100a 32 | # subdirs: 33 | # - auto-update 34 | # - wai 35 | packages: 36 | - . 37 | #- location: 38 | # git: https://github.com/jyp/glpk-hs.git 39 | 40 | # Dependency packages to be pulled from upstream that are not in the resolver 41 | # using the same syntax as the packages field. 42 | # (e.g., acme-missiles-0.3) 43 | extra-deps: 44 | - optimization-0.1.9 45 | - distributive-0.5.3 46 | #- limp-0.3.2.3 47 | #- limp-cbc-0.3.2.3 48 | #- glpk-hs-0.5 49 | - gasp-1.1.0.0 50 | - git: https://github.com/marlls1989/glpk-hs.git 51 | commit: 3be20e60344c1751629a57dbb45927ee33ee35c9 52 | # Override default flag values for local packages and extra-deps 53 | # flags: {} 54 | 55 | # Extra package databases containing global packages 56 | # extra-package-dbs: [] 57 | 58 | # Control whether we use the GHC we find on the path 59 | # system-ghc: true 60 | # 61 | # Require a specific version of stack, using version ranges 62 | # require-stack-version: -any # Default 63 | # require-stack-version: ">=1.9" 64 | # 65 | # Override the architecture used by stack, especially useful on Windows 66 | # arch: i386 67 | # arch: x86_64 68 | # 69 | # Extra directories used by stack for building 70 | # extra-include-dirs: [/path/to/dir] 71 | # extra-lib-dirs: [/path/to/dir] 72 | # 73 | # Allow a newer minor version of GHC than the snapshot specifies 74 | # compiler-check: newer-minor 75 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Convex Programming in Haskell Stuff 2 | 3 | I feel pulled in multiple directions. 
I want to wait to write a blog post until the ideas in it have some level of completion. But sometimes that level never comes. My attention may wander off. And yet, I feel I often have some interesting half-built things to show. 4 | I have many loves in my life, but two of them are Haskell and Convex Programming. 5 | 6 | In a previous post, I talked a bit about how reducing convex programming to questions about cones leads to elegance and simplification. 7 | 8 | There are a couple of approaches to well-typed vectors in Haskell. One of them is to consider an f :: * -> * parameter as characterizing a vector space. We intend to fill in this with things like V4. 9 | data V4 a = V4 a a a a 10 | A vector space can be considered $R^n$. The f shape is sort of the $-^n$ part. The part that tells us the shape/size of the vector space. We can then choose to fill it in, making it a vector space over Doubles or Complex Doubles or something more exotic. 11 | The fundamental construct we want to talk about for cones is the Ray, rather than the vector or point. A Ray is a direction. We can represent it with a vector, if we ignore the magnitude of the vector or if we choose to always work with normalized vectors (which is less elegant really). 12 | A cone is a set that is closed under addition and non-negative scalar multiplication. It is a convex set of rays. 13 | The dual space to rays is the halfspace cones. They can be naturally parametrized also by vectors (basically the vectors normal to the planes pointing in the direction of the halfspace you want). Any ray that has positive dot product with this vector is in the halfspace, which gives a simple test. 14 | 15 | Polytopes have (at least) two natural representations. They can be expressed as a sum of generator rays (the corners of the polytope) or as the set obeying a set of halfplane constraints. The first is called the V representation and the latter the H representation. The two are highly interconnected by duality. 
16 | 17 | The difficulty and method for solving these questions can depend strongly on the representation you have. 18 | 19 | What are geometrically interesting questions to ask? 20 | 1. Whether a ray is in a cone. 21 | + Easy given HRep. Just do all the dot products 22 | + 23 | 2. Finding a ray that is in a cone 24 | - Any ray or an extremal ray? Extremal according to what? 25 | - Projecting to a cone. There are metrics available. The cosine metric. The Chord metric. Others? They feel a bit goofy. They feel far less elegant than the quadratic metric of a vector space. The dot product is a somewhat natural thing to talk about, but by design. Abs metric? You can use the unit plane. and put a metric on it. The cone space is a relative of projective space. which has similar problems. The cross ratio? Logarithm of dot product? Maybe you do need a plane to work on to have a reasonable definition of extremal. Extremal with respect to a cone generalized inequality on the Euclidean space. Extremal with respect to another cone? But a particular kind of cone. It contains (0,0,0,1)? A projection excluding (0,0,0,1) 26 | 27 | 28 | 4. Convert VRep to HRep and vice versa 29 | + take every possible choice out of set. Do linear solve. Check if linear solve matches everything else. 30 | 5. Intersections 31 | + Alternating Projection. The most primitive and obvious. 32 | + ADMM. 33 | 6. Minkowski Sums = Convex Hull 34 | 7. Projection 35 | 8. Pruning Redundancy 36 | 9. Cone Containment Testing. 
-------------------------------------------------------------------------------- /src/Mixed.hs: -------------------------------------------------------------------------------- 1 | module Mixed where 2 | import Linear.V4 3 | import Data.Foldable 4 | {- 5 | 6 | 7 | 8 | branch and bound - store a best yet 9 | Have a hierarchy of relaxed problems - converting 10 | 11 | V4 a b c d = V4 a b c d 12 | 13 | V4' a b = V4' a b a 14 | 15 | -- all possible choices of a and b 16 | -- somehow connect these functor together 17 | V1' a b = V1 a 18 | V1' a b = V1 b 19 | 20 | -- this also perhaps let us carry down tips? So it's not all bad. 21 | V4 (Either Int Double) 22 | data VarType = Int | Double 23 | data V4' = (V4 VarType, Vector Double, Vector Int) -- not intrisinacally safe, but makes sense. 24 | data Vec a (Either Int Double) = [(a, Either Int Double)] -- free vector style. 25 | 26 | 1. How to check 27 | 28 | 29 | for performance? 30 | V4 (Either Int Double) -> (Vector Int, Vector Double) 31 | 32 | maximum :: f (Either a b ) -> Either a b 33 | 34 | really needs to return a position 35 | 36 | (Traversable f, Representable f) 37 | ifoldMapRep? maybe 38 | 39 | bbsearch :: 40 | 41 | allbinary :: (Applicative f, Traversable f) => [f Bool] 42 | allbinary = sequenceA $ pure [True, False] 43 | 44 | 45 | 46 | -} 47 | 48 | -- could generalize this to any bounded enum 49 | -- the ord (f b) is ugly as hell 50 | bruteforce :: (Applicative f, Traversable f, Ord a, Ord (f b), Bounded b, Enum b) => (f b -> a) -> (a, f b) 51 | bruteforce f = maximum $ map (\x -> (f x, x)) allbinary where allbinary = sequenceA $ pure [minBound .. maxBound] -- [True, False] 52 | 53 | mosttrue :: (Int, V4 Bool) 54 | mosttrue = bruteforce $ sum . 
(fmap fromEnum) 55 | 56 | -- constrained maximization using a filtering function 57 | bruteforce' :: (Applicative f, Traversable f, Ord a, Ord (f b), Bounded b, Enum b) => (f b -> a) -> (f b -> Bool) -> (a, f b) 58 | bruteforce' f constraint = maximum $ map (\x -> (f x, x)) $ filter constraint $ allbinary where allbinary = sequenceA $ pure [minBound .. maxBound] 59 | 60 | leasttrue :: (Int, V4 Bool) 61 | leasttrue = bruteforce $ sum . (fmap (negate . fromEnum)) 62 | 63 | thismanytrue :: Int -> (Int, V4 Bool) 64 | thismanytrue n = bruteforce $ (\n' -> negate $ abs (n - n')) . sum . (fmap fromEnum) 65 | 66 | partialbruteforce :: (Applicative f, Traversable f, Ord a, Ord (f b), Bounded b, Enum b) => (f b -> a) -> f (Maybe b) -> (a, f b) 67 | partialbruteforce obj pf = maximum $ map (\x -> (obj x, x)) $ traverse (maybe [minBound .. maxBound] pure) pf 68 | 69 | constrainedmosttrue = partialbruteforce (sum . (fmap fromEnum)) (V4 (Just False) Nothing Nothing (Just True)) 70 | 71 | 72 | {- 73 | 74 | pruning search. 75 | a pruning function, tells us whetehr we can possibly beat the current best score 76 | (a -> f (Maybe b) -> Bool) 77 | perhaps the function should also select some good suggestions 78 | a -> f (Maybe b) -> Maybe (f (Maybe b)) 79 | 80 | a -> f (Maybe b) -> [ f (Maybe b) ] -- return refined possibilities that can possibly beat a. 81 | prunesearch :: (a -> f (Maybe b) -> [ f (Maybe b) ]) -> (f b -> a) -> f (Maybe b) -> (a, f b) 82 | prunesearch prune obj partialf = 83 | 84 | 85 | 86 | a relaxation function. A bound. 87 | f (Maybe b) -> (a, f (Either b c)) 88 | (f Double) -> (Double, f Double) 89 | objective :: (f (Either b c) -> a) 90 | 91 | using Ordering via objective function. 92 | instance Galois (f Int) (f Double) 93 | data Galois Int Double = {} 94 | 95 | objective :: 96 | f (Either a b) -> c 97 | 98 | Left/Right patterns give different domains. The most restrictive is all right, the least restrivie is all left. 99 | Incomparable are incomarable. 
100 | 101 | relaxedsolve -- does not change LR pattern 102 | prunesolve -- does change LR pattern. 103 | 104 | There is an ordering relations for possible solutions given an LR pattern. The ordering is given by the induced ordering of obj. 105 | we have abstract/concrete pair 106 | concrete :: Pattern -> f (Either a b) -> f (Either a b) 107 | 108 | coherence of type class instances for the LR pattern 109 | 110 | instance Galois V2 V2 111 | concrete = id 112 | abstract = id 113 | instance Galois f f', => V2 f 114 | 115 | -- could carry around the objective function implicitly. 116 | newtype Obj1 f = Obj1 f 117 | instance Ord Obj1 where 118 | compare x y = compare (obj x) (obj y) 119 | 120 | newtype ConeInEq f = 121 | 122 | instance => Lattice (ConeInEq f Double) = 123 | \/ 124 | 125 | instance Lattice (V4 a) where 126 | -- pointwise? 127 | 128 | 129 | -- not entirely dissimilar from A* 130 | prunesearch bound obj partial 131 | 132 | partial solution is path already found. bound is underestimator of path to go. That is A* 133 | 134 | 135 | 136 | -} -------------------------------------------------------------------------------- /src/Lib.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE NoImplicitPrelude, TypeSynonymInstances, RankNTypes, StandaloneDeriving, 2 | ScopedTypeVariables, TypeApplications, GADTs, TypeOperators, DeriveTraversable, 3 | FlexibleInstances, AllowAmbiguousTypes, UndecidableInstances, TypeFamilies, LambdaCase #-} 4 | module Lib 5 | where 6 | import Numeric.LinearAlgebra 7 | import Control.Category 8 | import Prelude hiding ((.), id) 9 | import Control.Arrow ((***)) 10 | someFunc :: IO () 11 | someFunc = putStrLn "someFunc" 12 | 13 | 14 | 15 | {- 16 | type V a = a -> Double 17 | type D a = a -> Double 18 | D (V a) 19 | type V a = ContT () Q a -- ContT b Q a = (a ->Q b) -> Q b. Has some way of dealing with a index. Some kind of 20 | SelT -- takes a diag, unsummed? 
21 | -- (a -> Q b) -> Q a 22 | 23 | newtype Dag a = Dag (a -> Q ()) 24 | 25 | class Dagger a where 26 | type Dual :: * -> * -- *? 27 | dag :: a -> Dual a 28 | instance Dagger (D (Vec a)) where 29 | type Dual = Vec 30 | dag (D v) = join $ ContT () 31 | -- dagger needs to embed a search procedure? 32 | 33 | instance Dagger (D (Vec Bool)) where 34 | type Dual = Vec Bool 35 | dag (D v) = join $ ContT () 36 | instance Dagger a b where 37 | dag :: a -> b 38 | dag' :: b -> a 39 | instance Dagger k where 40 | type family Dual :: * -> * 41 | dag :: k a (Dual a) -- dag'? 42 | 43 | -- also a kind of dot product 44 | dag1 :: ((a -> Q ()) -> Q ()) -> (((a -> Q ()) -> Q ()) -> Q () 45 | dag1 f = 46 | 47 | -- it's a kind of dot product. 48 | lift :: f Double -> (a -> Double) -> Double 49 | lift w f = fold (+) $ (mapWithIndex \k v -> (f k) * v) w 50 | 51 | -- tensor is a_i b_i, mutiply out without summing 52 | tensor :: f a -> f a -> f a 53 | tensor = intersectWith (*) 54 | 55 | type V' a = (a -> Q ()) -> Q () 56 | tensor :: V' a -> V' a -> V' a 57 | 58 | dag :: (V' Bool -> Q ()) -> V' Bool 59 | dag f = \g -> True False 60 | 61 | dag :: (Bool -> Q ()) -> V' Bool 62 | dag :: (Bool -> Q ()) -> (Bool -> Q ()) -> Q () 63 | dag f g = (f True) (g True) + (f False) (g False) 64 | 65 | 66 | 67 | dag :: (Bool -> Q ()) -> Q Bool 68 | dag f = (f True) >>= true + (f False) >>= false 69 | 70 | dag :: Q Bool -> (Bool -> Q ()) 71 | dag xs b = lookup b xs 72 | dag :: Eq a => Q a -> (a -> Q ()) -- maybe Ord 73 | dag xs b = lookup b xs 74 | 75 | -- If we mixed Hedges select sat solver with the Q monad? 76 | -- (Bool -> Q Bool) -> Q Bool ? the unsummed trace? diag? Hedgest hing would come out weirder. 77 | 78 | 79 | true :: () -> Q Bool 80 | true _ = pure True 81 | false :: () -> Q Bool 82 | false _ = pure False 83 | 84 | -- the seelect function. 
85 | -- (b -> m ()) -> m b 86 | 87 | -} 88 | 89 | 90 | type M = Matrix Double 91 | 92 | {- 93 | 94 | data BMatrix a b = BMatrix M M M M | Id -- A B C D 95 | 96 | instance Category BMatrix where 97 | id = Id 98 | Id . x = x 99 | x . Id = x 100 | (BMatrix a b c d) . (BMatrix e f g h) = BMatrix <\> where 101 | q = d + e 102 | qlu = lupacked q 103 | qinv = lusolve qlu 104 | a' = a - (b <> (qinv c)) 105 | b' = b <> (qinv f) 106 | c' = g <> (qinv c) 107 | d' = h - g <> (qinv f) 108 | 109 | par :: BMatrix a b -> BMatrix c d -> BMatrix (a,c) (b,d) 110 | par (Matrix a b c d) (BMatrix e f g h) = BMatrix (diagBlock [a,e]) (diagBlock [b,f]) (diagBlock [c,g]) (diagBlock [d,h]) 111 | dup = BMatrix a (a,a) 112 | dup = BMatrix (diagBlock [ident,ident]) (ident ||| ident) (ident === ident) (diagBlock [ident,ident]) 113 | id = BMatrix ident ident ident ident 114 | avg = 115 | -} 116 | data QuadFun a b = QuadFun {mat :: M, vec :: Vector Double, c :: Double } deriving Show 117 | 118 | 119 | --data QuadFun a b where 120 | -- QuadFun :: (Bounded a, Bounded b, Enum a, Enum b) => M -> Vector Double -> Double -> QuadFun a b } 121 | 122 | -- This is kron product. 123 | instance (Enum a, Enum b, Bounded a) => Enum (a,b) where 124 | toEnum x = let (a, b) = quotRem x (fromEnum (maxBound :: a)) in 125 | (toEnum a , toEnum b) 126 | fromEnum (a,b) = (fromEnum (maxBound :: a)) * (fromEnum a) + (fromEnum b) 127 | {- 128 | instance (Enum a, Enum b, Bounded a) => Enum (Either a b) where 129 | toEnum x | x > (fromEnum (maxBound :: a)) = Left (toEnum x) 130 | | otherwise = Right $ toEnum (x - 1 - (fromEnum (maxBound :: a))) 131 | fromEnum (Left a) = fromEnum a 132 | fromEnum (Right b) = (fromEnum (maxBound :: a)) + 1 + (fromEnum b) 133 | -} 134 | -- deriving instance (Enum a, Enum b) => Enum (a,b) 135 | 136 | 137 | count :: forall a. (Enum a, Bounded a) => Int 138 | count = (fromEnum (maxBound @a)) - (fromEnum (minBound @a)) + 1 139 | 140 | id ::forall a. 
(Enum a, Bounded a) => QuadFun a a 141 | id = let s = 2 * (count @ a) in QuadFun (ident s) (konst 0 s) 0 142 | 143 | --dup :: QuadFun a (a,a) 144 | --dup = QuadFun (ident 145 | 146 | class (Bounded a, Enum a) => BEnum a where 147 | instance (Bounded a, Enum a) => BEnum a where 148 | 149 | t7 :: QuadFun () () 150 | t7 = QuadFun (konst 4 (2,2)) (konst 7 2) 3 151 | 152 | t8 = (mat t7) <\> (vec t7) 153 | t4 = par t7 t7 154 | t9 = (mat t4) <\> (vec t4) 155 | 156 | compose :: forall a b c. BEnum b => QuadFun b c -> QuadFun a b -> QuadFun a c 157 | compose (QuadFun m' w' c') (QuadFun m w c) = QuadFun m'' w'' c'' where 158 | n = count @b 159 | k = rows m -- assuming square 160 | l = rows m' 161 | i = ident n 162 | q = konst 0 (k-n,n) === i -- I underneath zeros 163 | q' = -i === konst 0 (l-n,n) -- -i above zeros 164 | m'' = fromBlocks [[m, q, 0], 165 | [tr q ,0, tr q'], 166 | [0, q', m']] 167 | w'' = vjoin [w, konst 0 n, w'] 168 | c'' = c + c' 169 | 170 | identOut :: forall a b. BEnum b => QuadFun a b -> M 171 | identOut (QuadFun m w c) = konst 0 (k-n,n) === i where 172 | n = count @b 173 | k = rows m 174 | i = ident n 175 | identIn :: forall a b. BEnum a => QuadFun a b -> M 176 | identIn (QuadFun m w c) = -i === konst 0 (k-n,n) where 177 | n = count @a 178 | k = rows m 179 | i = ident n 180 | type a :+: b = Either a b 181 | -- I kind of feel like our sign convention is flipped 182 | par :: forall a b c d. 
(BEnum a, BEnum b, BEnum c, BEnum d) => QuadFun a c -> QuadFun b d -> QuadFun (a :+: b) (c :+: d) 183 | par x@(QuadFun m w c) y@(QuadFun m' w' c') = QuadFun m'' w'' c'' where 184 | ia = ident (count @a) 185 | ib = ident (count @b) 186 | ia' = identIn x 187 | ib' = identIn y 188 | ia't = tr ia' 189 | ib't = tr ib' 190 | ic = - ident (count @c) 191 | id = - ident (count @d) 192 | ic' = identOut x 193 | id' = identOut y 194 | ic't = tr ic' 195 | id't = tr id' 196 | n = (count @a) + (count @b) 197 | n' = (count @c) + (count @d) 198 | --iab = fromBlocks [[0,0,ia,0], [0,0,0,ib], [ia, 0,0,0], [0,ib,0,0]] 199 | --iab' = fromBlocks [[tr (identIn x),0], [0 , tr (identIn y)]] 200 | --zab = konst 0 (n,n) 201 | -- should be 2 ins + 2 langrange + 2 matr + 2lagarne + 2 outs = 10x10 block matrix 202 | m'' = fromBlocks [[0 ,0 , ia, 0, 0, 0, 0 ,0,0,0], 203 | [0, 0, 0, ib, 0, 0, 0, 0,0,0 ] , 204 | [ia, 0, 0, 0, ia't, 0,0,0,0, 0], 205 | [0, ib, 0, 0, 0,ib't,0, 0,0 ,0], 206 | [0, 0, ia',0, m, 0 , ic',0,0,0], 207 | [0, 0, 0, ib',0, m', 0,id',0,0], 208 | [0, 0, 0, 0, ic't,0 ,0,0,ic,0], 209 | [0, 0, 0, 0, 0,id't,0,0,0,id], 210 | [0, 0, 0, 0, 0, 0, ic,0,0,0], 211 | [0, 0, 0, 0, 0, 0, 0,id,0,0]] 212 | w'' = vjoin [konst 0 (2*n), w,w', konst 0 (2*n')] 213 | c'' = c + c' 214 | 215 | dup :: forall a. BEnum a => QuadFun a (a,a) 216 | dup = QuadFun m 0 0 where -- 1 input, 2 lagrange mutipliers, and 2 outputs. 217 | ia = ident (count @a) 218 | m = fromBlocks [ [0, ia,ia, 0,0], 219 | [ia, 0, 0, -ia,0], 220 | [0, ia, 0, 0, -ia], 221 | [0,-ia, 0, 0, 0 ], 222 | [0, 0, -ia, 0,0]] 223 | 224 | -- fst...? 225 | -- for snd, we could just leave it alone 226 | -- fuse? 227 | -- swap. 228 | data Void 229 | fuse :: forall a. BEnum a => QuadFun (a,a) Void 230 | fuse = QuadFun m 0 0 where -- 2 inputs, 1 lagrange multiplier. 
231 | ia = ident (count @a) 232 | m = fromBlocks [[0,0, ia], 233 | [0 ,0 , -ia], 234 | [ia, -ia, 0]] 235 | 236 | -- The analog for this slicing for convex sets may be to slice the 237 | -- dimensions into in and out dimensions. Then 238 | 239 | 240 | 241 | data Cell a = Cell {phi :: Double, j :: Double, next :: a} deriving (Show, Traversable, Foldable, Functor) -- composition of cell is a statically sized vector. Derive applicative? 242 | 243 | 244 | 245 | instance Applicative Cell where 246 | pure x = Cell 0 0 x 247 | (Cell phi1 j1 f) <*> (Cell phi2 j2 x) = Cell (phi1 + phi2) (j1 + j2) (f x) -- ? I doubt this makes any sense. 248 | {- 249 | data Cell a = Cell { phi :: a, j :: a} 250 | type f :+: g = Product f g 251 | gaussK :: Lens ((Cell :+: Cell) a) (Cell a) 252 | 253 | gaussK :: -> Lens (Cell :*: f a) -- no Cell is very not this. 254 | 255 | type Row = (Cell Cell Cell Cell Cell) 256 | 257 | 258 | Lens (a, b, other) (b, other) 259 | 260 | 261 | gaussK :: Lens 262 | 263 | data Cell2 f a b = Cell2 {phi :: a , j :: a, next :: f b} 264 | data Cell2 f a = Cell2 {phi :: f Double a , j ::f Double, next :: f a} 265 | 266 | -- zipA :: f a -> f b -> f (a,b) 267 | -- zipA x y = (,) <$> x <*> y 268 | -- monProd 269 | 270 | fmap2 = fmap . fmap 271 | fmap4 = fmap2 . fmap2 272 | fmap8 = fmap4 . fmap4 273 | 274 | zipA x y = (,) <$> x <*> y 275 | zip2 = zipA . (fmap zipA) -- hmm. Maybe we should be using Compose. we're going to need famp2. Compose will get us these instances for free. 276 | zip4 = zip2 . (fmap2 zip2) 277 | zip8 = zip4 . (fmap4 zip4) 278 | 279 | 280 | parK :: Lens (f b) b -> Lens (g a) a -> Lens (f g a) a 281 | parK :: forall b. Lens (f b) (f' b) -> Lens (g a) (g' a) -> Lens (f g a) (f' (g' a)) 282 | parK :: forall b. 
Lens (f g a) (f' g a) -> Lens (g a) (g' a) -> Lens (f g a) (f' (g' a))
parK = compose (fmap l2) l1




Do the y direction inductively

ydir :: Lens (Cell a, Cell a) (a,a)
ydir = \(Cell phi j x, Cell phi2 j2 y) -> ((x,y), \(x',y') -> (Cell phi j x, Cell phi2 j2 y) )


ydir :: Lens (Cell a, Cell a) (a,a)
ydir = \(a,b) -> ((Cell phi j x) , f) = (gaussK a) in ((Cell phi j y) , g) = (gaussK b) in

Lens ( (X (), (X (), a) (X (), a)
ydir = \(r1, (r2, z) -> zip8 ) -- = gaussK r1 in = gaussK r2 in

Lens (f a) a -> Lens (f a) a -> Lens (f a, f a) a
Lens (Cell a) a -> Lens

Lens x y x' y'

-}

{-
can i just use regular lenses? I need to set in kind of a weird way though.
-}

-- | A minimal lens: from a source @a@, produce the view @b@ together with a
-- putback function that rebuilds an @a@ from an updated @b@.
-- Here the "get" direction is the forward elimination sweep and the putback
-- is the back-substitution sweep of the solver below.
data Lens a b = Lens (a -> (b, b -> a))
-- newtype Lens a b = Lens forall r. (a -> (b -> (b -> a) -> r) -> r) -- cpsify for speed? van Laarhoven?

-- SLens s a b = SLens (a -> ST s (b, b -> ST s a)) mutable update
-- MLens a b = MLens (a -> m (b , b -> m a)) -- monadic lens
-- KLens a b = KLens (a -> (b, k b a)) ---- this is sort of what Conal wrote.

-- | Lens composition: run the inner getter, feed its view to the outer
-- getter; the putbacks then compose in the reverse order (g' . f').
comp :: Lens b c -> Lens a b -> Lens a c
comp (Lens f) (Lens g) = Lens $ \x -> let (y, g') = g x in
                                      let (z, f') = f y in
                                      (z, g' . f')

-- Gauss Seidel of the standard 1 -2 1 matrix
-- j1 is the effective source including the influence from phi values up in the stack
-- Double Cell construction is rather wonky
-- gaussK :: Vec (S (S n)) Double -> Vec (S n)
-- we could also go for the lagrange multiplier interface.
-- can also push the rho value needed for stability up and down.

-- -2 phi1 + 1 phi2 = ~j1
-- -1 phi1 + -2 phi2 + ...? = j2

{-
We are moving the lower diagonal to the right hand side as the splitting.
That ends up calculating an effective j based on previous values.
We then triangular solve the upper diagonal, which we are able to find the new diagonal element in terms of the lower values.

-}
-- it's kind of weird that we mutate j on the way down but restore it on the way up.
-- But we do need a way to access j. (Phi (Phi a), J a) (Phi )

-- | One elimination step of the Gauss-Seidel sweep: going down it folds the
-- head cell's phi into the tail's effective source (j2 - phi1); coming back
-- up it solves the head's diagonal (-2) from the updated neighbour, while
-- restoring the original j fields (see the note above).
gaussK :: Lens (Cell (Cell a)) (Cell a) -- we need the context Cell
gaussK = Lens $ \case (Cell phi1 j1 (Cell phi2 j2 y)) -> (Cell phi2 (j2 - phi1) y , \case (Cell phi2' j2' z) -> let j1' = j1 - phi2' in -- moving the triangular upsolve to the right hand side
                                                           Cell (- j1' / 2) j1 (Cell phi2' j2 z))

-- interface in , internals, interface out. was the previous way of talking about it. But then the internals grow, which is fine
-- compose :: Lens in internal out -> Lens in' internal' out' -> Lens in (internal, out, in', internal') out
-- some kind of 2 category? Enriched? The morphisms have this internal structure.
-- this is more a containing relationship. The larger context can be converted into the smaller context.
--

-- Doubling up single steps gives solvers over chains of 2^n cells.
g2 :: Lens (Cell (Cell (Cell a))) (Cell a)
g2 = gaussK `comp` gaussK
g4 = g2 `comp` g2
g8 = g4 `comp` g4
g16 = g8 `comp` g8
g32 = g16 `comp` g16

capZero :: Lens (Cell ()) () -- not even sure i really need this? does runGauss (f `comp` capZero) ~ runGauss f
capZero = Lens $ \case (Cell phi j _) -> ((), \_ -> Cell (- j / 2) j ())

-- runGauss :: Lens a b -> a -> a -- removes the open ended nature of the thing.
-- | Close off an open-ended chain: run the forward sweep, then immediately
-- feed the trivial () view back through the putback (the triangular upsolve).
runGauss :: Lens a () -> a -> a -- this might make more sense as what you really want. This is some kind of cap operation.
runGauss (Lens f) x = let (y , trisolve) = f x in trisolve y

startingVal :: Cell (Cell (Cell ()))
startingVal = (Cell 0 0 (Cell 0 1 (Cell 0 0 ())))

-- Gauss-Seidel iterates for the 3-cell chain with a unit source in the middle.
iters = iterate (runGauss (capZero `comp` g2)) startingVal
--iters' = iterate (runGauss g2) startingVal -- They are different.

-- Direct hmatrix solve of the same 3x3 system, for comparison with 'iters'.
sol = ((3><3) [-2,1,0,
               1,-2,1,
               0,1,-2]) <\> (vector [0,1,0])

-- can do inner block solves also (via an iterative method perhaps)
-- this is reminiscent of a multiscale attack.
-- | Parallel (side-by-side) composition of two independent lenses on a pair.
parC :: Lens a a' -> Lens b b' -> Lens (a,b) (a',b')
parC (Lens f) (Lens g) = Lens $ \(x,y) -> let (x', f') = f x in
                                          let (y', g') = g y in
                                          ((x',y') , f' *** g')

-- profunctor optics paper
data FunList a b t = Done t | More a (FunList a b (b -> t))
-- contents and fill
-- contents (s -> a^n)
-- fill s -> b^n -> t
-- s -> exists n. (a^n, a^n -> s) is a traversal. Could use a Vec. But we already have a Vec. Cell is a Vec
-- s -> exists n, (Vec n a, Vec n a -> s)
-- maybe we do need an applicative. We sort of need to zip together two rows to start going 2d.

-- huh. A block wise schur is kind of the monoidal product here. No. monoidal product is pure dsum.
-- actually composition? is kind of what plays the game of block wise stuff.
{-

if we want this to be not the case,
comp :: Lens (a,a') a'


schur :: Lens a a' -> (a -> b) -> (b -> a) -> Lens b b' -> Lens (a,b) (a',b')
schur (Lens a) b c (Lens d) = Lens $ \(x,y) -> let (x', a') = a x in
                                               let (y', d') = d x in
                                               ( )
-}
-- Lens (f (g a)) (g a)

-- we can change this all into a continuation form
--

{-
par :: forall a b c d.
(BEnum a, BEnum b, BEnum c, BEnum d) => BMatrix a b -> 410 | BMatrix c d -> BMatrix (a,c) (b,d) 411 | par q@(QuadFun m w r) q'@(QuadFun m' w' r') = QuadFun q'' w'' r'' where 412 | a' = diagBlock [sliceA q, sliceA q'] 413 | b' = diagBlock [sliceB q, sliceB q'] 414 | c' = diagBlock [sliceC q, sliceC q'] 415 | d' = diagBlock [sliceD q, sliceD q'] 416 | q'' = fromBlocks [[a',b'], [c',d']] 417 | u' = vjoin [sliceU v, sliceU v'] 418 | v' = vjoin [sliceV v, sliceV v'] 419 | w'' = vjoin [u', v'] 420 | r'' = r + r' 421 | 422 | 423 | 424 | 425 | 426 | sliceA :: forall a b. (BEnum a, BEnum b) => QuadFun a b -> M 427 | sliceA (QuadFun m v c) = subMatrix (0,0) (count @a, count @a) m 428 | sliceB :: forall a b. (BEnum a, BEnum b) => QuadFun a b -> M 429 | sliceB (QuadFun m v c) = subMatrix (0,count @a) (count @a, count @b) m 430 | sliceC :: forall a b. (BEnum a, BEnum b) => QuadFun a b -> M 431 | sliceC (QuadFun m v c) = subMatrix (count @a,0) (count @b, count @a) m 432 | sliceD :: forall a b. (BEnum a, BEnum b) => QuadFun a b -> M 433 | sliceD (QuadFun m v c) = subMatrix (count @a,count @a) (count @b, count @b) m 434 | 435 | sliceU :: forall a b. (BEnum a, BEnum b) => QuadFun a b -> Vector Double 436 | sliceU (QuadFun m v c) = subVector 0 (count @a) v 437 | sliceV :: forall a b. (BEnum a, BEnum b) => QuadFun a b -> Vector Double 438 | sliceV (QuadFun m v c) = subVector (count @a) (count @b) v 439 | 440 | n = count @a 441 | m = count @b 442 | q = rows m 443 | p = q - m 444 | a = subMatrix (0,0) (count @a, count @a) m 445 | 446 | 447 | a = subMatrix 448 | --Other options 449 | -- Keep a matrix with the end and the beginning are the 450 | 451 | -- Keep Constraints too? 
452 | 453 | -- what in the world 454 | -- stack build --ghc-options /usr/lib/libiconv.dylib 455 | 456 | -- size 457 | -- rows 458 | -- cols 459 | 460 | compose :: QuadFun b c -> QuadFun a b -> QuadFun a c 461 | compose (QuadFun m w c) (QuadFun m' w' c') = QuadFun m'' w'' c'' where 462 | n = (count @b) 463 | corner = fromBlocks [[ident n, 0],[0, -(ident n)]] 464 | corner' = fromBlocks [[0,0],[corner,0]] 465 | m'' = fromBlocks [[m ,corner'], [tr corner', m']] 466 | w'' = vjoin [w, const 0 n, w'] 467 | c'' = c + c 468 | 469 | par (QuadFun m w c) (QuadFun m' w' c') = 470 | 471 | 472 | type a :+: b = Either a b 473 | data QuadFun x = QuadFun 474 | 475 | 476 | ((a,b,c), 477 | (d,e,f), 478 | (g,h,i)) = 479 | 480 | 481 | data BMatrix' = BMatrix' M M M M M M M M M 482 | compose (BMatrix' a' b' c' d' e' f' g' h' i') (BMatrix' a b c d e f g h i) = 483 | BMatrix' a'' b'' c'' d'' e'' f'' g'' h'' i'' where 484 | a'' = a 485 | b'' = b ||| c ||| 0 486 | c'' = 0 487 | d'' = d === g === 0 488 | e'' = fromBlocks [[e,f,0], [h, i + a', b'], [0, d', e']] 489 | f'' = 0 === c' === f' 490 | g'' = 0 491 | h'' = 0 ||| g' ||| h' 492 | i'' = i' 493 | compose' (BVec u' v' w') (BVec u v w) = BVec u (vjoin [v ,w + u', v']) w' 494 | 495 | 496 | par :: QuadFun a b -> QuadFun c d -> QuadFun (Either a c) (Either b d) 497 | par (BMatrix' a' b' c' d' e' f' g' h' i') (BMatrix' a b c d e f g h i) = 498 | BMatrix' a'' b'' c'' d'' e'' f'' g'' h'' i'' where 499 | a'' = diagBlock [a',a] 500 | b'' = diagBlock [b',b] 501 | c'' = diagBlock [c',c] 502 | d'' = diagBlock [d',d] 503 | e'' = diagBlock [e',e] 504 | f'' = diagBlock [f',f] 505 | g'' = diagBlock [g',g] 506 | h'' = diagBlock [h',h] 507 | i'' = diagBlock [i',i] 508 | 509 | -- If we're just recording the entire program, we might as well just record it in a DSL 510 | -- rather than directly building the matrix 511 | -- data QuadFunDSL = Par QuadFun 512 | -- FreeCat + (Lit HMatrix) 513 | 514 | -- iterative matrix solution proximal method? 
515 | -- 516 | -- gauss seidel 517 | type VBlock = [Vector Double] 518 | -- (V, V) 519 | -- Maybe schur solve works. But with possible splitting 520 | -- Maybe unsolvable schur 521 | -- could damp a little if the diagonal isn't dominant enough 522 | -- (lusolve a (w - V <#| v', ) -- WHat I'm suggesting here is block Jacobi? No 523 | -- No it isn't. Because we're passing back v' it is guass seidel. 524 | -- in order to do block gauss seidel 525 | -- type L = VBlock -> (VBlock, VBlock -> VBlock) 526 | 527 | -- Writing in the monoidal categroy style makes parallelism more apparent. 528 | 529 | 530 | -- [vjoin blocks] -> ([lesser blocks] , update [lesserblocks] -> [blocks] 531 | -- \x:xs -> (a*x + xs, \b -> b ) 532 | 533 | -- storage in the lens? 534 | -- In my AD lens, I had all the weights in the input. This was ungainly 535 | -- Maybe a choice monad? Everyone could have access to a global state 536 | -- forall s. (ActualLens s a, a -> (b, db -> da)) 537 | -- compsoe (ActualLens s a, a -> (b, db -> da)) 538 | -- par 539 | -- ActualLens are functional references. Do they allow us to refiy sharing? 540 | -- (ActualLens acc a, ) 541 | 542 | -- alternative a -> State s (b, db -> State ds da) 543 | 544 | -- Mutiple directions of composition. Horizontsl and vertical? 545 | -- a 2-category? 
-- ActualLens s a -> ActualLens s' s

-- Jules Hedges:
-- Lens sigma a b -> Lens sigm' b c -> Lens (sig,sig') a c
-- Lens :: Sigma -> (sigma, ) -> Lens a b -- hides sigma, we'll never be able to get at it

-}
-------------------------------------------------------------------------------- /src/Cone.hs: --------------------------------------------------------------------------------
{-# LANGUAGE DerivingVia, GeneralizedNewtypeDeriving, DeriveTraversable, ScopedTypeVariables, TypeApplications #-}

module Cone where

import Linear.Metric
import Linear.Vector
import Linear.V4
import Linear.Epsilon
import Data.Coerce
import Linear.V1
import Linear.V2
import Linear.V3
import Data.List (unfoldr)
import Data.Functor.Product
-- import Data.Maybe (fromMaybe)



-- look at Linear.Affine for Point, which is what we're modelling off of. I took some typeclasses out that I don't know what they do.
-- There is a slot for the shape of the space f (_^7 7 dimensional or whatever) and a slot for the underlying number a.

-- | A ray (direction through the origin), represented by any vector along it.
newtype Ray f a = Ray (f a) deriving (Eq, Ord, Show, Read, Monad, Functor, Applicative, Foldable, Traversable, Additive, Metric, Fractional , Num, Epsilon)
-- | A dual vector (covector / linear functional) over the space @f@.
newtype Dual f a = Dual (f a) deriving (Eq, Ord, Show, Read, Monad, Functor, Applicative, Foldable, Traversable, Additive, Metric, Fractional , Num, Epsilon)
-- | A halfspace through the origin, as the dual of a ray (its inward normal).
type HalfSpace f a = Dual (Ray f) a -- should I be using Compose?

-- I'm cool with these.
-- | Double dualization is the identity; zero cost via 'coerce'.
absorbdual :: Dual (Dual f) a -> f a
absorbdual = coerce

injectdual :: f a -> Dual (Dual f) a
injectdual = coerce


-- The following feel very ad hoc in their usage. They make me queasy

-- | The ray normal to a halfspace (a pure representation change).
polar :: HalfSpace f a -> Ray f a
polar = coerce

-- | The halfspace whose inward normal is the given ray.
dpolar :: Ray f a -> HalfSpace f a
dpolar = coerce

dual :: f a -> (Dual f) a
dual = coerce

dual' :: (Dual f) a -> f a
dual' d = absorbdual (dual d)

-- ConvCone returns Nothing if (f a) is in the set and (Just Dual) giving a hyperplane in which the entire set is contained for which the original argument is not in the set.
-- In other words, it tells you if in set, or gives a simple "proof" that you are not in the set. And a clue for possible future inquiries.
type ConvCone f a = f a -> Maybe (Dual f a)
-- forcing it to give you something extremal in some sense is nice.
type ConvCone' f a = f a -> Maybe (Dual f a, f a) --

{-
Gives nearest cone to given ray and dual supporting plane. Hmm. Maybe I don't have to give f a back because you can just project it onto the given plane if you like.
I think standard projection works fine
We're implicitly using max (u dot v) / |u| |v| s.t. u in K
This is indeed a reasonable kind of the conification of the support function.
The support function form of a convex set is a black box that can give the maximal point for any linear objective.
I've struggled with

-}

{-
Convex Relation needs an input and output

trans :: ConvRel (f :*: g) h -> ConvRel f (g :*: h)
untrans = inverse

converse :: ConvRel f g -> ConRel g f
compose :: ConvRel f g -> ConvRel g h -> ConvRel f h -- hmm.
meet :: ConvRel f g -> ConvRel f g -> ConvRel f g
join :: ConvRel f g -> ConvRel f g -> ConvRel f g
leftdiv :: --hmm
rightdiv :: --hmm

lift :: ConvCone f -> ConvRel f f
dup :: ConvRel f (Product f f)
par :: ConRel f g -> ConvRel h k -> ConvRel (Product f h) (Product g k)
projEq = converse dup

-}

-- | A convex relation between spaces @f@ and @g@, encoded as a convex-cone
-- oracle over their product.
newtype ConvRel f g a = ConveRel (ConvCone (Product f g) a)

-- Or could call objective

newtype Max a = Max (V1 a) deriving (Eq, Ord, Show, Read, Monad, Functor, Applicative, Foldable, Traversable, Additive, Metric, Fractional , Num, Epsilon)
newtype Domain f a = Domain (f a) deriving (Eq, Ord, Show, Read, Monad, Functor, Applicative, Foldable, Traversable, Additive, Metric, Fractional , Num, Epsilon)

-- newtype ConvexProgSet f a = Max :*: (Domain f) a
-- newtype

-- | V-representation: a cone as a list of generating rays.
type VRep f a = [Ray f a]
-- | H-representation: a cone as a list of halfspace constraints.
type HRep f a = [HalfSpace f a]

-- an hrep for the convex cone of a single ray. Consists of the polar of the ray to cut off the negative side, and a projection of a complete basis. Not linearly independent. Use orthogonalize and prune if you want that.
hrep :: (Metric f, Traversable f, Fractional a) => Ray f a -> HRep f a
hrep r = pplane : (fmap (projectoff pplane) (basisFor pplane)) where pplane = dpolar r

-- | Dual of 'hrep': the VRep of a single halfspace, obtained for free by coercion.
vrep :: forall f a. (Metric f, Traversable f, Fractional a) => HalfSpace f a -> VRep f a
vrep = coerce @(Ray f a -> HRep f a) @(HalfSpace f a -> VRep f a) hrep

projectoff :: (Metric v, Fractional a) => v a -> v a -> v a -- subtracts the first argument off of the second
projectoff b u = u ^-^ (project b u)

-- NOTE(review): despite the old copy-pasted comment, this removes from u the
-- component along the plane's normal, i.e. it projects u onto the plane of b.
projectonto :: (Metric v, Fractional a) => Dual v a -> v a -> v a
projectonto b u = u ^-^ (project (dual' b) u)

reflectover :: (Metric v, Fractional a) => v a -> v a -> v a -- reflect the second argument about the first
reflectover b u = u ^-^ (2 *^ (project b u))


-- | Signed agreement of a ray with a halfspace's inward normal; negative
-- means the ray violates the constraint.
score :: (Metric f, Num a) => Ray f a -> HalfSpace f a -> a
score v h = (polar h) `dot` v

-- | Is the vector inside the (closed) halfspace?
elemRH :: (Metric f, Ord a, Num a) => f a -> (Dual f) a -> Bool
elemRH v h = (dual' h) `dot` v >= 0




-- | The cone oracle of a single halfspace: 'Nothing' when the ray satisfies
-- it, otherwise the halfspace itself as the separating certificate.
halfcone :: (Metric f, Ord a, Num a) => HalfSpace f a -> ConvCone (Ray f) a
halfcone h r | r `elemRH` h = Nothing
             | otherwise = Just h
{-
halfcone' :: (Metric f, Ord a, Num a) => HalfSpace f a -> ConvCone (Ray f) a
halfcone' h r | r `elemRH` h = Nothing
              | otherwise = Just h
-}
-- From a collection of halfplanes, get the worst scoring one. Seems like a good start for a greedy method. proto simplex.
-- It might be wise to return a collection of the worst scoring
-- Or to keep a heap rather than a list? Ehh.
-- I don't really need Ord (f a), I need to only sort by the score
hrep' :: (Metric f, Ord a, Num a, Ord (f a)) => HRep f a -> ConvCone (Ray f) a
-- FIX: 'minimum' is partial and crashed on an empty HRep. No constraints
-- means every ray is (vacuously) in the cone, so the oracle answers 'Nothing'.
hrep' [] _ = Nothing
hrep' hs r = let (hurtiness, h) = minimum (map (\h -> (score r h, h)) hs)
             in if hurtiness >= 0 then Nothing else Just h



-- Intersection of cones in H-representation: just take all constraints.
intersectHH :: HRep f a -> HRep f a -> HRep f a
intersectHH = (++)

-- Convex hull of cones in V-representation: just take all generators.
hullVV :: VRep f a -> VRep f a -> VRep f a
hullVV = (++)

-- intersection of VV, hull of HH, and hull of VH are more challenging
-- the intersection of a VRep with an HRep is .. this is not right. We need to project the generators onto the set
--intersectVH :: (Metric f, Num a, Ord a) => VRep f a -> HRep f a -> VRep f a
-- intersectVH vs hs = filter (\v -> all (elemRH v) hs) vs


--findRay :: ConvCone f a -> Ray f a -> [Ray f a] -- this is some relative of iterate. Perhaps unfold
--findRay f r | Nothing <- f r = [r]
--            | Just h <- f r = r : (unfoldr (\r' -> fmap (\h' -> (r', projectoff (dpolar h') r'))) (projectoff (dpolar h) r))
-- The simplest possible findRay. It might be a better idea to reflect over the returned support plane? If you project a polar, it will become zero. not good.
-- reflection is an orthonormal transformation which is nice.
-- This is probably a first order method. Maybe keep a running sum of dual planes with decreasing coefficients 1/k? Line search?
-- The returned dual is a kind of subgradient.
-- don't we really want this to return [(Ray, Dual Ray)] pairs?
-- what if the convex set is empty? only zero. Then all duals are in its dual space. But the rays will never converge to zero?
-- This is some kind of alternating projection method. With sort of chaotic ordering of the planes.
-- we can also overshoot or undershoot. As I was saying about reflection rather than projection. Supposedly a 1.5 can be pretty good rather than 2.
-- | Iterate toward a ray in the cone: repeatedly project off the normal of
-- whichever separating halfspace the oracle returns, until it accepts.
findRay :: (Metric f, Fractional a) => ConvCone (Ray f) a -> Ray f a -> [Ray f a] -- this is some relative of iterate. Perhaps unfold
findRay f r | Nothing <- (f r) = [r]
            | Just h <- (f r) = r : (findRay f (projectoff (polar h) r))

-- orthogonal set of vectors. If we don't ever peek under the cover, we can guarantee that it is orthogonalized and pruned.
newtype Orthogonal v a = Orthogonal [v a] -- This is the kind of thing Ghosts of departed proofs might be cool for. We could also type-level tag orthogonalized according to different metrics
-- really I may want other containers. I could parametrize over a general traversable perhaps?
-- I'm going to very likely want a queue. Okasaki queue? Because I'll be popping off the end.
-- And really, I probably want a mutable version of f itself


-- | Gram-Schmidt (without normalization, without pruning of near-zero
-- results). Output is in reverse order of input (new vectors are consed on).
-- BUG FIX: each incoming vector must have the accumulated basis projected
-- OFF of it ('projectoff'), not be projected ONTO the basis ('project');
-- 'appendOrthogonal' below already does the correct thing.
orthogonalize :: forall v a. (Metric v, Fractional a) => [v a] -> [v a] -- Orthogonal v a
orthogonalize vs = foldl (\bs v -> (projectbasis bs v) : bs) [] vs where
    projectbasis :: [v a] -> v a -> v a
    projectbasis basis v = foldr projectoff v basis

-- | Drop near-zero elements.
prune :: Epsilon a => [a] -> [a]
prune = filter (not . nearZero)

-- | Gram-Schmidt with pruning, building an 'Orthogonal' witness.
orthogonalize' :: forall v a. (Epsilon (v a), Metric v, Fractional a) => [v a] -> Orthogonal v a
orthogonalize' hs = foldr appendOrthogonal nilOrthogonal hs

nilOrthogonal :: Orthogonal f a
nilOrthogonal = Orthogonal []
-- identity :: (Num a, Traversable t, Applicative t) => t (t a)
orthobasis :: (Additive f, Traversable f, Num a) => Orthogonal f a
orthobasis = Orthogonal basis

-- This is where the money happens
-- | One Gram-Schmidt step: remove the existing components, keep the residual
-- only if it is not (numerically) zero.
appendOrthogonal :: (Epsilon (v a), Metric v, Fractional a) => v a -> Orthogonal v a -> Orthogonal v a
appendOrthogonal h (Orthogonal hs) = let h' = foldr projectoff h hs in if (nearZero h') then (Orthogonal hs) else Orthogonal (h' : hs)

-- This is safe. Might be needed if I don't export the Orthogonal constructor.
forgetOrthogonal :: Orthogonal f a -> [f a]
forgetOrthogonal = coerce
-- headOrthogonal
-- tail Orthogonal
-- dropLastOrthogonal

-- Does this make sense? I'm not 100% sure.
-- projectOntoPlanes :: (HRep f a, Ray f a) -> (HRep f a, Ray f a) -- return the new orthogonal basis, pruned. We can completely avoid re-orthogonalizing also.
projectOntoPlanes :: (Metric f, Fractional a) => HRep f a -> Ray f a -> Ray f a -- could merge this into a single orthogonalize pass by placing the ray into the
projectOntoPlanes hs r = let hs' = (orthogonalize hs) in foldr (projectonto) r hs'

-- | Like 'projectOntoPlanes' but with planes already orthogonalized.
projectOntoPlanes' :: (Metric f, Fractional a) => Orthogonal (Dual (Ray f)) a -> Ray f a -> Ray f a
projectOntoPlanes' (Orthogonal hs) r = foldr (projectonto) r hs

-- if we greedily ask for hrep', and then projectOntoPlanes

-- | One ADMM iteration for intersecting two cone oracles: alternate the two
-- projections with a running dual (consensus) variable l.
admmstep :: (Metric f, Fractional a) => ConvCone (Ray f) a -> ConvCone (Ray f) a -> (Ray f a, Ray f a, Ray f a) -> (Ray f a, Ray f a, Ray f a)
admmstep f g (u1, u2, l) = let u2' = proj1 (u1 ^+^ l) in
                           let u1' = proj2 (u2' ^-^ l) in
                           let l' = l ^+^ u1' ^-^ u2' in -- did I get these signs right? I think so.
                           (u1', u2', l') where
                              proj1 upl = maybe upl (flip projectonto upl) (f upl) -- it does feel very likely to be some duplication of work here.
                              proj2 uml = maybe uml (flip projectonto uml) (g uml)

admm :: (Metric f, Fractional a) => ConvCone (Ray f) a -> ConvCone (Ray f) a -> (Ray f a, Ray f a, Ray f a) -> [(Ray f a, Ray f a, Ray f a)]
admm f g = iterate (admmstep f g)

-- no. I'm not doing something right. I'm losing the planes so I don't get closure
-- intersect' :: ConvCone (Ray f) a -> ConvCone (Ray f) a -> ConvCone (Ray f) a
-- intersect' f g r = (take 20 (admm f g)

-- Example: ADMM between two halfspace cones in the plane.
ex2 = admm f g (Ray $ V2 1 0, Ray $ V2 1 0, Ray $ V2 0 0) where
        f = halfcone (Dual $ Ray (V2 1 0))
        g = halfcone (Dual $ Ray (V2 (-1) 1))



-- | Double description: a cone carried in both V- and H-representation.
data DD f a = DD {primalDD :: [f a], dualDD :: [Dual f a] }

-- | A plane as the intersection of its two opposing halfspaces.
plane :: (Functor f, Num a) => HalfSpace f a -> HRep f a
plane h = [h, (-1) *^ h]
{-
ddray :: (Traversable f, Additive f, Metric f, Fractional a) => Ray f a -> DD (Ray f) a
ddray r = DD [r] hs where hs = concatMap (plane . polar . (projectonto (dpolar r))) basis -- slightly over complete. We could then orthogonalize to remove also not compiling.

-}
ddhalfspace :: (Traversable f, Additive f, Metric f, Fractional a) => HalfSpace f a -> DD (Ray f) a
ddhalfspace h = DD rs [h] where rs = polar h : (map (projectonto h) (basisFor (polar h))) -- will be overcomplete. Could orthogonalize.

-- | Polarity swaps the two representations (rays become planes and vice versa).
ddpolar :: DD f a -> DD (Dual f) a
ddpolar (DD rs hs) = DD (coerce hs) (coerce rs)



-- | Intersect two DD cones: keep the generators of each that satisfy the
-- other's constraints, and pool all the constraints.
ddintersect :: (Metric f, Ord a, Num a) => DD f a -> DD f a -> DD f a
ddintersect (DD rs hs) (DD rs' hs') = DD ([r | r <- rs, all (elemRH r) hs'] ++ [r' | r' <- rs', all (elemRH r') hs]) (hs ++ hs')

-- a simple pruning. Prune any point that isn't tight on at least one plane. Maybe prune the bigger one first? There can still be tight planes that are nevertheless redundant.
-- There is presumably a large literature about these problems
-- We may also want to prune any vertices that are literally multiples of each other. Prune any that are linear sums of others.
-- So any ray should be on at least (D-1?) planes to actually be a corner of the polyhedral cone
-- likewise any plane should touch at least D-1 ray generators.
-- | Drop near-zero elements and any ray/plane that is not tight against at
-- least one element of the opposite representation.
-- NOTE(review): inferred signature is roughly
--   ddprune :: (Metric f, Epsilon a, Epsilon (f a), ...) => DD (Ray f) a -> DD (Ray f) a
-- (nearZero is used on both scalars and vectors) -- TODO confirm before pinning one down.
ddprune (DD rs hs) = let rs' = [r | r <- rs, not (nearZero r), any (\h -> nearZero (score r h)) hs] in
                     let hs' = [h | h <- hs, not (nearZero h), any (\r -> nearZero (score r h)) rs] in
                     DD rs' hs'


-- cone containment of Hrep. Ax >= 0 ==> Bx >= 0 if exists D >=0 s.t. B >= DA. D is a linear derivation of the second equation from the first

-- ddhull (DD rs hs) (DD rs' hs') = DD (rs ++ rs') ?
{-
There is an analog of admm that would work on lists of constraints, if that's what you're into.
if I find a fixed point of admmstep,
I kind of get the sense this is not ok. pushing the fixes out later.
Is there some constraint that I need on the individual lambda?
admmstep (halfcone fix admmstep) (halfcone fix admmstep) <=> ??? fix (admmstep (admmstep) (admmstep))

Maybe pack together u1 u2 into a larger product space, and then have the full constraint that u11 = u12, u21 = u22.
Or we could arbitrarily select u1 or u2 from the left or right side
Would I want to have some kind of queue system for my convex constraints rather than just sweeping? It seems crazy to keep checking that some irrelevant constraint keeps getting satisfied.
Like count how many times it has not been obtained.


f upl is returning a halfplane, then we use that hyperplane to project. (since that is fast and easy)
could keep a running thing of the most recent hyperplanes we've seen. Dump them if the dimension is greater than f or if they are 0.



I feel like we should be keeping a record of our previous support planes. We're implicitly in the thrall of our previous planes.
And yes, we should keep a queue of the planes, such that the most recently disobeyed ones are at the top? Or the queue could be of fixed size?
Changing the ordering of the queue is a curious manipulation on the lambda.
The lambdas array is about connections between planes, so it should be of size one less than the Hrep list.
Maybe as soon as something returns happy, we dump that plane, and combine its lambdas somehow.
(HalfSpace, Ray, [(HalfSpace, Ray, Lambda)]) gives the inherently correct data structure.
selfadmmstep :: ConvCone -> (HRep, Ray, [Lambdas]) -> (HRep, Ray, [Lambdas])
proj1 (u1 ^+^ lprev ^-^ lnext) -- all except the very first one.
lprev' = yada

This makes sense. I might even be convinced that such a method is correct. We're implicitly describing convex cones as the intersection of their half spaces. We are allowed to
"ask" for particularly useful planes, the ones that disprove that particular rays are in the set.

This procedure probably does not converge finitely? Just gives increasingly good approximations?
Orthogonalization of our halfspaces might help. Then we can guarantee we aren't bouncing into each other's faces. Projection of one does not affect projection of the others.
Maybe a generalization of orthogonalization. The two directions of a hyperplane are different things.
A complete set of rays under non-negative multiples is of size 2*d. Any point can be written as a sum of these.
I dunno. Maybe this makes no sense. Orthogonalization might help constrain you to orthants.

I feel that if your cone has an interior, we might be good. We'll find it? But if it doesn't...
The paper mentions an alpha parameter: alpha u + (1-alpha) u' = u'; alpha = 1 is pure, while above or below it gives over-relaxation and under-relaxation.
https://web.stanford.edu/~boyd/papers/pdf/scs_long.pdf
Seems very evocative of symmetric over-relaxation, since ADMM is a Gauss-Jacobi style algorithm.
https://en.wikipedia.org/wiki/Successive_over-relaxation
tuning parameters make me ill. I guess guarantees have been out the window anyhow.

Perhaps ConvexSet should be able to return Feasible | HalfSpace | Plane. Approximating a plane as the intersection of two halfspaces seems like trouble. How do we know we'll ever get both? Well, I guess if both are relevant we'll eventually probe there. No, I think this is just an optimization. A good one probably.

Q: is there a reasonable analog of GMRES for convex problems? GMRES is using a found Krylov space and doing least squares in it.
We are also using a found space. I suppose you could send out our explored planes to an external LP solver.
One can also prune redundant halfspace constraints using LP solves. Wheels within wheels.

Is what I'm trying to do obscene? Purely functional, no mutation, all sorts of wasted doubled ops for the sake of composition.


did I mention yet that we should probably also include series acceleration? a la Hughes perhaps. Anderson, Richardson, who knows.
hmm, successive over-relaxation says this is related to Richardson extrapolation. Interesting.


A different interpretation of the ConvexCone function:
What if it is required to return a dual plane such that
a. all of the convex cone is in the halfspace
b. the projection of the given ray onto the halfspace is in the set.
b is a much stronger condition than just the support condition a.
The primitive halfspace function does satisfy this of course.
Fixed admm converges to satisfying this... uh. Maybe.
Dykstra's projection method. Is Dykstra's projection exactly equivalent to ADMM or not? It has an extra variable, but doesn't seem like it needs one? It does, all told, look more symmetrical.


I guess my concern is that the thing could return an exact

If in the interior of the cone, we could have it return nothing or perhaps a similar hyperplane?
331 | 332 | https://web.stanford.edu/class/ee392o/alt_proj.pdf 333 | 334 | -} 335 | {- 336 | 337 | If we take linear inhomogenous contraints to homgenous linear constraint in d+1, gram Schmidt becomes a solution method for 338 | it also corresponds to working with the augmented matrix. 339 | This is a more geometrical method of finding nullspaces. Gaussian alemination is always performed relative to an extrnal basis. And QR 340 | Sparsity is also not a bisis independent thing 341 | 342 | The convexSet representation doesn't lose us that much with resepnt to jus ta plane. We get the plane directly back 343 | The functional representation of a linear map requires reconstitution of the linear map with a solve... what is my point? 344 | 345 | linear maps can be thought of as linear relations. I dunno. 346 | 347 | a linear subspace is also a convex set. 348 | type LinearSubSpace :: Ray f a -> Maybe (Plane f a) 349 | 350 | linearsub :: Plane f a -> 351 | Solvoing a system of linear equaltites is ocnverting from an Hrep to a VRep 352 | 353 | 354 | We can also work external solvers into this framework. For example, we might use OSQP. 355 | 356 | We could get further clues from the set rather than just a dual plane. We could get our approximate distance for example. The local approximation of the function 357 | d(y) = min { |x - y| | x \in Set} -> derivative and hessian. 358 | This seems like it would roughly corresopnd to a newton / 2nd order method. 359 | 360 | we need to start building a tower of embeddings. 361 | self dual embeddings, 362 | affine embeddings, 363 | function to set problems 364 | minimization to feasibility problems 365 | 366 | -} 367 | 368 | {- 369 | 370 | 371 | Does the optimization package make all this ridiculous? 372 | 373 | I like the penalty interior point method. Hmm. Actually, wait, they are using penalty, not interior point. Why is that? 374 | Anyhow, that is good enough probably for a demo mixed integer program using logicT / list search. 
375 | 376 | 377 | 378 | If it's easy, maybe it makes sense to compile to glpk or cbc 379 | https://kul-forbes.github.io/scs/ 380 | https://www.chrisstucchio.com/blog/2012/linear_programming.html 381 | 382 | 383 | A couple algorithms: 384 | Just iterated projection 385 | ADMM styel. sort of damped projection 386 | take SVD of all active planes. Kind of an interior point method 387 | Do sinlge round of QR algorithm for svd. Low rank update of 388 | Guass newton aplicaable? 389 | min_{|x|=1} (a^T x) ^2 / (a^T a) 390 | 391 | SVD 392 | 393 | -} 394 | 395 | {- 396 | # Convex Programming in Haskell Stuff 397 | 398 | I feel pulled in multiple directions. I want to wait to write a blog post until the ideas in it have some level of completion. But sometimes that level never comes. My attention may wander off. And yet, I feel I often have some interesting half built things to show. 399 | I have many loves in my life, but two of them are Haskell and Convex Programming. 400 | 401 | In a previous post, I talked a bit about how reducing convex programming to questions about cones leads to elegance and simplification. 402 | 403 | There are a couple of approaches to well-typed vectors in Haskell. One of them is to consder an f :: * -> * parameter as characterizing a vector space. We intend to fill in this with things like V4. 404 | data V4 a = V4 a a a a 405 | A vector space can be consdiered $R^n$. The f shape is sort of the $-^n$ part. The part that tells us the shape/ size of the vector space. We can then choose to fill it in, making is a vector space over Doubles or Complex Doubles or something more exotic. 406 | The fundamental construct we want to talk about for cones is the Ray, rather than the vector or point. A Ray is a direction. We can represent it with a vector, if we ignore the magnitude of the vector or if we choose to always work with normalized vectors (which is less elegant really). 
407 | A cone is a set that is closed under addition and non negative scalar multiplication. It is a convex set of rays. 408 | The dual space to rays are the halfspace cones. They can be naturally parametrized also by vectors (basically the vectors normal to the planes pointing in the direction of halfspace you want). Any ray that has positive dot product with this vector is in the halfspace, which gives a simple test. 409 | 410 | Polytopes have (at least) two natural representations. They can be expressed as a sum of generator rays (the corners of the polytope) or as the set obeying a set of halfplane constraints. The first is called the V representation and the latter the H representation. The two are highly interconnected by duality. 411 | 412 | The difficulty and method for solving these qeustions can depend strongly on the representation you have 413 | 414 | What are geoemtrically interesting questions to ask. 415 | 1. Whether a ray is in a cone. 416 | + Easy given HRep. Just do all the dot products 417 | + 418 | 2. Finding a ray that is in a cone 419 | - Any ray or an extremal ray? Extremal according to what? 420 | - Projecting to a cone. There are metrics available. The cosine metric. The Chord metric. Others? They feel a bit goofy. They feel far less elegant than the quadratic metric of a vector space. The dot product is a somewhat natural thing to talk about, but by design. Abs metric? You can use the unit plane. and put a metric on it. The cone space is a relative of projective space. which has similar problems. The cross ratio? Logarithm of dot product? Maybe you do need a plane to work on to have a reasonable deifition of extremal. Etremal with respect to a cone generalized inequality on the euclidean space. Extremal with respect to another cone? But a particular kind of cone. It contains (0,0,0,1)? A projection excluding (0,0,0,1) 421 | 422 | 423 | 4. Convert VRep to HRep and vice versa 424 | + take every possible choice out of set. Do linear solve. 
Check if linear solve matches everything else.
5. Intersections
 + Alternating Projection. The most primitive and obvious.
 + ADMM.
6. Minkowski Sums = Convex Hull
7. Projection
8. Pruning Redundancy
9. Cone Containment Testing.

-}
--------------------------------------------------------------------------------
/src/LinRel.hs:
--------------------------------------------------------------------------------
{-# LANGUAGE ConstraintKinds, ScopedTypeVariables, TypeApplications, AllowAmbiguousTypes, NoImplicitPrelude,
    GeneralizedNewtypeDeriving, FlexibleContexts
 #-}
module LinRel where

import Numeric.LinearAlgebra
import Prelude hiding ((<>))
import Debug.Trace

-- | Shorthand constraint: a finite, enumerable type used to index the axes of
-- a vector space (each inhabitant names one coordinate).
type BEnum a = (Enum a, Bounded a)

-- | Every inhabitant of a finite enumerable type, in order.
enumAll :: (BEnum a) => [a] -- What about Void?
enumAll = [minBound .. maxBound]


-- A Hacked "Void" type to get card @Void = 0
-- The trick: fromEnum maxBound (= 0) is deliberately one LESS than
-- fromEnum minBound (= 1), so card @Void = 0 - 1 + 1 = 0 even though the
-- type technically has inhabitants.  SORRY MOM.
data Void = Void' | Void''
instance Enum Void where
  fromEnum Void'  = 1
  fromEnum Void'' = 0
  toEnum 1 = Void'
  toEnum 0 = Void''
  toEnum n = error ("LinRel.Void.toEnum: bad index " ++ show n)
instance Bounded Void where
  maxBound = Void''
  minBound = Void'

-- | Enumerate a sum type by laying out the @a@ indices first and then the
-- @b@ indices, shifted past the whole range of @a@.
instance (Enum a, Enum b, Bounded a) => Enum (Either a b) where
  fromEnum (Left a)  = fromEnum a
  fromEnum (Right b) = fromEnum b + fromEnum (maxBound @a) + 1
  toEnum n | n <= ((fromEnum (maxBound @a)) - (fromEnum (minBound @a))) = Left (toEnum n)
           -- BUG FIX: invert the (+ fromEnum (maxBound @a) + 1) offset applied
           -- by fromEnum (Right _).  The original dropped the trailing "- 1",
           -- so (toEnum . fromEnum) sent (Right x) to (Right (succ x)).
           | otherwise = Right (toEnum (n - fromEnum (maxBound @a) - 1))


instance (Bounded a, Bounded b) => Bounded (Either a b) where
  maxBound = Right maxBound
  minBound = Left minBound

-- | Number of coordinates named by the index type @a@
-- (0 for the hacked 'Void' above).
card :: forall a. (BEnum a) => Int
card = (fromEnum (maxBound @a)) - (fromEnum (minBound @a)) + 1

-- HLinRel holds A x = b constraint
-- | H-representation ("half-space"/constraint form) of an affine relation
-- between spaces indexed by @a@ and @b@: the set { x | A x = b }, where x is
-- the concatenation of the @a@ coordinates followed by the @b@ coordinates.
data HLinRel a b = HLinRel (Matrix Double) (Vector Double) deriving Show

-- x = A l + b. Generator constraint.
-- | V-representation (generator form) of an affine relation: the solution set
-- is { A l + b | l free }, i.e. an affine subspace given by a particular point
-- b plus the span of the columns of A.
data VLinRel a b = VLinRel (Matrix Double) (Vector Double) deriving Show


-- f(x) = xQx + bx
data QuadOp a b = QuadOp (Matrix Double) (Vector Double)
{-
qid :: QuadOp a a
qid = ? -- a = a ? Need Relations too.

qcompose :: QuadOp b c -> QuadOp a b -> QuadOp a c
qcompose (QuadOp q c) (QuadOp q' c') = QuadOp q'' c'' where
    where
    ca = card @a
    cb = card @b
    cc = card @c
    a = subMatrix (cb, cb) q
    b = subMatrix (cb,cc) q
    c = subMatrix (cc, cb) q
    d = subMatrix (cc,cc) q
    a' = subMatrix (ca, ca) q'
    b' = subMatrix (ca, cb) q'
    c' = subMatrix (cb, ca q'
    d' = subMatrix (cb, cb) q'
    m = - (a + d')
    q'' = fromBlocks [[a' + b' <> m c' , b' <> m c ], -- can memoize some of this
                      [b <> m c' , d - ]]

    a'' = a' - b' <> m c'
    [v1', v2'] = takesV [ca,cb] c'
    [v1, v2] = takesV [cb, cc] c
    v3 = (v2' + v1)
    c'' = vJoin [v1' + m c #> v3, ]

-}
{-

Any cost outside the constraint space is irrelevant.
Q = V m V where m is fullrank.
c = Vc also

minimal sets
-}

-- break into pieces, form schur complement.

-- if A x = b then x is in the nullspace + a vector b' solves the equation
-- | Convert constraint (H) form to generator (V) form: particular solution
-- via least squares, homogeneous part via the nullspace of A.
h2v :: HLinRel a b -> VLinRel a b
h2v (HLinRel a b) = VLinRel a' b' where
  b' = a <\> b -- least squares solution
  a' = nullspace a

-- if x = A l + b, then A' . x = A' A l + A' b = A' b because A' A = 0
-- | Convert generator (V) form back to constraint (H) form: the constraint
-- matrix is the orthogonal complement of the generator columns.
v2h :: VLinRel a b -> HLinRel a b
v2h (VLinRel a' b') = HLinRel a b where
  b = a #> b' -- matrix multiply
  a = tr $ nullspace (tr a') -- orthogonal space to range of a.

-- | Identity relation: constrains the input coordinates to equal the output
-- coordinates, x_a - x_a' = 0.
hid :: forall a. BEnum a => HLinRel a a
hid = HLinRel (i ||| (- i)) (vzero s) where
  s = card @a
  i = ident s

-- | Zero vector/matrix of the requested size (thin wrapper over 'konst').
vzero :: Konst Double d c => d -> c Double
vzero = konst 0

-- | Relational composition: stack both constraint systems over the shared @b@
-- coordinates, convert to generators, then project the @b@ block away.
-- Variable layout of the stacked system is [a ; b ; c].
hcompose :: forall a b c. (BEnum a, BEnum b, BEnum c) => HLinRel b c -> HLinRel a b -> HLinRel a c
hcompose (HLinRel m b) (HLinRel m' b') =
   let a'' = fromBlocks [[ ma', mb' , 0 ],
                         [ 0  , mb , mc ]] in
   let b'' = vjoin [b', b] in
   let (VLinRel q p) = h2v (HLinRel a'' b'') in -- kind of a misuse
   let q' = (takeRows ca q) -- drop rows belonging to @b
            ===
            (dropRows (ca + cb) q) in
   let [x,y,z] = takesV [ca,cb,cc] p in
   let p' = vjoin [x,z] in -- rebuild without rows for @b
   v2h (VLinRel q' p') -- reconstruct HLinRel
   where
     ca  = card @a
     cb  = card @b
     cc  = card @c
     sb  = size b  -- number of constraints in first relation
     sb' = size b' -- number of constraints in second relation
     ma' = takeColumns ca m' -- @a block of the second relation
     mb' = dropColumns ca m' -- @b block of the second relation
     mb  = takeColumns cb m  -- @b block of the first relation
     mc  = dropColumns cb m  -- @c block of the first relation

-- | Infix composition, category-style ("g after f" reads right to left).
(<<<) :: forall a b c. (BEnum a, BEnum b, BEnum c) => HLinRel b c -> HLinRel a b -> HLinRel a c
(<<<) = hcompose

-- stack the constraints
-- | Meet (intersection of solution sets): both constraint systems must hold.
hmeet :: HLinRel a b -> HLinRel a b -> HLinRel a b
hmeet (HLinRel a b) (HLinRel a' b') = HLinRel (a === a') (vjoin [b,b'])




{- If they don't meet are we still ok?

I am not sure. Might be weird corner cases?

-}

-- | Join: affine closure of the union, computed in generator form.
hjoin :: HLinRel a b -> HLinRel a b -> HLinRel a b
hjoin v w = v2h $ vjoin' (h2v v) (h2v w)

-- hmatrix took vjoin from me :(
-- joining means combining generators and adding a new generator
-- Closed under affine combination l * x1 + (1 - l) * x2
vjoin' :: VLinRel a b -> VLinRel a b -> VLinRel a b
vjoin' (VLinRel a b) (VLinRel a' b') = VLinRel (a ||| a' ||| (asColumn (b - b'))) b

-- no constraints, everything
-- trivially true
-- | Top of the lattice: the trivially-true relation 0 x = 0 (one dummy row).
htop :: forall a b. (BEnum a, BEnum b) => HLinRel a b
htop = HLinRel (vzero (1,ca + cb)) (konst 0 1) where
  ca = card @a
  cb = card @b
{-
hbottom :: forall a b. (BEnum a, BEnum b) => HLinRel a b
hbottom = HLinRel (vzero (1,ca + cb)) (konst 1 1) where
    ca = card @a
    cb = card @b
-}
-- all the constraints! Only the origin.
-- no. it should be the empty set. Impossible to satisfy.
-- 0 x = 1 is impossible
-- not gonna play nice.
{-
hbottom :: forall a b. (BEnum a, BEnum b) => HLinRel a b
hbottom = HLinRel (ident (ca + cb)) (konst 0 (ca + cb)) where
    ca = card @a
    cb = card @b
-}

-- | Relational converse: swap the roles of @a@ and @b@ by swapping the
-- corresponding column blocks of the constraint matrix.
hconverse :: forall a b. (BEnum a, BEnum b) => HLinRel a b -> HLinRel b a
hconverse (HLinRel a b) = HLinRel ( (dropColumns ca a) ||| (takeColumns ca a)) b where
  ca = card @a
  cb = card @b

-- this is numerically unacceptable
-- forall l. A' ( A l + b) == b'
-- | Does the V-form subspace satisfy the H-form constraints?
-- NOTE(review): tolerance-based float comparison; thresholds are ad hoc.
vhsub :: VLinRel a b -> HLinRel a b -> Bool
vhsub (VLinRel a b) (HLinRel a' b') = (naa' <= 1e-10 * (norm_2 a') * (norm_2 a) ) && ((norm_2 ((a' #> b) - b')) <= 1e-10 * (norm_2 b') ) where
  naa' = norm_2 (a' <> a)

-- | Subspace inclusion: every generator of h1 satisfies h2's constraints.
hsub :: HLinRel a b -> HLinRel a b -> Bool
hsub h1 h2 = vhsub (h2v h1) h2

-- | Relation equality as mutual inclusion (up to numerical tolerance).
heq :: HLinRel a b -> HLinRel a b -> Bool
heq a b = (hsub a b) && (hsub b a)


-- NOTE(review): this is a partial order (inclusion), not a total order, so
-- 'compare' is deliberately left undefined.
instance Ord (HLinRel a b) where
  (<=) = hsub
  (>=) = flip hsub

instance Eq (HLinRel a b) where
  (==) = heq


-- I can't do this right?
-- hcomplement :: HLinRel a b -> HLinRel a b
-- hcomplement

-- | Parallel (monoidal) product: block-diagonal constraints, the two
-- relations act on disjoint coordinates.
hpar :: HLinRel a b -> HLinRel c d -> HLinRel (Either a c) (Either b d)
hpar (HLinRel mab v) (HLinRel mcd v') = HLinRel (fromBlocks [ [mab, 0], [0 , mcd]]) (vjoin [v, v']) where


-- | Associator: Either is associative on coordinates up to relabeling, so the
-- identity relation's matrix is reused verbatim.
hassoc :: forall a b c. (BEnum a, BEnum b, BEnum c) => HLinRel (Either (Either a b) c) (Either a (Either b c))
hassoc = HLinRel m v where HLinRel m v = hid @((Either (Either a b) c))

-- | Inverse associator (same matrix trick as 'hassoc').
hassoc' :: forall a b c. (BEnum a, BEnum b, BEnum c) => HLinRel (Either a (Either b c)) (Either (Either a b) c)
hassoc' = HLinRel m v where HLinRel m v = hid @((Either (Either a b) c))

{-
-- Void is unit for Either.
-- void has no inhabitants.... This is a bad boy.


hcup :: HLinRel Void (Either a a)
hcap :: HLinRel (Either a a) Void




-}
-- | Inject into the left summand: the @a@ input equals the left output block;
-- the @b@ output block is unconstrained.
hleft :: forall a b. (BEnum a, BEnum b) => HLinRel a (Either a b)
hleft = HLinRel ( i ||| (- i) ||| (konst 0 (ca,cb))) (konst 0 ca) where
  ca = card @a
  cb = card @b
  i = ident ca

-- | Inject into the right summand (mirror of 'hleft').
hright :: forall a b. (BEnum a, BEnum b) => HLinRel b (Either a b)
hright = HLinRel ( i ||| (konst 0 (cb,ca)) ||| (- i) ) (konst 0 cb) where
  ca = card @a
  cb = card @b
  i = ident cb


-- | Fan-out: run both relations on the same @a@ input, outputs side by side.
hfan :: forall a b c. BEnum a => HLinRel a b -> HLinRel a c -> HLinRel a (Either b c)
hfan (HLinRel m v) (HLinRel m' v') = HLinRel (fromBlocks [ [ma, mb, 0], [ma', 0, mc']]) (vjoin [v,v']) where
  ca = card @a
  ma = takeColumns ca m
  mb = dropColumns ca m
  ma' = takeColumns ca m'
  mc' = dropColumns ca m'


-- | Discard everything: the always-true relation into the 0-dimensional Void.
hdump :: HLinRel a Void
hdump = HLinRel 0 0
{-
hlabsorb :: HLinRel a b -> HLinRel (Either Void a) b
hlabsorb (HLinRel m v) = (HLinRel m v)
-}
-- | Left unitor: Void contributes zero coordinates, so the identity matrix
-- for @a@ works unchanged.
hlabsorb :: forall a. BEnum a => HLinRel (Either Void a) a
hlabsorb = HLinRel m v where (HLinRel m v) = hid @a

-- | Currying on coordinates: (a, b+c) and (a+b, c) have the same flat layout,
-- so the matrix is reused verbatim.
htrans :: HLinRel a (Either b c) -> HLinRel (Either a b) c
htrans (HLinRel m v) = HLinRel m v

-- | Symmetry: swap the two summands (input a-block equals output a-block,
-- likewise for b).
hswap :: forall a b. (BEnum a, BEnum b) => HLinRel (Either a b) (Either b a)
hswap = HLinRel (fromBlocks [[ia ,0,0 ,-ia], [0, ib,-ib,0]]) (konst 0 (ca + cb)) where
  ca = card @a
  cb = card @b
  ia = ident ca
  ib = ident cb


-- | Addition as a relation: output = left input + right input.
hsum :: forall a. BEnum a => HLinRel (Either a a) a
hsum = HLinRel ( i ||| i ||| - i ) (konst 0 ca) where
  ca = card @a
  i = ident ca

-- | Duplication as a relation: both outputs equal the input.
hdup :: forall a. BEnum a => HLinRel a (Either a a)
hdup = HLinRel (fromBlocks [[i, -i,0 ], [i, 0, -i]]) (konst 0 (ca + ca)) where
  ca = card @a
  i = ident ca

-- hcup :: forall a. BEnum a => HLinRel Void (Either a a)
-- hcup = -- or mainulate hid

-- hcap :: forall a. BEnum a => HLinRel (Either a a) Void

-- smart constructors
-- | Validating constructor: the matrix must have one column per coordinate of
-- @a@ plus @b@, and one right-hand-side entry per constraint row.
hLinRel :: forall a b. (BEnum a, BEnum b) => Matrix Double -> Vector Double -> Maybe (HLinRel a b)
hLinRel m v | cols m == (ca + cb) && (size v == rows m) = Just (HLinRel m v)
            | otherwise = Nothing where
  ca = card @a
  cb = card @b

-- a 2d space at every wire or current and voltage.
data IV = I | V deriving (Show, Enum, Bounded, Eq, Ord)


-- | Resistor of resistance r: current is conserved (i = i') and the voltage
-- drops by r*i (r*i + v = v').
resistor :: Double -> HLinRel IV IV
resistor r = HLinRel ( (2><4) [ 1,0,-1, 0,
                                r, 1, 0, -1]) (konst 0 2)

-- | Two ports bridged by a resistor r between them; column layout is
-- [i1,v1,i2,v2 | i1',v1',i2',v2'].
bridge :: Double -> HLinRel (Either IV IV) (Either IV IV)
bridge r = HLinRel ( (4><8) [ 1,0, 1, 0, -1, 0, -1, 0, -- current conservation
                              0, 1, 0, 0, 0, -1 , 0, 0, --voltage maintained
                              0, 0, 0, 1, 0, 0, 0, -1, -- voltage maintained
                              r, 1, 0,-1, -r, 0, 0, 0 ]) (konst 0 4)
-- | A zero-ohm bridge, i.e. a short circuit between the two ports.
short = bridge 0



-- | Act on the first summand, identity on the second.
first :: BEnum c => HLinRel a b -> HLinRel (Either a c) (Either b c)
first f = hpar f hid

-- | Act on the second summand, identity on the first.
second :: BEnum a => HLinRel b c -> HLinRel (Either a b) (Either a c)
second f = hpar hid f

-- | A 2D tile with up/down/left/right boundary wires (up+left in, down+right out).
type HLinRel2D u d l r = HLinRel (Either u l) (Either d r)


{-
A stencil of 2d resistors for tiling


      u
      /
      \
      /
      |
l -/\/\/----/\/\/\- r
      |
      /
      \
      /
      |
      d


-}
stencil :: HLinRel2D IV IV IV IV
stencil = (hpar r10 r10) <<< short <<< (hpar r10 r10) where r10 = resistor 10

-- | Horizontal composition of 2D tiles: glue g's right boundary @b@ to f's
-- left boundary, threading the vertical wires through with swap/assoc plumbing.
horicomp :: forall w w' w'' w''' a b c. (BEnum w, BEnum w', BEnum a, BEnum w''', BEnum b, BEnum w'', BEnum c ) =>
   HLinRel2D w' w'' b c -> HLinRel2D w w''' a b -> HLinRel2D (Either w' w) (Either w'' w''') a c
horicomp f g = hcompose f' g' where
  f' :: HLinRel (Either (Either w' w''') b) (Either (Either w'' w''') c)
  f' = (first hswap) <<< hassoc' <<< (hpar hid f) <<< hassoc <<< (first hswap)
  g' :: HLinRel (Either (Either w' w) a) (Either (Either w' w''') b)
  g' = hassoc' <<< (hpar hid g) <<< hassoc


-- | Quarter-turn of a tile: exchange horizontal and vertical boundaries.
rotate :: (BEnum w, BEnum w', BEnum a, BEnum b) => HLinRel2D w w' a b -> HLinRel2D a b w w'
rotate f = hswap <<< f <<< hswap

-- | Vertical composition, defined by rotating into the horizontal case.
vertcomp :: (BEnum w, BEnum w', BEnum a, BEnum d, BEnum b, BEnum w'', BEnum c ) => HLinRel2D w' w'' c d -> HLinRel2D w w' a b -> HLinRel2D w w'' (Either c a) (Either d b)
vertcomp f g = rotate (horicomp (rotate f) (rotate g) )

{-
traceV :: ->
traceH :: ->


-}

-- | A 2x2 grid of stencils.
stencil2 = vertcomp h h where h = stencil `horicomp` stencil



{- Legendre transformations in thermo are for open systems. Something to that -}
{- Dependent sources. Well, these are goddamn cheating.
We could do it though
 |     |
 I  alpha I
 |     |

small signal models of transistor, op amps etc


gyrator - would need polynomials from caps and inds
would be kind of nice for boundary models

kron instead of dsum - Quantum fields
Kron relation might be nice for discussing remnant space of topological matter



-}

{-

A wire in a circuit has a potential (questionably) and a current running through it
So our wires should have both of these variables.

-}
-- | A one-coordinate index type for a voltage-probe reading.
newtype VProbe = VProbe () deriving (Enum, Bounded, Show, Eq, Ord)
-- | Probe the voltage of a wire: draws no current (i = 0) and reports v.
vprobe :: HLinRel IV VProbe
vprobe = HLinRel ( (2><3) [1,0,0,
                           0,1,-1]) (konst 0 2)

-- | Ideal voltage source: same current through, voltage stepped up by v.
vsource :: Double -> HLinRel IV IV
vsource v = HLinRel ( (2><4) [ 1,0,-1, 0,
                               0, 1, 0, -1]) (fromList [0,v])

-- | Ideal current source: forces current i through the wire.
isource :: Double -> HLinRel IV IV
isource i = HLinRel ( (2><4) [ 1,0, -1, 0 , -- current conservation
                               1, 0, 0, 0]) (fromList [0,i])


-- the currents add, but the voltages dup. sum and dup are dual
-- Or should it be |--| a parallel short?
-- Ad then we could open circuit one of them and absorb the Void
-- to derive this
cmerge :: HLinRel (Either IV IV) IV
cmerge = HLinRel ( (3><4) [1, 0, 1, 0, -1, 0,
                           0,1,0,0,0 , -1 ,
                           0,0,0,1, 0, -1]) (konst 0 3)

-- | Open circuit: no current may flow (i = 0), voltage unconstrained.
open :: HLinRel IV Void
open = HLinRel ( (1><2) [1,0]) (konst 0 1)


-- | Terminate two wires into each other (a "cap" in the string-diagram sense).
cap :: HLinRel (Either IV IV) Void
cap = hcompose open cmerge

-- | Create two connected wires out of nothing (converse of 'cap').
cup :: HLinRel Void (Either IV IV)
cup = hconverse cap

-- | Ground: pin the wire's voltage to 0, current unconstrained.
ground :: HLinRel IV Void
ground = HLinRel ( (1><2) [ 0 , 1 ]) (vzero 1)

-- resistors in parallel.

ex1 = hcompose (bridge 10) (bridge 10)
ex2 = hcompose (resistor 10) (resistor 30) -- resistors in series.
-- | A 20-ohm resistor, used by the test suite.
r20 :: HLinRel IV IV
r20 = resistor 20

-- | Voltage divider: series resistor r1 followed by a bridge of resistance r2.
divider :: Double -> Double -> HLinRel (Either IV IV) (Either IV IV)
divider r1 r2 = hcompose (bridge r2) (hpar (resistor r1) hid)

{-
type StateCoState s = Either s s
type ValueD s = s


dynamics :: HLinRel (Either SHOState Control) SHOState
dynamics = HLinRel ((2><5) [ 1, dt, 0, -1 , 0,
                             -dt, 1, dt, 0, -1 ]) (vzero 2)
   where dt = 0.01

-- labsorb <<< (par initial_cond id)
-}


-- state of an oscillator
data SHOState = X | P deriving (Show, Enum, Bounded, Eq, Ord)
data Control = F deriving (Show, Enum, Bounded, Eq, Ord)
-- Costate newtype wrapper
newtype Co a = Co a deriving (Show, Enum, Bounded, Eq, Ord)

type M = Matrix Double

-- | Discrete linear dynamics as a relation: A x + B u - x' = 0,
-- i.e. x' = A x + B u.
dynamics :: forall x u. (BEnum x, BEnum u) => Matrix Double -> Matrix Double ->
   HLinRel (Either x u) x
dynamics a b = HLinRel (a ||| b ||| -i ) (vzero cx) where
  cx = card @x
  cu = card @u
  i = ident cx

-- | Pin the state to a given initial value x0 (I x = x0).
-- NOTE(review): assumes size x0 == card @x; not checked here.
initial_cond :: forall x. BEnum x => Vector Double-> HLinRel Void x
initial_cond x0 = HLinRel i x0 where
  cx = card @x
  i = ident cx

-- | Costate/value backward update: A^T x + Q ... + l' = 0.
-- NOTE(review): sign conventions and the x-vs-l roles here are unverified;
-- the (tr a) block is card @l wide only when card @x == card @l.
valueUpdate :: forall x l. (BEnum x, BEnum l) => M -> M -> HLinRel (Either x l) l
valueUpdate a q = HLinRel ((tr a) ||| q ||| i) (vzero cl) where
  cl = card @l
  i = ident cl

-- | Stationarity condition relating control u to costate l:
-- R u + B^T l = 0.
optimal_u :: forall u l. (BEnum u, BEnum l) =>
   M -> M -> HLinRel u l
optimal_u r b = HLinRel (r ||| tr b) (vzero cu) where
  cu = card @u

-- | One LQR sweep step on the joint (state, costate) space, wired up from the
-- primitive relations above with dup/assoc plumbing.
step :: forall x u l. (BEnum x, BEnum u, BEnum l) => M -> M -> M -> M
   -> HLinRel (Either x l) (Either x l)
step a b r q =
   f5 <<< f4 <<< hassoc' <<< f3 <<< f2 <<< hassoc <<< f1 where
     -- duplicate the state: one copy feeds the dynamics, one the value update
     f1 :: HLinRel (Either x l) (Either (Either x x) l)
     f1 = first hdup
     f2 :: HLinRel (Either x (Either x l)) (Either x l)
     f2 = second (valueUpdate a q)
     -- duplicate the costate: one copy determines the optimal control
     f3 :: HLinRel (Either x l) (Either x (Either l l))
     f3 = second hdup
     f4 :: HLinRel (Either (Either x l) l) (Either (Either x u) l)
     f4 = first (second (hconverse (optimal_u r b)))
     f5 :: HLinRel (Either (Either x u) l) (Either x l)
     f5 = first (dynamics a b)

-- iterate (hcompose (step a b r q)) :: [HLinRel (Either x l) (Either x l)]



{-
step :: forall x l. (BEnum x, BEnum l) =>
   HLinRel (State + ValueD) (State + ValueD)
step a b q r =
  = dynamics,
   valueUpdate <<< fst
   second optimal_u
   par (par hid cup) hid
    :: , optimal_u
-}
{-
cost :: HLinRel (Either (Co SHOState) SHOState Control) (Co SHOState)
cost = HLinRel ((2><5) [ 1, 0, 0, -1 , 0,
                         -dt, 1, dt, 0, -1 ]) (vzero 2)
-}




{-

Is there a reasonable interpretation of kron?

-}
{-
everything can be defined inefficiently via v2h and h2v functions

right division

-}

{-
Call them affine relations

Join and meet aren't union and intersection.
They are the affine closure of union and intersection.




Linear has some niceness.
Homogeneous coordinates usually do.
For clarity and familiarity I have chosen not to do it this way
Or maybe I will do it?
546 | 547 | par 548 | 549 | 550 | -} 551 | 552 | 553 | {- 554 | 555 | import numpy as np 556 | 557 | 558 | def meet(a,b): 559 | pass 560 | def compose(a,b): # a after b 561 | assert(a.inN == b.outN) 562 | 563 | combo = np.block([[a.constraints, np.zeros((a.constraints.shape[0] , b.inN) ) ], 564 | [np.zeros((b.constraints.shape[0] , a.outN)) , b.constraints]]) 565 | print(combo) 566 | gens = LinRel(a.inN + a.outN, b.outN, gens=combo).gens 567 | print("gens",gens) 568 | gens = np.vstack((gens[:a.outN, :], gens[-b.inN: , :]) ) 569 | print(gens) 570 | return LinRel(a.outN, b.inN, gens=gens) 571 | def top(outN, inN): 572 | return LinRel(outN, inN, constraints = np.array([[]])) 573 | def bottom(outN, inN): 574 | return LinRel(outN, inN, gens = np.array([[]])) 575 | 576 | def converse(a): 577 | return LinRel(a.inN, a.outN, np.hstack((a.constraints[:, a.inN:], a.constraints[:, :a.inN]))) 578 | def complement(a): 579 | return LinRel(a.outN, a.inN, constraints = a.gens.T.conj()) 580 | 581 | def right_div(a,b): 582 | pass # return complement( compose(a, complement(b)) ) # something like this 583 | def inclusion(a,b): 584 | 585 | s = a.constraints @ b.gens 586 | np.all(a.constraints @ b.gens <= tol ) 587 | 588 | if rcond is None: 589 | rcond = np.finfo(s.dtype).eps * max(max(a.shape), max(b.shape)) 590 | tol = max(np.amax(a), np.amax(b)) 591 | tol = np.amax(s) * rcond 592 | def fromMat(mat): 593 | (outN, inN) = mat.shape 594 | return LinRel(inN, outN,constraints = np.hstack((mat,-np.eye(outN)))) 595 | def id(N): 596 | return fromMat(np.eye(N)) 597 | # make 0,1 first index for in/out? Oh, but then in out have to be same size. 598 | # A[0, ...] @ x + A[1, ...] @ y = 0 599 | # Then I can form the kron of linear relations. 600 | # store sperate A B matrices? 
A @in + B @ out 601 | class LinRel(): 602 | def __init__(self, outN, inN, constraints = None, gens = None, rcond=None): 603 | #assert(inN <= constraints.shape[1]) 604 | self.inN = inN 605 | self.outN = outN 606 | if constraints is not None: #baiscally scipy.linalg.null_space 607 | u, s, vh = np.linalg.svd(constraints, full_matrices=True) 608 | M, N = u.shape[0], vh.shape[1] 609 | if rcond is None: 610 | rcond = np.finfo(s.dtype).eps * max(M, N) 611 | tol = np.amax(s) * rcond 612 | num = np.sum(s > tol, dtype=int) 613 | self.gens = vh[num:,:].T.conj() 614 | self.constraints = vh[:num,:] 615 | if gens is not None: #basically scipy.linalg.orth 616 | u, s, vh = np.linalg.svd(gens, full_matrices=True) 617 | M, N = u.shape[0], vh.shape[1] 618 | if rcond is None: 619 | rcond = np.finfo(s.dtype).eps * max(M, N) 620 | tol = np.amax(s) * rcond 621 | num = np.sum(s > tol, dtype=int) 622 | self.gens = u[:, :num] 623 | self.constraints = u[:, num:].T.conj() 624 | 625 | def shape(self): 626 | return (self.outN, self.inN) 627 | def size(self): 628 | return self.outN + self.inN 629 | # operator overloadings 630 | def __matmul__(a,b): 631 | return compose(a,b) 632 | def __invert__(a): # ~ 633 | return complement(a) 634 | def __or__(a,b): # an argument could be made for + and * 635 | return join(a,b) 636 | def __and__(a,b): 637 | return meet(a,b) 638 | def __sub__(a,b): 639 | return (a) & (-b) 640 | def __le__(a,b): # Are the others automatic? 
641 | return inclusion(a,b) 642 | def __str__(self): 643 | return " Constraints: \n%s, \nGens:\n %s\n" % (str(self.constraints), str(self.gens)) 644 | 645 | 646 | 647 | 648 | ex = LinRel(1,2, np.array([[3,4,0]])) 649 | e2 = LinRel(2,1, np.array([[3,4,0]])) 650 | 651 | assert(np.all(LinRel(1,2, ex.constraints).constraints == ex.constraints) ) 652 | print( ex.constraints @ ex.gens) 653 | assert(np.all( np.abs(ex.constraints @ ex.gens) <= 1e-15) ) 654 | print(ex @ e2) 655 | print(e2 @ ex) 656 | print(e2 @ id(e2.inN)) 657 | 658 | ''' 659 | Quadratic optimization can be mixed in. 660 | Quad && LinRel 661 | 662 | AffineRel = maintain homgenous coord, or always insert 1 -1 keeping homoeg coord 663 | + discrete? Maintain a couple copies of 664 | 665 | 666 | ''' 667 | 668 | 669 | Linear relations 670 | hrep - Ax = 0 671 | or 672 | vrep - y = sum x_i 673 | 674 | hrep <-> vrep = row echelon 675 | 676 | in and out variables. In and out subspaces. 677 | in : [] - list of indices 678 | out : [] 679 | 680 | in is a vrep of input space. 681 | 682 | in = projection/injection matrix n x d 683 | out = projection/injection matrix. (d-n) x d 684 | in * out = 0. orthogonal 685 | 686 | auxiliary variables allowed 687 | 688 | compose relations 689 | in1 out1 690 | in2 out2 691 | 692 | in = d x n 693 | stack out1 and in2 into matrix. with them equal. 694 | 695 | np.hstack(A1, out1 - in2, A2) 696 | in = no.hatck(in, zeores) 697 | in = no.hatck(zeores, out) 698 | 699 | drect sum as monoidal product 700 | block([ A, 0 ], 701 | [ 0, A ]) 702 | in = [in1, in2] 703 | out = [out1, out2] 704 | 705 | converse = flip in out 706 | 707 | meet = combine the two constraint matrices 708 | join = convert? combine in out? 709 | 710 | 1-d as unit object 711 | <= is subspace ordering 712 | 713 | negation = orthogonalization 714 | division => 715 | 716 | 717 | a linear problem is a linear relation of 1-d. 
718 | 719 | use in1 and out2 as new in/out 720 | 721 | fan 722 | snd(30,10) = project bottom 10 = idetnity matrix stacked. 723 | fst(30, 10) project top 10 724 | id(20) 725 | id(n) = LinRel(np.zeros(0,2*n), [zeros, eye], [eye, zero] ) 726 | id(n) = LinRel((0,n), eye(n), eye(n) ) 727 | LinRel [I, -I], [I, 0], [0,I] 728 | 729 | "internal" space 730 | class LinRel(): 731 | def __init__(A, in, out): 732 | 733 | svd(in * A , smallest ) 734 | 1 - A*A 735 | 736 | Ax + By = 0 737 | 738 | vstack adds constraints 739 | hstack adds variables 740 | 741 | def idRel(n): 742 | return LinRel(sparse.lil_matrix((0,n)), n, n) 743 | def mapRel(A): 744 | (r,c) = A.shape 745 | newA = sparse.hstack(A, - sparse.eye(r)) 746 | return LinRel(newA, r, c) 747 | 748 | class LinRelV(): 749 | class LinRelH(): 750 | 751 | class LinRel(): 752 | def init(self,A, in, out): 753 | self.A = A 754 | self.in = in 755 | self.out = out 756 | def compose(self, b): 757 | assert(self.out == b.in, "Shapes must match") 758 | 759 | i = sparse.eye(b.in) 760 | cons = sparse.hstack([0, i, -i, 0]) 761 | ina = self.A[:,:self.in] 762 | auxa = self.A[:,self.in:-self.out] 763 | outa = self.A[:,-self.out:] 764 | inb = b.A[:,:b.in] 765 | auxb = b.A[:,b.in:-b.out] 766 | outb = b.A[:,-b.out:] 767 | 768 | newA = sparse.bmat( [[ina, auxa, outa, 0, 0, 0], 769 | [0 , 0 , i, -i, 0 , 0] 770 | [0, 0, 0, inb, auxb,outb]]) 771 | LinRel(newA, self.in, b.out) 772 | def meet(self,b): 773 | #hmm. I suppose we acutlaly should split the thing apart again 774 | assert(self.in == b.in) 775 | assert(self.out == b.out) 776 | assert() 777 | newA = sparse.vstack([self.A, b.A]) 778 | return () 779 | def complement(self): 780 | linalg.svd(self.A) 781 | return LinRel(get_nonzeroeigs) 782 | def __negate__(self): 783 | return self.complement() 784 | def rdiv(self): 785 | def transpose(): 786 | self.converse() 787 | def T(): 788 | self.converse() 789 | 790 | # the svd gives you most of what you need? 
791 | def inclusion(): 792 | x = linalg.nullspace(self.A) 793 | return b.A @ x == 0 #check that every generator is in. Makes sense. Except numrically is trash. 794 | def __leq__(self): 795 | self.inclusion(b) 796 | 797 | 798 | 799 | def converse(rel): 800 | newA = np.hstack( [ rel.A[:,-rel.out: ] , rel.A[:,rel.in:-rel.out], rel.A[:,:rel.in ] ]) 801 | return LinRel(newA, rel.out, rel.in) 802 | 803 | 804 | compose(A) 805 | linalg.nullspace(A) 806 | range? 807 | 808 | 809 | 810 | (cons1,d1) = self.A.shape 811 | (cons2,d2) = b.A.shape 812 | constrain = np.hstack 813 | newA = sparse.vstack 814 | 815 | 816 | using bigM I can encode the complement of a H-polytope 817 | but then what? 818 | I do it again I guess? 819 | 820 | 821 | complementation of relation -> 822 | At least one must be inverted. 823 | 824 | polytope inclusion 825 | -> search for point not in B. 826 | Or, do sandardinni encoding 827 | 828 | Really It should generate new variables for an instantiation. 829 | Snd should not reuse the same variables every time. 830 | class PolyRel() 831 | invars = [] 832 | constraints = [] # store >= values, not full constraints? 833 | outvars = [] 834 | def __init__(): 835 | all fresh variables 836 | def compose(self,b): 837 | PolyRel(self.constraints + self., invars = outvars ) 838 | def complement(): 839 | zs = [] 840 | for c in constraints: 841 | z, constraints = reify(c) 842 | # z = cvx.Variable(1, boolean=True) # actually make same shape as c 843 | # c += c + M * z 844 | sum(zs) >= 1 # one or more constraints is disatisfied. 845 | def rdiv(): 846 | 847 | yeah. We should use a dsl, compile it, then encode it. 848 | data Rel a b where 849 | Compose :: 850 | Complement :: 851 | Converse :: 852 | 853 | 854 | relu 855 | 856 | l1 >= 0 857 | l2 >= 0 858 | l1 <= M * z 859 | l2 <= M * (1 - z) 860 | x = lambda1 - lambda2 861 | y = lambda1 862 | 863 | Maybe insetado f subspaces, we should be thinking ellipses and svd. 
864 | 865 | 866 | 867 | -} --------------------------------------------------------------------------------