├── .github └── workflows │ └── CI.yml ├── .gitignore ├── ChangeLog.md ├── LICENSE ├── README.md ├── Setup.hs ├── data └── simple.csv ├── optics-by-example.png ├── package.yaml ├── src └── Data │ └── Csv │ └── Lens.hs ├── stack.yaml ├── stack.yaml.lock └── test ├── Data └── Csv │ └── LensSpec.hs └── Spec.hs /.github/workflows/CI.yml: -------------------------------------------------------------------------------- 1 | name: CI (Stack) 2 | 3 | on: 4 | push: 5 | branches: [master] 6 | pull_request: 7 | branches: [master] 8 | types: [opened, synchronize] 9 | pull_request_review: 10 | types: [submitted] 11 | 12 | jobs: 13 | test_with_stack: 14 | name: Testing with Stack-${{ matrix.stack }}, on ${{ matrix.os }} 15 | runs-on: ${{ matrix.os }} 16 | strategy: 17 | matrix: 18 | os: [ubuntu-latest] 19 | stack: ["2.3.1"] 20 | 21 | steps: 22 | - uses: actions/checkout@v2 23 | - uses: actions/setup-haskell@v1.1.3 24 | with: 25 | ghc-version: "8.8.4" # GHC shipped with resolver lts-16.17; matrix.ghc was never defined 26 | stack-version: ${{ matrix.stack }} 27 | - name: Cache (Unix platform) 28 | uses: actions/cache@v2 29 | with: 30 | path: ~/.stack 31 | key: stack-cache-${{ runner.os }}-${{ hashFiles('stack.yaml.lock') }}-${{ hashFiles('stack.yaml') }} 32 | restore-keys: | 33 | stack-cache-${{ runner.os }}-${{ hashFiles('stack.yaml.lock') }} 34 | stack-cache-${{ runner.os }} 35 | - name: Install dependencies 36 | run: | 37 | stack update 38 | stack build --system-ghc --only-dependencies --test --bench --no-run-tests --no-run-benchmarks 39 | - name: Build 40 | run: stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks 41 | - name: Run tests 42 | run: stack test 43 | - name: Run benchmarks 44 | run: stack bench 45 | - name: Generate tarball 46 | run: stack sdist 47 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .stack-work/ 2 | lens-csv.cabal 3 | /data/florida.csv 4 | *~ 5 | 
-------------------------------------------------------------------------------- /ChangeLog.md: -------------------------------------------------------------------------------- 1 | # Changelog for lens-csv 2 | 3 | ## Unreleased changes 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Chris Penner (c) 2019 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | * Neither the name of Chris Penner nor the names of other 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # lens-csv 2 | 3 | * Docs are on [Hackage](http://hackage.haskell.org/package/lens-csv) 4 | 5 | If you enjoy working with lenses (or need a hand learning how they work) my book [Optics By Example](https://leanpub.com/optics-by-example/) is a great place to learn more! 6 | 7 | [![Optics By Example](./optics-by-example.png)](https://leanpub.com/optics-by-example/) 8 | 9 | A lensy layer on top of Cassava which affords streaming, traversable, CSV parsing. 10 | 11 | Still experimental (but working). Please file an issue if there are features the library doesn't support. 12 | 13 | Example: 14 | 15 | ```haskell 16 | >>> import Data.ByteString.Lazy as BL 17 | >>> myCsv <- BL.readFile "./data/simple.csv" 18 | >>> myCsv ^.. namedCsv . taking 2 rows . column @String "state_code" 19 | [ "NY" 20 | , "CA" 21 | ] 22 | 23 | >>> myCsv ^.. namedCsv . taking 2 rows . _NamedRecord @(M.Map String String) 24 | [ fromList [("population","19540000"), ("state_code","NY")] 25 | , fromList [("population","39560000"), ("state_code","CA")] 26 | ] 27 | 28 | -- For csv files without headers 29 | >>> myCsv ^.. csv . taking 2 rows . 
_Record @[String] 30 | [ ["state_code", "population"] 31 | , ["NY" , "19540000"] 32 | ] 33 | 34 | -- 'column' infers whether it's a named or unnamed csv and accepts the appropriate index type (either ByteString or Int) 35 | >>> myCsv ^.. csv . rows . column @Int 1 36 | [19540000,39560000] 37 | 38 | -- Use traversals to edit cells 'in-place' (add 1337 to California's population) 39 | >>> BL.putStrLn $ myCsv & namedCsv . row 1 . column @Int "population" +~ 1337 40 | state_code,population 41 | NY,19540000 42 | CA,39561337 43 | ``` 44 | -------------------------------------------------------------------------------- /Setup.hs: -------------------------------------------------------------------------------- 1 | import Distribution.Simple 2 | main = defaultMain 3 | -------------------------------------------------------------------------------- /data/simple.csv: -------------------------------------------------------------------------------- 1 | state_code,population 2 | NY,19540000 3 | CA,39560000 4 | -------------------------------------------------------------------------------- /optics-by-example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChrisPenner/lens-csv/e112fc30977869089cf3bc859643700324d748c0/optics-by-example.png -------------------------------------------------------------------------------- /package.yaml: -------------------------------------------------------------------------------- 1 | name: lens-csv 2 | version: 0.1.1.0 3 | github: "ChrisPenner/lens-csv" 4 | license: BSD3 5 | author: "Chris Penner" 6 | maintainer: "christopher.penner@gmail.com" 7 | copyright: "Chris Penner" 8 | 9 | extra-source-files: 10 | - README.md 11 | - ChangeLog.md 12 | 13 | # Metadata used when publishing your package 14 | # synopsis: Short description of your package 15 | # category: Other 16 | 17 | # To avoid duplicated efforts in documentation and dealing with the 18 | # complications of embedding 
Haddock markup inside cabal files, it is 19 | # common to point users to the README.md file. 20 | description: Please see the README on GitHub at 21 | 22 | dependencies: 23 | - base >= 4.7 && < 5 24 | - cassava 25 | - lens 26 | - bytestring 27 | 28 | library: 29 | source-dirs: src 30 | 31 | tests: 32 | lens-csv-test: 33 | main: Spec.hs 34 | source-dirs: test 35 | ghc-options: 36 | - -threaded 37 | - -rtsopts 38 | - -with-rtsopts=-N 39 | dependencies: 40 | - lens-csv 41 | - tasty 42 | - tasty-hspec 43 | - tasty-discover 44 | - containers 45 | -------------------------------------------------------------------------------- /src/Data/Csv/Lens.hs: -------------------------------------------------------------------------------- 1 | {-| 2 | Module : Data.Csv.Lens 3 | Description : A lensy layer on top of Cassava which affords streaming, traversable, CSV parsing. 4 | Copyright : (c) Chris Penner, 2019 5 | License : BSD3 6 | 7 | The examples below use the following csv as the value @myCsv@: 8 | 9 | > state_code,population 10 | > NY,19540000 11 | > CA,39560000 12 | 13 | -} 14 | 15 | {-# LANGUAGE DataKinds #-} 16 | {-# LANGUAGE GADTs #-} 17 | {-# LANGUAGE KindSignatures #-} 18 | {-# LANGUAGE TypeFamilies #-} 19 | {-# LANGUAGE UndecidableInstances #-} 20 | {-# LANGUAGE RankNTypes #-} 21 | {-# LANGUAGE FlexibleInstances #-} 22 | {-# LANGUAGE FlexibleContexts #-} 23 | {-# LANGUAGE ScopedTypeVariables #-} 24 | 25 | module Data.Csv.Lens 26 | ( namedCsv 27 | , csv 28 | , headers 29 | , rows 30 | , row 31 | , columns 32 | , columns' 33 | , column 34 | , column' 35 | , _Record 36 | , _Record' 37 | , _NamedRecord 38 | , _NamedRecord' 39 | , _Field 40 | , _Field' 41 | , Csv' 42 | , CsvRecord 43 | , cassavaNamed 44 | , cassavaUnnamed 45 | , adjustingOutputHeaders 46 | ) where 47 | 48 | import Control.Lens 49 | import qualified Data.ByteString.Lazy as BL hiding (putStrLn) 50 | import Data.Csv hiding (index) 51 | import qualified Data.Csv.Streaming as S 52 | import Data.Foldable 53 | 
import Data.Either 54 | import GHC.TypeLits 55 | import Data.Kind 56 | import Data.Type.Equality 57 | 58 | -- $setup 59 | -- >>> :set -XOverloadedStrings 60 | -- >>> :set -XTypeApplications 61 | -- >>> :set -XDataKinds 62 | -- >>> import qualified Data.Map as M 63 | -- >>> import qualified Data.ByteString.Lazy.Char8 as BL 64 | -- >>> myCsv <- BL.readFile "./data/simple.csv" 65 | 66 | 67 | -- | A CSV Record which carries a type-level witness of whether the record is named or not. 68 | -- 69 | -- A csv record with named columns has type @'CsvRecord' 'Name'@ where 'Name' is simply an alias for 'BL.ByteString' 70 | -- 71 | -- A csv record with numbered columns has type @'CsvRecord' 'Int'@ 72 | data CsvRecord i where 73 | NamedCsvRecord :: NamedRecord -> CsvRecord Name 74 | CsvRecord :: Record -> CsvRecord Int 75 | 76 | instance Show (CsvRecord i) where 77 | showsPrec 11 r s = "(" <> showsPrec 0 r ")" <> s 78 | showsPrec _ (CsvRecord r) s = "CsvRecord (" <> show r <> ")" <> s 79 | showsPrec _ (NamedCsvRecord r) s = "NamedCsvRecord (" <> show r <> ")" <> s 80 | 81 | type instance Index (CsvRecord i) = i 82 | type instance IxValue (CsvRecord i) = Field 83 | 84 | -- | 'CsvRecord's is indexable using 'ix' by either 'Int' for numbered columns or a 'Name' for 85 | -- named columns. 86 | instance Ixed (CsvRecord i) where 87 | ix i f (NamedCsvRecord r) = NamedCsvRecord <$> (r & ix i %%~ f) 88 | ix i f (CsvRecord r) = CsvRecord <$> (r & ix i %%~ f) 89 | 90 | -- | 'Csv'' is a wrapper around cassava's csv type which carries the appropriate indexing 91 | -- and column header information. 92 | data Csv' i where 93 | NamedCsv :: Header -> S.Records NamedRecord -> Csv' Name 94 | UnnamedCsv :: S.Records Record -> Csv' Int 95 | 96 | type instance Index (Csv' i) = Int 97 | type instance IxValue (Csv' i) = CsvRecord i 98 | 99 | -- | A 'Csv'' is indexable using 'ix' by either 'Int' or 'Name' respectively. 100 | instance Ixed (Csv' i) where 101 | ix i = rows . 
index i 102 | 103 | instance ToNamedRecord (CsvRecord Name) where 104 | toNamedRecord (NamedCsvRecord r) = r 105 | 106 | instance ToRecord (CsvRecord Int) where 107 | toRecord (CsvRecord r) = r 108 | 109 | instance FromNamedRecord (CsvRecord Name) where 110 | parseNamedRecord r = pure $ NamedCsvRecord r 111 | 112 | instance FromRecord (CsvRecord Int) where 113 | parseRecord r = pure $ CsvRecord r 114 | 115 | 116 | -- | An iso between the results of 'S.decodeByName' or 'S.decodeByNameWith' and a 'Csv'' for use with this library. 117 | -- 118 | -- >>> S.decode HasHeader myCsv ^.. from cassavaUnnamed . rows . column @String 0 119 | -- ["NY","CA"] 120 | cassavaUnnamed :: Iso' (Csv' Int) (S.Records Record) 121 | cassavaUnnamed = iso (\(UnnamedCsv rs) -> rs) UnnamedCsv 122 | 123 | -- | An iso between the results of 'S.decode' or 'S.decodeWith' and a 'Csv'' for use with this library. 124 | -- 125 | -- You should typically just use 'namedCsv', but this can be helpful if you want to provide 126 | -- special options to provide custom decoding options. 127 | -- 128 | -- >>> S.decodeByName myCsv ^.. _Right . from cassavaNamed . rows . column @String "state_code" 129 | -- ["NY","CA"] 130 | cassavaNamed :: Iso' (Csv' Name) (Header, S.Records NamedRecord) 131 | cassavaNamed = iso (\(NamedCsv h rs) -> (h, rs)) (uncurry NamedCsv) 132 | 133 | -- | A prism which attempts to parse a 'BL.ByteString' into a structured @'Csv'' 'Name'@. 134 | -- 135 | -- This uses the first row of the csv as headers. 136 | -- 137 | -- Note that this prism will silently fail to match if your CSV is malformed. 138 | -- Follow up with 'rows', 'row', or 'headers' 139 | -- 140 | -- >>> :t myCsv ^? namedCsv 141 | -- myCsv ^? 
namedCsv :: Maybe (Csv' Name) 142 | namedCsv :: Prism' BL.ByteString (Csv' Name) 143 | namedCsv = prism' embed project 144 | where 145 | embed :: Csv' Name -> BL.ByteString 146 | embed (NamedCsv headers xs) = encodeByName headers (toList xs) 147 | project :: BL.ByteString -> Maybe (Csv' Name) 148 | project = fmap (uncurry NamedCsv) . preview _Right . S.decodeByName 149 | 150 | -- | A prism which attempts to parse a 'BL.ByteString' into a structured @'Csv'' 'Int'@. 151 | -- 152 | -- Use this with CSVs which don't have a header row. 153 | -- 154 | -- Note that this prism will silently fail to match if your CSV is malformed. 155 | -- Follow up with 'rows' or 'row' 156 | -- 157 | -- >>> :t myCsv ^? csv 158 | -- myCsv ^? csv :: Maybe (Csv' Int) 159 | csv :: Iso' BL.ByteString (Csv' Int) 160 | csv = iso project embed 161 | where 162 | embed :: Csv' Int -> BL.ByteString 163 | embed (UnnamedCsv xs) = encode (toList xs) 164 | project :: BL.ByteString -> (Csv' Int) 165 | project = UnnamedCsv . S.decode NoHeader 166 | 167 | unpackRecordWithName :: CsvRecord Name -> NamedRecord 168 | unpackRecordWithName (NamedCsvRecord r) = r 169 | 170 | unpackRecordWithIndex :: CsvRecord Int -> Record 171 | unpackRecordWithIndex (CsvRecord r) = r 172 | 173 | -- | An indexed fold over the CSV headers of a named CSV. Indexed by the column number 174 | -- starting at 0. 175 | -- 176 | -- >>> myCsv ^.. namedCsv . headers 177 | -- ["state_code","population"] 178 | -- 179 | -- >>> myCsv ^@.. namedCsv . headers 180 | -- [(0,"state_code"),(1,"population")] 181 | headers :: IndexedTraversal' Int (Csv' Name) Name 182 | -- Note to self, this could technically be a traversal, but since we don't want to reparse all 183 | -- records with the new headers we don't yet allow editing headers. 184 | headers f (NamedCsv h xs) = flip NamedCsv xs <$> (h & traversed %%@~ indexed f) 185 | 186 | -- | Allows rewriting, adding and removing headers on the CSV both before serializing and after. 
187 | -- Note that rewriting a header name DOES NOT affect any of the records, it only affects the 188 | -- choice and order of the columns in the output CSV. If you want to rename a column header 189 | -- you must also rename the name of that field on all rows in the csv. 190 | -- 191 | -- This is a limitation of cassava itself. 192 | -- 193 | -- Examples: 194 | -- 195 | -- Drop the first column: 196 | -- 197 | -- >>> BL.lines (myCsv & namedCsv . adjustingOutputHeaders (view _tail) %~ id) 198 | -- ["population\r","19540000\r","39560000\r"] 199 | -- 200 | -- Add a new column with the population in millions 201 | -- 202 | -- >>> import Data.Char (toLower) 203 | -- >>> addStateLower m = M.insert "state_lower" (m ^. ix "state_code" . to (map toLower)) m 204 | -- >>> :{ 205 | -- BL.lines (myCsv 206 | -- & namedCsv 207 | -- -- Add "state_lower" to output headers so it will be serialized 208 | -- . adjustingOutputHeaders (<> pure "state_lower") 209 | -- . rows 210 | -- . _NamedRecord @(M.Map String String) 211 | -- -- Add "state_lower" to each record 212 | -- %~ addStateLower 213 | -- ) 214 | -- :} 215 | -- ["state_code,population,state_lower\r","NY,19540000,ny\r","CA,39560000,ca\r"] 216 | -- 217 | -- Reverse column order: 218 | -- 219 | -- >>> BL.lines (myCsv & namedCsv . adjustingOutputHeaders (view reversed) %~ id) 220 | -- ["population,state_code\r","19540000,NY\r","39560000,CA\r"] 221 | -- 222 | adjustingOutputHeaders :: (Header -> Header) -- ^ Adjust headers for the serialization step 223 | -> Iso' (Csv' Name) (Csv' Name) 224 | adjustingOutputHeaders f = iso id (\(NamedCsv h xs) -> NamedCsv (f h) xs) 225 | 226 | -- | An indexed traversal over each row of the csv as a 'CsvRecord'. Passes through 227 | -- a type witness signifying whether the records are 'Name' or 'Int' indexed. 228 | -- 229 | -- Traversing rows of a named csv results in named records: 230 | -- 231 | -- >>> myCsv ^.. namedCsv . 
rows 232 | -- [NamedCsvRecord (fromList [("population","19540000"),("state_code","NY")]),NamedCsvRecord (fromList [("population","39560000"),("state_code","CA")])] 233 | -- 234 | -- Traversing rows of an indexed csv results in indexed records: 235 | -- 236 | -- >>> myCsv ^.. csv . dropping 1 rows 237 | -- [CsvRecord (["NY","19540000"]),CsvRecord (["CA","39560000"])] 238 | rows :: IndexedTraversal' Int (Csv' i) (CsvRecord i) 239 | rows f (NamedCsv h xs) = NamedCsv h . fmap unpackRecordWithName <$> (xs & traversed %%@~ \i x -> indexed f i (NamedCsvRecord x)) 240 | rows f (UnnamedCsv xs) = UnnamedCsv . fmap unpackRecordWithIndex <$> (xs & traversed %%@~ \i x -> indexed f i (CsvRecord x)) 241 | 242 | -- | Parse and traverse the fields of a 'CsvRecord' into the inferred 'FromField' type. 243 | -- Focuses are indexed by either the column headers or column number accordingly. 244 | -- 245 | -- Be careful to provide appropriate type hints to 'columns' so that it knows which 'Field' 246 | -- type to parse into, any fields which fail to parse will be simply ignored, you can use this 247 | -- strategically to select all fields of a given type within a record. 248 | -- 249 | -- >>> myCsv ^.. namedCsv . row 0 . columns @String 250 | -- ["19540000","NY"] 251 | -- 252 | -- >>> myCsv ^.. namedCsv . row 0 . columns @Int 253 | -- [19540000] 254 | -- 255 | -- 'columns' is indexed, you can use the column number or column header. 256 | -- 257 | -- >>> myCsv ^@.. namedCsv . row 0 . columns @String 258 | -- [("population","19540000"),("state_code","NY")] 259 | -- 260 | -- >>> myCsv ^@.. namedCsv . row 0 . columns @Int 261 | -- [("population",19540000)] 262 | -- 263 | -- 264 | -- >>> BL.lines (myCsv & namedCsv . rows . columns @Int %~ subtract 1) 265 | -- ["state_code,population\r","NY,19539999\r","CA,39559999\r"] 266 | columns :: forall a i. 
(ToField a, FromField a) => IndexedTraversal' i (CsvRecord i) a 267 | columns = columns' 268 | 269 | -- | A more flexible version of 'columns' which allows the focused field to change types. Affords worse type inference, so prefer 'columns' when possible. 270 | -- 271 | -- See 'columns' for usage examples 272 | columns' :: forall a b i. (FromField a, ToField b) => IndexedTraversal i (CsvRecord i) (CsvRecord i) a b 273 | columns' = cols . _Field' 274 | where 275 | cols :: IndexedTraversal' i (CsvRecord i) Field 276 | cols f (CsvRecord r) = CsvRecord <$> (r & itraversed %%@~ indexed f) 277 | cols f (NamedCsvRecord r) = NamedCsvRecord <$> (r & itraversed %%@~ indexed f) 278 | 279 | -- | Select a specific column of a record by the appropriate index type, either 'Name' for 'namedCsv's or 'Int' for 'csv's 280 | -- 281 | -- See 'columns' for more usage ideas. 282 | -- 283 | -- >>> myCsv ^.. namedCsv . rows . column @Int "population" 284 | -- [19540000,39560000] 285 | -- 286 | -- >>> myCsv ^.. csv . dropping 1 rows . column @String 0 287 | -- ["NY","CA"] 288 | column :: forall a b i. (Eq i, FromField a, ToField a) => i -> IndexedTraversal' i (CsvRecord i) a 289 | column i = column' i 290 | 291 | -- | A more flexible version of 'column' which allows the focused field to change types. Affords worse type inference, so prefer 'column' when possible. 292 | -- 293 | -- See 'column' for usage examples 294 | column' :: forall a b i. (Eq i, FromField a, ToField b) => i -> IndexedTraversal i (CsvRecord i) (CsvRecord i) a b 295 | column' i = t . _Field' 296 | where 297 | t :: IndexedTraversal' i (CsvRecord i) Field 298 | t f x = x & ix i %%~ indexed f i 299 | 300 | -- | Traverse a specific row of the csv by row number. 301 | row :: Int -> IndexedTraversal' Int (Csv' i) (CsvRecord i) 302 | row i f x = x & ix i %%~ indexed f i 303 | 304 | -- | A prism which attempt to parse the given record into a type using 'FromRecord'. 
305 | -- 306 | -- Tuples implement 'FromRecord': 307 | -- 308 | -- >>> myCsv ^.. csv . row 1 . _Record @(String, Int) 309 | -- [("NY",19540000)] 310 | -- 311 | -- If we parse each row into a tuple record we can swap the positions and it will write back 312 | -- into a valid CSV. 313 | -- 314 | -- >>> import Data.Tuple (swap) 315 | -- >>> BL.lines (myCsv & csv . rows . _Record @(String, String) %~ swap) 316 | -- ["population,state_code\r","19540000,NY\r","39560000,CA\r"] 317 | _Record :: forall a b. (FromRecord a, ToRecord a) => Prism' (CsvRecord Int) a 318 | _Record = _Record' 319 | 320 | -- | A more flexible version of '_Record' which allows the focus to change types. Affords worse type inference, so prefer '_Record' when possible. 321 | -- 322 | -- See '_Record' for usage examples 323 | _Record' :: forall a b. (FromRecord a, ToRecord b) => Prism (CsvRecord Int) (CsvRecord Int) a b 324 | _Record' = prism embed project 325 | where 326 | project :: CsvRecord Int -> Either (CsvRecord Int) a 327 | project (CsvRecord r) = 328 | case runParser (parseRecord r) of 329 | Left _ -> Left (CsvRecord r) 330 | Right a -> Right a 331 | embed :: b -> CsvRecord Int 332 | embed = CsvRecord . toRecord 333 | 334 | -- | Attempt to parse the given record into a type using 'FromNamedRecord'. 335 | -- 336 | -- >>> myCsv ^? namedCsv . row 0 . _NamedRecord @(M.Map String String) 337 | -- Just (fromList [("population","19540000"),("state_code","NY")]) 338 | _NamedRecord :: forall a b. (FromNamedRecord a, ToNamedRecord a) 339 | => Prism' (CsvRecord Name) a 340 | _NamedRecord = _NamedRecord' 341 | 342 | -- | A more flexible version of '_NamedRecord' which allows the focus to change types. Affords worse type inference, so prefer '_NamedRecord' when possible. 343 | -- 344 | -- See '_NamedRecord' for usage examples 345 | _NamedRecord' :: forall a b. 
(FromNamedRecord a, ToNamedRecord b) 346 | => Prism (CsvRecord Name) (CsvRecord Name) a b 347 | _NamedRecord' = prism embed project 348 | where 349 | project :: CsvRecord Name -> Either (CsvRecord Name) a 350 | project (NamedCsvRecord r) = 351 | case runParser (parseNamedRecord r) of 352 | Left _ -> Left (NamedCsvRecord r) 353 | Right a -> Right a 354 | embed :: b -> CsvRecord Name 355 | embed = NamedCsvRecord . toNamedRecord 356 | 357 | -- | Attempt to parse the given 'Field' into a type using 'FromField'. 358 | -- 359 | -- You usually won't need this, 'column', 'columns', '_Record', and '_NamedRecord' are usually more flexible and provide more power. 360 | _Field :: forall a. (FromField a, ToField a) => Prism' Field a 361 | _Field = _Field' 362 | 363 | -- | A more flexible version of '_Field' which allows the focus to change types. Affords worse type inference, so prefer '_Field' when possible. 364 | -- 365 | -- You usually won't need this, 'column', 'columns', '_Record', and '_NamedRecord' are usually more flexible and provide more power. 366 | _Field' :: forall a b. (FromField a, ToField b) => Prism Field Field a b 367 | _Field' = prism embed project 368 | where 369 | project s = either (const $ Left s) Right . runParser . parseField $ s 370 | embed = toField 371 | -------------------------------------------------------------------------------- /stack.yaml: -------------------------------------------------------------------------------- 1 | resolver: lts-16.17 2 | 3 | packages: 4 | - . 5 | -------------------------------------------------------------------------------- /stack.yaml.lock: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by Stack. 2 | # You should not edit this file by hand. 
3 | # For more information, please see the documentation at: 4 | # https://docs.haskellstack.org/en/stable/lock_files 5 | 6 | packages: [] 7 | snapshots: 8 | - completed: 9 | size: 532386 10 | url: https://raw.githubusercontent.com/commercialhaskell/stackage-snapshots/master/lts/16/17.yaml 11 | sha256: d3ee1ae797cf63189c95cf27f00700304946c5cb3c1e6a82001cd6584a221e1b 12 | original: lts-16.17 13 | -------------------------------------------------------------------------------- /test/Data/Csv/LensSpec.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE TypeApplications #-} 2 | {-# LANGUAGE OverloadedStrings #-} 3 | module Data.Csv.LensSpec where 4 | 5 | import Data.ByteString.Lazy as BL 6 | import Test.Tasty 7 | import Test.Tasty.Hspec 8 | 9 | import Control.Lens (taking) 10 | import Control.Lens.Operators 11 | import Data.Csv.Lens 12 | import Data.Map (Map) 13 | import qualified Data.Map as Map 14 | 15 | spec_lens_csv :: Spec 16 | spec_lens_csv = describe "transforming simple.csv" $ do 17 | 18 | it "should extract state codes" $ do 19 | myCsv <- BL.readFile "./data/simple.csv" 20 | let result = myCsv ^.. namedCsv . taking 2 rows . column @String "state_code" 21 | result `shouldBe` [ "NY", "CA" ] 22 | 23 | it "should be able to extract records as maps" $ do 24 | myCsv <- BL.readFile "./data/simple.csv" 25 | let result = myCsv ^.. namedCsv . taking 2 rows . _NamedRecord @(Map String String) 26 | result `shouldBe` [ Map.fromList [("population","19540000"),("state_code","NY")] 27 | , Map.fromList [("population","39560000"),("state_code","CA")] 28 | ] 29 | 30 | it "should allow column indexing using Int" $ do 31 | myCsv <- BL.readFile "./data/simple.csv" 32 | let result = myCsv ^.. csv . rows . column @Int 1 33 | result `shouldBe` [ 19540000, 39560000 ] 34 | 35 | it "should be possible to edit using traversals" $ do 36 | myCsv <- BL.readFile "./data/simple.csv" 37 | let result = myCsv & namedCsv . row 1 . 
column @Int "population" +~ 1337 38 | result `shouldBe` "state_code,population\r\nNY,19540000\r\nCA,39561337\r\n" 39 | -------------------------------------------------------------------------------- /test/Spec.hs: -------------------------------------------------------------------------------- 1 | {-# OPTIONS_GHC -F -pgmF tasty-discover #-} 2 | --------------------------------------------------------------------------------