├── .gitignore ├── .travis.yml ├── LICENSE ├── README.md ├── Setup.hs ├── ads.cabal ├── default-config ├── src ├── cbits │ ├── README.md │ ├── lzma │ │ ├── LzmaDec.c │ │ ├── LzmaDec.h │ │ ├── Types.h │ │ ├── lzma.c │ │ └── lzma.h │ ├── rijndael.c │ └── rijndael.h ├── lib │ ├── Codec │ │ └── Archive │ │ │ ├── README.md │ │ │ ├── Tar.hs │ │ │ └── Tar │ │ │ ├── Check.hs │ │ │ ├── Entry.hs │ │ │ ├── Pack.hs │ │ │ ├── Read.hs │ │ │ ├── Types.hs │ │ │ ├── Unpack.hs │ │ │ └── Write.hs │ ├── Freenet.hs │ ├── Freenet │ │ ├── Archive.hs │ │ ├── Base64.hs │ │ ├── Chk.hs │ │ ├── Companion.hs │ │ ├── Compression.hs │ │ ├── Fetch.hs │ │ ├── Fproxy.hs │ │ ├── Insert.hs │ │ ├── Metadata.hs │ │ ├── Mime.hs │ │ ├── Pcfb.hs │ │ ├── Rijndael.hs │ │ ├── SplitFile.hs │ │ ├── Ssk.hs │ │ ├── Store.hs │ │ ├── Types.hs │ │ └── URI.hs │ ├── Logging.hs │ ├── Message.hs │ ├── Net.hs │ ├── Node.hs │ ├── Peers.hs │ ├── Requests.hs │ ├── RestApi.hs │ ├── Statistics.hs │ ├── Time.hs │ ├── Types.hs │ └── Utils.hs ├── main │ └── Main.hs ├── sandbox │ ├── Main.hs │ ├── Network.hs │ └── Sandbox.hs └── tests │ ├── Main.hs │ ├── Properties.hs │ ├── Properties │ ├── CHK.hs │ ├── Statistics.hs │ └── Types.hs │ └── StoreTest.hs └── webUi ├── css ├── bootstrap-theme.css ├── bootstrap-theme.css.map ├── bootstrap-theme.min.css ├── bootstrap.css ├── bootstrap.css.map ├── bootstrap.min.css └── node-status.css ├── fonts ├── glyphicons-halflings-regular.eot ├── glyphicons-halflings-regular.svg ├── glyphicons-halflings-regular.ttf └── glyphicons-halflings-regular.woff ├── index.html └── js ├── bootstrap.js ├── bootstrap.min.js ├── d3.min.js ├── jquery-2.1.0.min.js └── node-status.js /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | .cabal-sandbox/ 3 | cabal.sandbox.config 4 | dist/ 5 | fn-datastore/ 6 | **/#*# 7 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: haskell 2 | 3 | script: 4 | - cabal configure --enable-tests -f sandbox && cabal build && cabal test 5 | 6 | ghc: 7 | - 7.4 8 | - 7.6 9 | - 7.8 10 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # A Distributed Store 2 | 3 | [![Build Status](https://travis-ci.org/waldheinz/ads.svg?branch=master)](https://travis-ci.org/waldheinz/ads) 4 | 5 | This is a (partial) reimplementation of [Freenet][1], written from scratch in Haskell. It aims for compatibility in data formats, but *not* in the protocol. 6 | 7 | ## Why you don't want to use it straight away 8 | 9 | * The node-to-node communication is neither encrypted nor secured against any form of impersonation or whatever evil you may think of. 10 | * The FProxy implementation in no way filters the content it pulls from Freenet, and passes it straight to your browser. This means something as simple as an embedded image will reveal your identity to someone on the internet. 11 | * I have only limited understanding of cryptography. Maybe this is not exactly the problem here, because I just reimplemented what Freenet does. I can't judge the crypto expertise of the Freenet developers / designers, so you're on your own with this. 12 | * This is very young code, and many things are still in flux and rough around the edges. 
If you're not a "developer", chances are you won't get it to run (or you'll shoot yourself in the foot while trying). 13 | 14 | 15 | ## Give it a try anyway 16 | 17 | Here's what needs to be done: 18 | 19 | * Check out and compile the code, preferably in a cabal sandbox. 20 | * Start it up for the first time with `cabal run`, and shut it down again when you see the `node listening on ...` message. This will initialize a `~/.ads` directory containing: 21 | * A `config` file with some basic options in [configurator][configurator] format. You may want to adjust: 22 | * `node.listen`: where your node listens for node-to-node connections 23 | * `node.http`: there is a rudimentary web interface / REST API; this setting controls where it listens 24 | * `fproxy`: this is where the FProxy lets you browse Freesites. This is currently separate from `node.http` for ... reasons. It's likely to be merged with the former soon. 25 | * `freenet.datastore`: the directory where the datastore lives, and how big you want it to be. You probably want to change the defaults; here are some things to keep in mind: 26 | * Currently it is not possible to grow/shrink the data store. When you decide you want to change it, all data will be lost. Don't be that guy. 27 | * These files are created as sparse files by default. This means they won't take up space until data is actually written there, which is good. It also means that they'll likely be heavily fragmented when they have grown to a reasonable size, which is bad. If you can afford the space, just delete the freshly created files and recreate them with the `fallocate` command. This is quick and really makes a difference performance-wise. Also, COW file systems like ZFS and probably BTRFS tend to fragment these files as well. Good results can be obtained with the `ext4` + `fallocate` combo. If your store lives on an SSD you don't have to worry about fragmentation, only about the size. 28 | * The `*histogram` files are re-created when lost, but this can take some time when the store is large. 29 | * An `identity` file in JSON format (see the example after this list), containing 30 | * the `id` of your node, which is just 32 random bytes. It's not particularly important what that id is, but changing it after the first connection to the network might cause [severe problems][id-mismatch] finding nodes willing to talk to you. Just leave it alone. 31 | * an `addresses` list, defining where your node can be reached by other nodes. This list can be empty, which means your node will only ever make outgoing connections. If at all possible, you should put some addresses there, each in the following format: `{ "host" : "<hostname>", "port" : <port> }`. The port number will likely be the same one you used in `node.listen` in the `config` file, but may be different when using port forwarding. For the hostname, anything goes: plain IPv4 and IPv6 addresses, as well as DNS names. When other nodes try to connect to your node, they will try the addresses in the order you put them in this list. This is useful when your node can be reached over the Internet and over some LAN: just put the local address before the Internet address, and other nodes on the same network will prefer the faster link; maybe a laptop talking to the desktop PC. 32 | * You'll also need a `peers` file to make some connections: these are the peers your node will initially connect to, and then it will learn about other nodes from the nodes it has connected to, and so on. [Here][seed-nodes] is one to get you started. 33 | * When all this works, you may point your FProxy to Enzo's index at `USK@XJZAi25dd5y7lrxE3cHMmM-xZ-c-hlPpKLYeLC0YG5I,8XTbR1bd9RBXlX6j-OZNednsJ8Cl6EAeBBebC3jtMFU,AQACAAE/index/336/`. (I do not know who Enzo is, and I don't endorse any of the content on his site. It's just an up-to-date index containing links to other Freesites you may or may not like. Which I may or may not like. Which may or may not be legal in your country.)
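For illustration, here is what such an `identity` file could look like. This is a sketch only: the field layout follows the description above, but the `id` placeholder and both addresses are made up, so check the file your own node generated on first start:

```json
{
  "id" : "<the 32 random bytes your node generated>",
  "addresses" : [
    { "host" : "192.168.1.10", "port" : 63763 },
    { "host" : "my-node.example.org", "port" : 63763 }
  ]
}
```

Here the LAN address is listed before the public DNS name, so other nodes on the same network will try the faster local link first.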
34 | 35 | ## Features 36 | 37 | * TCP-based protocol which allows data exchange between the nodes 38 | * can decode, encode and verify [CHK blocks][2] 39 | * can decode and verify [SSK blocks][3] 40 | * can decode most [split files][4] (except for very large ones, which use a metadata format that is not fully implemented) 41 | * can parse almost all of the weird archive & metadata formats which are mostly used to compose [Freesites][5] 42 | * there is a rudimentary Fproxy implementation which lets you view almost every Freesite in existence (well, at least the ones I dared to click on) 43 | * uses [Next Best Once][6] to make routing decisions (I hope I got it right) 44 | * decent performance and low resource usage. The latter is mainly true for nodes where the FProxy is not used (which are only forwarding and storing data). The FProxy could use some more love, though. 45 | 46 | ## Interop with Freenet 47 | 48 | Interop with Freenet mainly means we need a way to pull data blocks from Freenet into the ADS network and vice versa. 49 | 50 | ### From Freenet to ADS 51 | 52 | This works quite well, and is implemented by means of a [Freenet plugin][7]. The plugin listens on a TCP socket, and an ADS node can be configured to connect there (see the configuration sketch at the end of this section). ADS nodes which have such a companion configured will ask their Freenet counterpart if they can't find some data locally. 53 | 54 | Setting this up is a bit tedious at first, but at least it runs quite stably unattended and gives decent performance. I have a battery of 16 Freenet companion nodes evenly distributed across the keyspace, which allow data to migrate from Freenet to ADS. This gives impressive performance compared to what a single Freenet node can provide, no matter how many resources you throw at it. 55 | 56 | ### From ADS to Freenet 57 | 58 | I have a partial implementation of this, in the form of a patched Freenet node. This node will connect to other Freenet nodes just as usual. But incoming requests are not forwarded to other Freenet nodes; instead they are translated to HTTP requests against an ADS node's REST API. This is working well so far, but the problem is that the Freenet node does not send the data it gets from ADS onward to whichever Freenet node requested it. This means the data has to be requested twice from the Freenet side to be actually routed: 59 | 60 | * the first time it finds its way from ADS to the patched Freenet node's data store and 61 | * the second time this node finds it in its store and it is actually routed within Freenet 62 | 63 | This is far from optimal, and probably someone more familiar with the Freenet codebase can easily fix this. Maybe. 
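Pointing a node at a companion plugin is just a matter of filling in the `freenet.companion` block of the `config` file (the shipped `default-config`, reproduced further below, has the host commented out). A minimal sketch — the host and port here are examples and must match wherever your plugin actually listens:

```
freenet {
  companion {
    host = "127.0.0.1"  # where the companion plugin's TCP socket listens
    port = 12345
  }
}
```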
64 | 65 | [1]: https://freenetproject.org/ 66 | [2]: https://wiki.freenetproject.org/Content_Hash_Key 67 | [3]: https://wiki.freenetproject.org/SSK 68 | [4]: https://wiki.freenetproject.org/Splitfile 69 | [5]: https://wiki.freenetproject.org/Freesite 70 | [6]: http://arxiv.org/abs/1401.2165 71 | [7]: https://github.com/waldheinz/ads-companion 72 | [configurator]: http://hackage.haskell.org/package/configurator 73 | [id-mismatch]: https://github.com/waldheinz/ads/blob/master/src/Node.hs#L417 74 | [seed-nodes]: https://gist.github.com/waldheinz/317bfacd16eab84099f1 75 | -------------------------------------------------------------------------------- /Setup.hs: -------------------------------------------------------------------------------- 1 | import Distribution.Simple 2 | main = defaultMain 3 | -------------------------------------------------------------------------------- /ads.cabal: -------------------------------------------------------------------------------- 1 | name: ads 2 | version: 0.1.0.0 3 | -- synopsis: 4 | -- description: 5 | license: GPL-3 6 | license-file: LICENSE 7 | author: Matthias Treydte 8 | maintainer: mt@waldheinz.de 9 | -- copyright: 10 | category: Network 11 | build-type: Simple 12 | -- extra-source-files: 13 | cabal-version: >=1.10 14 | 15 | data-files: 16 | default-config 17 | 18 | flag sandbox 19 | description: Enable building the sandbox 20 | default: False 21 | 22 | library 23 | hs-source-dirs: src/lib 24 | default-language: Haskell2010 25 | ghc-options: -Wall -O2 -threaded 26 | c-sources: 27 | src/cbits/rijndael.c, 28 | src/cbits/lzma/lzma.c, 29 | src/cbits/lzma/LzmaDec.c 30 | cc-options: -Wall -O2 -std=c99 31 | exposed-modules: 32 | Codec.Archive.Tar.Read, 33 | Codec.Archive.Tar.Types, 34 | Freenet, 35 | Freenet.Archive, 36 | Freenet.Base64, 37 | Freenet.Chk, 38 | Freenet.Companion, 39 | Freenet.Compression, 40 | Freenet.Fetch, 41 | Freenet.Fproxy, 42 | Freenet.Insert, 43 | Freenet.Metadata, 44 | Freenet.Mime, 45 | Freenet.Pcfb, 46 | Freenet.Rijndael, 47 | Freenet.SplitFile, 48 | Freenet.Ssk, 49 | Freenet.Store, 50 | Freenet.Types, 51 | Freenet.URI, 52 | Logging, 53 | Net, 54 | Node, 55 | Message, 56 | Peers, 57 | RestApi, 58 | Statistics, 59 | Time, 60 | Types, 61 | Utils 62 | 63 | build-depends: 64 | base ==4.*, 65 | aeson >= 0.7, 66 | array >= 0.4, 67 | async >= 2.0, 68 | base16-bytestring >= 0.1, 69 | base64-bytestring >= 1.0, 70 | binary >= 0.5, 71 | binary-conduit >= 1.2, 72 | bytestring >= 0.9, 73 | bzlib >= 0.5, 74 | cipher-aes >= 0.2, 75 | concurrent-extra >= 0.7, 76 | conduit >= 1.0, 77 | conduit-extra >= 1.1, 78 | configurator >= 0.2, 79 | cprng-aes == 0.5.*, 80 | crypto-pubkey >= 0.2, 81 | directory >= 1.2, 82 | fec >= 0.1, 83 | filepath >= 1.3, 84 | hashable >= 1.2, 85 | hslogger >= 1.2, 86 | http-types >= 0.8, 87 | lrucache >= 1.1, 88 | network >= 2.4, 89 | QuickCheck >= 2.7, 90 | random >= 1.0, 91 | SHA >= 1.6, 92 | stm >= 2.4, 93 | stm-chans >= 3.0, 94 | stm-conduit >= 2.2, 95 | text >= 0.11, 96 | time >= 1.1, 97 | transformers >= 0.3, 98 | unix >= 2.6, 99 | unix-bytestring, 100 | unordered-containers >= 0.2, 101 | vector >= 0.10, 102 | void >= 0.6, 103 | wai >= 3.0, 104 | wai-app-static >= 3.0, 105 | wai-extra >= 3.0, 106 | warp >= 3.0, 107 | zip-archive >= 0.2, 108 | zlib >= 0.5 109 | 110 | executable ads 111 | hs-source-dirs: src/main 112 | default-language: Haskell2010 113 | main-is: Main.hs 114 | ghc-options: -Wall -threaded 115 | build-depends: 116 | ads, aeson, base, bytestring, configurator, directory, filepath, network, 117 | random, stm, unix, warp
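-- The ads-sandbox executable below is gated behind the "sandbox" flag
-- declared above, so a plain build skips it; enable it explicitly with
-- something like: cabal configure --enable-tests -f sandbox
-- (this is what the Travis script above does)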
118 | 119 | executable ads-sandbox 120 | hs-source-dirs: src/sandbox 121 | default-language: Haskell2010 122 | main-is: Main.hs 123 | ghc-options: -Wall -threaded 124 | 125 | if flag(sandbox) 126 | buildable: True 127 | else 128 | buildable: False 129 | 130 | build-depends: 131 | ads, base, conduit, mtl, random, stm, stm-conduit, 132 | unordered-containers 133 | 134 | test-suite test-ads 135 | type: 136 | exitcode-stdio-1.0 137 | default-language: 138 | Haskell2010 139 | hs-source-dirs: 140 | src/tests 141 | main-is: 142 | Main.hs 143 | ghc-options: 144 | -Wall -threaded 145 | build-depends: 146 | ads, 147 | base >= 4.0, 148 | bytestring, 149 | HUnit >= 1.2, 150 | temporary >= 1.1, 151 | test-framework >= 0.8, 152 | test-framework-hunit >= 0.3, 153 | test-framework-quickcheck2 >= 0.3, 154 | QuickCheck >= 2.0, 155 | stm 156 | -------------------------------------------------------------------------------- /default-config: -------------------------------------------------------------------------------- 1 | 2 | node { 3 | listen { 4 | host = "*" 5 | port = 63763 6 | } 7 | 8 | http { 9 | host = "localhost" 10 | port = 8080 11 | } 12 | } 13 | 14 | logging { 15 | 16 | } 17 | 18 | fproxy { 19 | enabled = false 20 | port = 8081 21 | } 22 | 23 | freenet { 24 | datastore { 25 | directory = "$(HOME)/.ads/data-store" 26 | chk-count = 300000 27 | ssk-count = 300000 28 | } 29 | 30 | companion { 31 | # host = "127.0.0.1" 32 | port = 12345 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/cbits/README.md: -------------------------------------------------------------------------------- 1 | # Why this is needed 2 | 3 | ## Rijndael 4 | 5 | Freenet makes use of the Rijndael block cipher, which is *not* the same as 6 | AES. AES is a subset of Rijndael, not supporting the 256-bit block size 7 | used by Freenet in some places: 8 | 9 | * There are two variants of CHK blocks, and the older version makes use of 10 | Rijndael. Those blocks are still floating around and can be decrypted using 11 | this code. The newer variant does use plain AES, and only such blocks are 12 | ever created when inserting new data. So this code is used only for decoding, 13 | and will eventually be obsoleted when the last old CHK block has fallen out 14 | of the stores. 15 | 16 | * The original UDP-based Freenet protocol makes extensive use of this, 17 | which is not really a concern for ADS as it does not support this protocol 18 | currently. 19 | 20 | ## LZMA 21 | 22 | LZMA is an older variant of what is now known as XZ, also used by Freenet as one 23 | possible compression format. The [lzma-conduit][1] package only supports the newer 24 | format, so I pulled this in. Having a proper conduit wrapper for this would be 25 | key to allowing the download of larger files from Freenet, as the current code does 26 | not support streaming decompression. 
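To give an idea of how this one-shot interface is consumed, here is a minimal sketch of a Haskell binding to the C entry points declared in `lzma.h` below. The module name and the surrounding error handling are hypothetical (the real binding presumably lives in `Freenet.Compression`), but the foreign signatures mirror the C code:

```haskell
{-# LANGUAGE ForeignFunctionInterface #-}
-- Hypothetical binding sketch; error handling (NULL checks, length
-- validation) is omitted for brevity.
module LzmaSketch (lzmaDecompress) where

import qualified Data.ByteString as BS
import qualified Data.ByteString.Unsafe as BSU
import Data.Word (Word8)
import Foreign.C.Types (CChar, CSize)
import Foreign.Marshal.Alloc (alloca, free)
import Foreign.Ptr (Ptr, castPtr, plusPtr)
import Foreign.Storable (peek)

data LzmaDecState  -- opaque stand-in for struct lzma_dec_state

foreign import ccall unsafe "lzma_dec_init"
  cDecInit :: Ptr CChar -> IO (Ptr LzmaDecState)

foreign import ccall unsafe "lzma_dec_free"
  cDecFree :: Ptr LzmaDecState -> IO ()

foreign import ccall unsafe "lzma_decode"
  cDecode :: Ptr LzmaDecState -> Ptr CChar -> CSize
          -> Ptr CSize -> IO (Ptr Word8)

-- | Decompress a buffer whose first 5 bytes (LZMA_PROPS_SIZE) carry the
-- LZMA properties header, followed by the compressed stream.
lzmaDecompress :: BS.ByteString -> IO BS.ByteString
lzmaDecompress input =
  BSU.unsafeUseAsCStringLen input $ \(src, len) -> do
    st <- cDecInit src                    -- props live in the first 5 bytes
    alloca $ \outLenPtr -> do
      out <- cDecode st (src `plusPtr` 5) (fromIntegral (len - 5)) outLenPtr
      n   <- peek outLenPtr
      cDecFree st
      result <- BS.packCStringLen (castPtr out, fromIntegral n)
      free out                            -- the C side malloc'd this buffer
      return result
```

Note that the whole input and the whole output live in memory at once — exactly the limitation the paragraph above describes.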
27 | 28 | [1]: http://hackage.haskell.org/package/lzma-conduit 29 | -------------------------------------------------------------------------------- /src/cbits/lzma/LzmaDec.h: -------------------------------------------------------------------------------- 1 | /* LzmaDec.h -- LZMA Decoder 2 | 2009-02-07 : Igor Pavlov : Public domain */ 3 | 4 | #ifndef __LZMA_DEC_H 5 | #define __LZMA_DEC_H 6 | 7 | #include "Types.h" 8 | 9 | #ifdef __cplusplus 10 | extern "C" { 11 | #endif 12 | 13 | /* #define _LZMA_PROB32 */ 14 | /* _LZMA_PROB32 can increase the speed on some CPUs, 15 | but memory usage for CLzmaDec::probs will be doubled in that case */ 16 | 17 | #ifdef _LZMA_PROB32 18 | #define CLzmaProb UInt32 19 | #else 20 | #define CLzmaProb UInt16 21 | #endif 22 | 23 | 24 | /* ---------- LZMA Properties ---------- */ 25 | 26 | #define LZMA_PROPS_SIZE 5 27 | 28 | typedef struct _CLzmaProps 29 | { 30 | unsigned lc, lp, pb; 31 | UInt32 dicSize; 32 | } CLzmaProps; 33 | 34 | /* LzmaProps_Decode - decodes properties 35 | Returns: 36 | SZ_OK 37 | SZ_ERROR_UNSUPPORTED - Unsupported properties 38 | */ 39 | 40 | SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size); 41 | 42 | 43 | /* ---------- LZMA Decoder state ---------- */ 44 | 45 | /* LZMA_REQUIRED_INPUT_MAX = number of required input bytes for worst case. 46 | Num bits = log2((2^11 / 31) ^ 22) + 26 < 134 + 26 = 160; */ 47 | 48 | #define LZMA_REQUIRED_INPUT_MAX 20 49 | 50 | typedef struct 51 | { 52 | CLzmaProps prop; 53 | CLzmaProb *probs; 54 | Byte *dic; 55 | const Byte *buf; 56 | UInt32 range, code; 57 | SizeT dicPos; 58 | SizeT dicBufSize; 59 | UInt32 processedPos; 60 | UInt32 checkDicSize; 61 | unsigned state; 62 | UInt32 reps[4]; 63 | unsigned remainLen; 64 | int needFlush; 65 | int needInitState; 66 | UInt32 numProbs; 67 | unsigned tempBufSize; 68 | Byte tempBuf[LZMA_REQUIRED_INPUT_MAX]; 69 | } CLzmaDec; 70 | 71 | #define LzmaDec_Construct(p) { (p)->dic = 0; (p)->probs = 0; } 72 | 73 | void LzmaDec_Init(CLzmaDec *p); 74 | 75 | /* There are two types of LZMA streams: 76 | 0) Stream with end mark. That end mark adds about 6 bytes to compressed size. 77 | 1) Stream without end mark. You must know exact uncompressed size to decompress such stream. */ 78 | 79 | typedef enum 80 | { 81 | LZMA_FINISH_ANY, /* finish at any point */ 82 | LZMA_FINISH_END /* block must be finished at the end */ 83 | } ELzmaFinishMode; 84 | 85 | /* ELzmaFinishMode has meaning only if the decoding reaches output limit !!! 86 | 87 | You must use LZMA_FINISH_END, when you know that current output buffer 88 | covers last bytes of block. In other cases you must use LZMA_FINISH_ANY. 89 | 90 | If LZMA decoder sees end marker before reaching output limit, it returns SZ_OK, 91 | and output value of destLen will be less than output buffer size limit. 92 | You can check status result also. 93 | 94 | You can use multiple checks to test data integrity after full decompression: 95 | 1) Check Result and "status" variable. 96 | 2) Check that output(destLen) = uncompressedSize, if you know real uncompressedSize. 97 | 3) Check that output(srcLen) = compressedSize, if you know real compressedSize. 98 | You must use correct finish mode in that case. */ 99 | 100 | typedef enum 101 | { 102 | LZMA_STATUS_NOT_SPECIFIED, /* use main error code instead */ 103 | LZMA_STATUS_FINISHED_WITH_MARK, /* stream was finished with end mark. 
*/ 104 | LZMA_STATUS_NOT_FINISHED, /* stream was not finished */ 105 | LZMA_STATUS_NEEDS_MORE_INPUT, /* you must provide more input bytes */ 106 | LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK /* there is probability that stream was finished without end mark */ 107 | } ELzmaStatus; 108 | 109 | /* ELzmaStatus is used only as output value for function call */ 110 | 111 | 112 | /* ---------- Interfaces ---------- */ 113 | 114 | /* There are 3 levels of interfaces: 115 | 1) Dictionary Interface 116 | 2) Buffer Interface 117 | 3) One Call Interface 118 | You can select any of these interfaces, but don't mix functions from different 119 | groups for same object. */ 120 | 121 | 122 | /* There are two variants to allocate state for Dictionary Interface: 123 | 1) LzmaDec_Allocate / LzmaDec_Free 124 | 2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs 125 | You can use variant 2, if you set dictionary buffer manually. 126 | For Buffer Interface you must always use variant 1. 127 | 128 | LzmaDec_Allocate* can return: 129 | SZ_OK 130 | SZ_ERROR_MEM - Memory allocation error 131 | SZ_ERROR_UNSUPPORTED - Unsupported properties 132 | */ 133 | 134 | SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc); 135 | void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc); 136 | 137 | SRes LzmaDec_Allocate(CLzmaDec *state, const Byte *prop, unsigned propsSize, ISzAlloc *alloc); 138 | void LzmaDec_Free(CLzmaDec *state, ISzAlloc *alloc); 139 | 140 | /* ---------- Dictionary Interface ---------- */ 141 | 142 | /* You can use it, if you want to eliminate the overhead for data copying from 143 | dictionary to some other external buffer. 144 | You must work with CLzmaDec variables directly in this interface. 145 | 146 | STEPS: 147 | LzmaDec_Constr() 148 | LzmaDec_Allocate() 149 | for (each new stream) 150 | { 151 | LzmaDec_Init() 152 | while (it needs more decompression) 153 | { 154 | LzmaDec_DecodeToDic() 155 | use data from CLzmaDec::dic and update CLzmaDec::dicPos 156 | } 157 | } 158 | LzmaDec_Free() 159 | */ 160 | 161 | /* LzmaDec_DecodeToDic 162 | 163 | The decoding to internal dictionary buffer (CLzmaDec::dic). 164 | You must manually update CLzmaDec::dicPos, if it reaches CLzmaDec::dicBufSize !!! 165 | 166 | finishMode: 167 | It has meaning only if the decoding reaches output limit (dicLimit). 168 | LZMA_FINISH_ANY - Decode just dicLimit bytes. 169 | LZMA_FINISH_END - Stream must be finished after dicLimit. 170 | 171 | Returns: 172 | SZ_OK 173 | status: 174 | LZMA_STATUS_FINISHED_WITH_MARK 175 | LZMA_STATUS_NOT_FINISHED 176 | LZMA_STATUS_NEEDS_MORE_INPUT 177 | LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK 178 | SZ_ERROR_DATA - Data error 179 | */ 180 | 181 | SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, 182 | const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status); 183 | 184 | 185 | /* ---------- Buffer Interface ---------- */ 186 | 187 | /* It's zlib-like interface. 188 | See LzmaDec_DecodeToDic description for information about STEPS and return results, 189 | but you must use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic and you don't need 190 | to work with CLzmaDec variables manually. 191 | 192 | finishMode: 193 | It has meaning only if the decoding reaches output limit (*destLen). 194 | LZMA_FINISH_ANY - Decode just destLen bytes. 195 | LZMA_FINISH_END - Stream must be finished after (*destLen). 
196 | */ 197 | 198 | SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, 199 | const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status); 200 | 201 | 202 | /* ---------- One Call Interface ---------- */ 203 | 204 | /* LzmaDecode 205 | 206 | finishMode: 207 | It has meaning only if the decoding reaches output limit (*destLen). 208 | LZMA_FINISH_ANY - Decode just destLen bytes. 209 | LZMA_FINISH_END - Stream must be finished after (*destLen). 210 | 211 | Returns: 212 | SZ_OK 213 | status: 214 | LZMA_STATUS_FINISHED_WITH_MARK 215 | LZMA_STATUS_NOT_FINISHED 216 | LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK 217 | SZ_ERROR_DATA - Data error 218 | SZ_ERROR_MEM - Memory allocation error 219 | SZ_ERROR_UNSUPPORTED - Unsupported properties 220 | SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src). 221 | */ 222 | 223 | SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, 224 | const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode, 225 | ELzmaStatus *status, ISzAlloc *alloc); 226 | 227 | #ifdef __cplusplus 228 | } 229 | #endif 230 | 231 | #endif 232 | -------------------------------------------------------------------------------- /src/cbits/lzma/Types.h: -------------------------------------------------------------------------------- 1 | /* Types.h -- Basic types 2 | 2010-10-09 : Igor Pavlov : Public domain */ 3 | 4 | #ifndef __7Z_TYPES_H 5 | #define __7Z_TYPES_H 6 | 7 | #include <stddef.h> 8 | 9 | #ifdef _WIN32 10 | #include <windows.h> 11 | #endif 12 | 13 | #ifndef EXTERN_C_BEGIN 14 | #ifdef __cplusplus 15 | #define EXTERN_C_BEGIN extern "C" { 16 | #define EXTERN_C_END } 17 | #else 18 | #define EXTERN_C_BEGIN 19 | #define EXTERN_C_END 20 | #endif 21 | #endif 22 | 23 | EXTERN_C_BEGIN 24 | 25 | #define SZ_OK 0 26 | 27 | #define SZ_ERROR_DATA 1 28 | #define SZ_ERROR_MEM 2 29 | #define SZ_ERROR_CRC 3 30 | #define SZ_ERROR_UNSUPPORTED 4 31 | #define SZ_ERROR_PARAM 5 32 | #define SZ_ERROR_INPUT_EOF 6 33 | #define SZ_ERROR_OUTPUT_EOF 7 34 | #define SZ_ERROR_READ 8 35 | #define SZ_ERROR_WRITE 9 36 | #define SZ_ERROR_PROGRESS 10 37 | #define SZ_ERROR_FAIL 11 38 | #define SZ_ERROR_THREAD 12 39 | 40 | #define SZ_ERROR_ARCHIVE 16 41 | #define SZ_ERROR_NO_ARCHIVE 17 42 | 43 | typedef int SRes; 44 | 45 | #ifdef _WIN32 46 | typedef DWORD WRes; 47 | #else 48 | typedef int WRes; 49 | #endif 50 | 51 | #ifndef RINOK 52 | #define RINOK(x) { int __result__ = (x); if (__result__ != 0) return __result__; } 53 | #endif 54 | 55 | typedef unsigned char Byte; 56 | typedef short Int16; 57 | typedef unsigned short UInt16; 58 | 59 | #ifdef _LZMA_UINT32_IS_ULONG 60 | typedef long Int32; 61 | typedef unsigned long UInt32; 62 | #else 63 | typedef int Int32; 64 | typedef unsigned int UInt32; 65 | #endif 66 | 67 | #ifdef _SZ_NO_INT_64 68 | 69 | /* define _SZ_NO_INT_64, if your compiler doesn't support 64-bit integers. 70 | NOTES: Some code will work incorrectly in that case! 
*/ 71 | 72 | typedef long Int64; 73 | typedef unsigned long UInt64; 74 | 75 | #else 76 | 77 | #if defined(_MSC_VER) || defined(__BORLANDC__) 78 | typedef __int64 Int64; 79 | typedef unsigned __int64 UInt64; 80 | #define UINT64_CONST(n) n 81 | #else 82 | typedef long long int Int64; 83 | typedef unsigned long long int UInt64; 84 | #define UINT64_CONST(n) n ## ULL 85 | #endif 86 | 87 | #endif 88 | 89 | #ifdef _LZMA_NO_SYSTEM_SIZE_T 90 | typedef UInt32 SizeT; 91 | #else 92 | typedef size_t SizeT; 93 | #endif 94 | 95 | typedef int Bool; 96 | #define True 1 97 | #define False 0 98 | 99 | 100 | #ifdef _WIN32 101 | #define MY_STD_CALL __stdcall 102 | #else 103 | #define MY_STD_CALL 104 | #endif 105 | 106 | #ifdef _MSC_VER 107 | 108 | #if _MSC_VER >= 1300 109 | #define MY_NO_INLINE __declspec(noinline) 110 | #else 111 | #define MY_NO_INLINE 112 | #endif 113 | 114 | #define MY_CDECL __cdecl 115 | #define MY_FAST_CALL __fastcall 116 | 117 | #else 118 | 119 | #define MY_CDECL 120 | #define MY_FAST_CALL 121 | 122 | #endif 123 | 124 | 125 | /* The following interfaces use first parameter as pointer to structure */ 126 | 127 | typedef struct 128 | { 129 | Byte (*Read)(void *p); /* reads one byte, returns 0 in case of EOF or error */ 130 | } IByteIn; 131 | 132 | typedef struct 133 | { 134 | void (*Write)(void *p, Byte b); 135 | } IByteOut; 136 | 137 | typedef struct 138 | { 139 | SRes (*Read)(void *p, void *buf, size_t *size); 140 | /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream. 141 | (output(*size) < input(*size)) is allowed */ 142 | } ISeqInStream; 143 | 144 | /* it can return SZ_ERROR_INPUT_EOF */ 145 | SRes SeqInStream_Read(ISeqInStream *stream, void *buf, size_t size); 146 | SRes SeqInStream_Read2(ISeqInStream *stream, void *buf, size_t size, SRes errorType); 147 | SRes SeqInStream_ReadByte(ISeqInStream *stream, Byte *buf); 148 | 149 | typedef struct 150 | { 151 | size_t (*Write)(void *p, const void *buf, size_t size); 152 | /* Returns: result - the number of actually written bytes. 153 | (result < size) means error */ 154 | } ISeqOutStream; 155 | 156 | typedef enum 157 | { 158 | SZ_SEEK_SET = 0, 159 | SZ_SEEK_CUR = 1, 160 | SZ_SEEK_END = 2 161 | } ESzSeek; 162 | 163 | typedef struct 164 | { 165 | SRes (*Read)(void *p, void *buf, size_t *size); /* same as ISeqInStream::Read */ 166 | SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin); 167 | } ISeekInStream; 168 | 169 | typedef struct 170 | { 171 | SRes (*Look)(void *p, const void **buf, size_t *size); 172 | /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream. 173 | (output(*size) > input(*size)) is not allowed 174 | (output(*size) < input(*size)) is allowed */ 175 | SRes (*Skip)(void *p, size_t offset); 176 | /* offset must be <= output(*size) of Look */ 177 | 178 | SRes (*Read)(void *p, void *buf, size_t *size); 179 | /* reads directly (without buffer). 
It's same as ISeqInStream::Read */ 180 | SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin); 181 | } ILookInStream; 182 | 183 | SRes LookInStream_LookRead(ILookInStream *stream, void *buf, size_t *size); 184 | SRes LookInStream_SeekTo(ILookInStream *stream, UInt64 offset); 185 | 186 | /* reads via ILookInStream::Read */ 187 | SRes LookInStream_Read2(ILookInStream *stream, void *buf, size_t size, SRes errorType); 188 | SRes LookInStream_Read(ILookInStream *stream, void *buf, size_t size); 189 | 190 | #define LookToRead_BUF_SIZE (1 << 14) 191 | 192 | typedef struct 193 | { 194 | ILookInStream s; 195 | ISeekInStream *realStream; 196 | size_t pos; 197 | size_t size; 198 | Byte buf[LookToRead_BUF_SIZE]; 199 | } CLookToRead; 200 | 201 | void LookToRead_CreateVTable(CLookToRead *p, int lookahead); 202 | void LookToRead_Init(CLookToRead *p); 203 | 204 | typedef struct 205 | { 206 | ISeqInStream s; 207 | ILookInStream *realStream; 208 | } CSecToLook; 209 | 210 | void SecToLook_CreateVTable(CSecToLook *p); 211 | 212 | typedef struct 213 | { 214 | ISeqInStream s; 215 | ILookInStream *realStream; 216 | } CSecToRead; 217 | 218 | void SecToRead_CreateVTable(CSecToRead *p); 219 | 220 | typedef struct 221 | { 222 | SRes (*Progress)(void *p, UInt64 inSize, UInt64 outSize); 223 | /* Returns: result. (result != SZ_OK) means break. 224 | Value (UInt64)(Int64)-1 for size means unknown value. */ 225 | } ICompressProgress; 226 | 227 | typedef struct 228 | { 229 | void *(*Alloc)(void *p, size_t size); 230 | void (*Free)(void *p, void *address); /* address can be 0 */ 231 | } ISzAlloc; 232 | 233 | #define IAlloc_Alloc(p, size) (p)->Alloc((p), size) 234 | #define IAlloc_Free(p, a) (p)->Free((p), a) 235 | 236 | #ifdef _WIN32 237 | 238 | #define CHAR_PATH_SEPARATOR '\\' 239 | #define WCHAR_PATH_SEPARATOR L'\\' 240 | #define STRING_PATH_SEPARATOR "\\" 241 | #define WSTRING_PATH_SEPARATOR L"\\" 242 | 243 | #else 244 | 245 | #define CHAR_PATH_SEPARATOR '/' 246 | #define WCHAR_PATH_SEPARATOR L'/' 247 | #define STRING_PATH_SEPARATOR "/" 248 | #define WSTRING_PATH_SEPARATOR L"/" 249 | 250 | #endif 251 | 252 | EXTERN_C_END 253 | 254 | #endif 255 | -------------------------------------------------------------------------------- /src/cbits/lzma/lzma.c: -------------------------------------------------------------------------------- 1 | 2 | #include <stdlib.h> 3 | #include <string.h> 4 | #include "lzma.h" 5 | 6 | struct lzma_dec_state { 7 | CLzmaDec decoder; 8 | ISzAlloc alloc; 9 | }; 10 | 11 | void *alloc_impl(void *p, size_t size) { 12 | (void)p; /* unused context pointer */ 13 | return malloc(size); 14 | } 15 | 16 | void free_impl(void *p, void* addr) { 17 | (void)p; /* unused context pointer */ 18 | free(addr); 19 | } 20 | 21 | struct lzma_dec_state *lzma_dec_init(void* props) { 22 | struct lzma_dec_state *state = malloc(sizeof(struct lzma_dec_state)); 23 | 24 | if (!state) { /* check before touching the allocation */ 25 | return NULL; 26 | } 27 | memset(state, 0, sizeof(*state)); 28 | 29 | state->alloc.Alloc = &alloc_impl; 30 | state->alloc.Free = &free_impl; 31 | 32 | if (LzmaDec_Allocate(&state->decoder, props, 5, &state->alloc)) { 33 | free (state); 34 | return NULL; 35 | } 36 | 37 | LzmaDec_Init(&state->decoder); 38 | 39 | return state; 40 | } 41 | 42 | void lzma_dec_free(struct lzma_dec_state *state) { 43 | LzmaDec_Free(&state->decoder, &state->alloc); 44 | free(state); 45 | } 46 | 47 | void* lzma_decode(struct lzma_dec_state *state, void *src, size_t src_len, size_t *dest_len) { 48 | ELzmaStatus status; 49 | size_t current_size = 4096; 50 | void *target = malloc(current_size); 51 | *dest_len = 0; 52 | int done = 0; 53 | size_t pos_in = 0; 54 |
55 | while (!done) { 56 | size_t remain_out = current_size - *dest_len; 57 | size_t remain_in = src_len - pos_in; 58 | 59 | SRes res = LzmaDec_DecodeToBuf( 60 | &state->decoder, 61 | (Byte*)target + *dest_len, &remain_out, 62 | (const Byte*)src + pos_in, &remain_in, 63 | LZMA_FINISH_ANY, 64 | &status); 65 | 66 | pos_in += remain_in; 67 | *dest_len += remain_out; 68 | 69 | if (res != SZ_OK) { /* data error: bail out instead of looping */ 70 | free(target); 71 | *dest_len = 0; 72 | return NULL; 73 | } 74 | 75 | switch (status) { 76 | case LZMA_STATUS_NOT_FINISHED: 77 | current_size *= 2; 78 | target = realloc(target, current_size); 79 | break; 80 | default: 81 | done = 1; 82 | } 83 | } 84 | 85 | return target; 86 | } 87 | -------------------------------------------------------------------------------- /src/cbits/lzma/lzma.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef LZMA_H 3 | #define LZMA_H 4 | 5 | #include "LzmaDec.h" 6 | 7 | struct lzma_dec_state; 8 | 9 | struct lzma_dec_state *lzma_dec_init(void* props); 10 | void lzma_dec_free(struct lzma_dec_state *state); 11 | void* lzma_decode(struct lzma_dec_state *state, void *src, size_t src_len, size_t *dest_len); 12 | 13 | #endif 14 | -------------------------------------------------------------------------------- /src/cbits/rijndael.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef RIJNDAEL_H 3 | #define RIJNDAEL_H 4 | 5 | struct rijndael_sched_key; 6 | 7 | int rijndael_sched_key_size(); 8 | void init_tables(); 9 | 10 | void rijndael_init_key( 11 | struct rijndael_sched_key*, const unsigned char*, const unsigned int, const unsigned int); 12 | 13 | void encrypt_block(const struct rijndael_sched_key*, 14 | const unsigned char *, unsigned char *); 15 | 16 | #endif 17 | 18 | -------------------------------------------------------------------------------- /src/lib/Codec/Archive/README.md: -------------------------------------------------------------------------------- 1 | 2 | # So I made a copy of the tar package 3 | 4 | This is a straight copy of the [tar package][1], with only one 5 | minor addition: The magic ["ustar\0\0\0"][2] is recognized as 6 | `GnuFormat`. I don't know whether Freenet creates bogus tar files or 7 | whether this is missing in the tar package. Either way, this makes it work. 8 | 9 | [1]: http://hackage.haskell.org/package/tar 10 | [2]: https://github.com/waldheinz/ads/blob/master/src/Codec/Archive/Tar/Read.hs#L141 11 | -------------------------------------------------------------------------------- /src/lib/Codec/Archive/Tar.hs: -------------------------------------------------------------------------------- 1 | ----------------------------------------------------------------------------- 2 | -- | 3 | -- Module : Codec.Archive.Tar 4 | -- Copyright : (c) 2007 Bjorn Bringert, 5 | -- 2008 Andrea Vezzosi, 6 | -- 2008-2012 Duncan Coutts 7 | -- License : BSD3 8 | -- 9 | -- Maintainer : duncan@community.haskell.org 10 | -- Portability : portable 11 | -- 12 | -- Reading, writing and manipulating \"@.tar@\" archive files. 13 | -- 14 | -- This module uses common names and so is designed to be imported qualified: 15 | -- 16 | -- > import qualified Codec.Archive.Tar as Tar 17 | -- 18 | ----------------------------------------------------------------------------- 19 | module Codec.Archive.Tar ( 20 | 21 | -- | Tar archive files are used to store a collection of other files in a 22 | -- single file. They consists of a sequence of entries. Each entry describes 23 | -- a file or directory (or some other special kind of file). The entry stores 24 | -- a little bit of meta-data, in particular the file or directory name. 25 | -- 26 | -- Unlike some other archive formats, a tar file contains no index. 
The 27 | -- information about each entry is stored next to the entry. Because of this, 28 | -- tar files are almost always processed linearly rather than in a 29 | -- random-access fashion. 30 | -- 31 | -- The functions in this package are designed for working on tar files 32 | -- linearly and lazily. This makes it possible to do many operations in 33 | -- constant space rather than having to load the entire archive into memory. 34 | -- 35 | -- It can read and write standard POSIX tar files and also the GNU and old 36 | -- Unix V7 tar formats. The convenience functions that are provided in the 37 | -- "Codec.Archive.Tar.Entry" module for creating archive entries are 38 | -- primarily designed for standard portable archives. If you need to 39 | -- construct GNU format archives or exactly preserve file ownership and 40 | -- permissions then you will need to write some extra helper functions. 41 | -- 42 | -- This module contains just the simple high level operations without 43 | -- exposing all the details of tar files. If you need to inspect tar 44 | -- entries in more detail or construct them directly then you also need 45 | -- the module "Codec.Archive.Tar.Entry". 46 | 47 | -- * High level \"all in one\" operations 48 | create, 49 | extract, 50 | 51 | -- * Notes 52 | -- ** Compressed tar archives 53 | -- | Tar files are commonly used in conjunction with gzip compression, as in 54 | -- \"@.tar.gz@\" or \"@.tar.bz2@\" files. This module does not directly 55 | -- handle compressed tar files, however they can be handled easily by 56 | -- composing functions from this module and the modules 57 | -- @Codec.Compression.GZip@ or @Codec.Compression.BZip@ 58 | -- (see @zlib@ or @bzlib@ packages). 59 | -- 60 | -- Creating a compressed \"@.tar.gz@\" file is just a minor variation on the 61 | -- 'create' function, but where we throw compression into the pipeline: 62 | -- 63 | -- > BS.writeFile tar . GZip.compress . Tar.write =<< Tar.pack base dir 64 | -- 65 | -- Similarly, extracting a compressed \"@.tar.gz@\" is just a minor variation 66 | -- on the 'extract' function where we use decompression in the pipeline: 67 | -- 68 | -- > Tar.unpack dir . Tar.read . GZip.decompress =<< BS.readFile tar 69 | -- 70 | 71 | -- ** Security 72 | -- | This is pretty important. A maliciously constructed tar archive could 73 | -- contain entries that specify bad file names. It could specify absolute 74 | -- file names like \"@\/etc\/passwd@\" or relative files outside of the 75 | -- archive like \"..\/..\/..\/something\". This security problem is commonly 76 | -- called a \"directory traversal vulnerability\". Historically, such 77 | -- vulnerabilities have been common in packages handling tar archives. 78 | -- 79 | -- The 'extract' and 'unpack' functions check for bad file names. See the 80 | -- 'checkSecurity' function for more details. If you need to do any custom 81 | -- unpacking then you should use this. 82 | 83 | -- ** Tarbombs 84 | -- | A \"tarbomb\" is a @.tar@ file where not all entries are in a 85 | -- subdirectory but instead files extract into the top level directory. The 86 | -- 'extract' function does not check for these, however if you want to do 87 | -- that you can use the 'checkTarbomb' function like so: 88 | -- 89 | -- > Tar.unpack dir . Tar.checkTarbomb expectedDir 90 | -- > . Tar.read =<< BS.readFile tar 91 | -- 92 | -- In this case extraction will fail if any file is outside of @expectedDir@. 
93 | 94 | -- * Converting between internal and external representation 95 | -- | Note, you cannot expect @write . read@ to give exactly the same output 96 | -- as input. You can expect the information to be preserved exactly however. 97 | -- This is because 'read' accepts common format variations while 'write' 98 | -- produces the standard format. 99 | read, 100 | write, 101 | 102 | -- * Packing and unpacking files to\/from internal representation 103 | -- | These functions are for packing and unpacking portable archives. They 104 | -- are not suitable in cases where it is important to preserve file ownership 105 | -- and permissions or to archive special files like named pipes and Unix 106 | -- device files. 107 | pack, 108 | unpack, 109 | 110 | -- * Types 111 | -- ** Tar entry type 112 | -- | This module provides only very simple and limited read-only access to 113 | -- the 'Entry' type. If you need access to the details or if you need to 114 | -- construct your own entries then also import "Codec.Archive.Tar.Entry". 115 | Entry, 116 | entryPath, 117 | entryContent, 118 | EntryContent(..), 119 | 120 | -- ** Sequences of tar entries 121 | Entries(..), 122 | mapEntries, 123 | mapEntriesNoFail, 124 | foldEntries, 125 | unfoldEntries, 126 | 127 | -- * Error handling 128 | -- | Reading tar files can fail if the data does not match the tar file 129 | -- format correctly. 130 | -- 131 | -- Errors are handled by returning structured errors. The pure 132 | -- functions in the library do not throw exceptions, they return the errors 133 | -- as data. The IO actions in the library can throw exceptions, in particular 134 | -- the 'unpack' action does this. All the error types used are an instance of 135 | -- the standard 'Exception' class so it is possible to 'throw' and 'catch' 136 | -- them. 137 | 138 | -- ** Errors from reading tar files 139 | FormatError(..), 140 | ) where 141 | 142 | import Codec.Archive.Tar.Types 143 | 144 | import Codec.Archive.Tar.Read 145 | import Codec.Archive.Tar.Write 146 | 147 | import Codec.Archive.Tar.Pack 148 | import Codec.Archive.Tar.Unpack 149 | 150 | import qualified Data.ByteString.Lazy as BS 151 | import Prelude hiding (read) 152 | 153 | -- | Create a new @\".tar\"@ file from a directory of files. 154 | -- 155 | -- It is equivalent to calling the standard @tar@ program like so: 156 | -- 157 | -- @$ tar -f tarball.tar -C base -c dir@ 158 | -- 159 | -- This assumes a directory @.\/base\/dir@ with files inside, eg 160 | -- @.\/base\/dir\/foo.txt@. The file names inside the resulting tar file will be 161 | -- relative to @dir@, eg @dir\/foo.txt@. 162 | -- 163 | -- This is a high level \"all in one\" operation. Since you may need variations 164 | -- on this function it is instructive to see how it is written. It is just: 165 | -- 166 | -- > BS.writeFile tar . Tar.write =<< Tar.pack base paths 167 | -- 168 | -- Notes: 169 | -- 170 | -- The files and directories must not change during this operation or the 171 | -- result is not well defined. 172 | -- 173 | -- The intention of this function is to create tarballs that are portable 174 | -- between systems. It is /not/ suitable for doing file system backups because 175 | -- file ownership and permissions are not fully preserved. File ownership is 176 | -- not preserved at all. 
File permissions are set to simple portable values: 177 | -- 178 | -- * @rw-r--r--@ for normal files 179 | -- 180 | -- * @rwxr-xr-x@ for executable files 181 | -- 182 | -- * @rwxr-xr-x@ for directories 183 | -- 184 | create :: FilePath -- ^ Path of the \".tar\" file to write. 185 | -> FilePath -- ^ Base directory 186 | -> [FilePath] -- ^ Files and directories to archive, relative to base dir 187 | -> IO () 188 | create tar base paths = BS.writeFile tar . write =<< pack base paths 189 | 190 | -- | Extract all the files contained in a @\".tar\"@ file. 191 | -- 192 | -- It is equivalent to calling the standard @tar@ program like so: 193 | -- 194 | -- @$ tar -x -f tarball.tar -C dir@ 195 | -- 196 | -- So for example if the @tarball.tar@ file contains @foo\/bar.txt@ then this 197 | -- will extract it to @dir\/foo\/bar.txt@. 198 | -- 199 | -- This is a high level \"all in one\" operation. Since you may need variations 200 | -- on this function it is instructive to see how it is written. It is just: 201 | -- 202 | -- > Tar.unpack dir . Tar.read =<< BS.readFile tar 203 | -- 204 | -- Notes: 205 | -- 206 | -- Extracting can fail for a number of reasons. The tarball may be incorrectly 207 | -- formatted. There may be IO or permission errors. In such cases an exception 208 | -- will be thrown and extraction will not continue. 209 | -- 210 | -- Since the extraction may fail part way through it is not atomic. For this 211 | -- reason you may want to extract into an empty directory and, if the 212 | -- extraction fails, recursively delete the directory. 213 | -- 214 | -- Security: only files inside the target directory will be written. Tarballs 215 | -- containing entries that point outside of the tarball (either absolute paths 216 | -- or relative paths) will be caught and an exception will be thrown. 217 | -- 218 | extract :: FilePath -- ^ Destination directory 219 | -> FilePath -- ^ Tarball 220 | -> IO () 221 | extract dir tar = unpack dir . read =<< BS.readFile tar 222 | -------------------------------------------------------------------------------- /src/lib/Codec/Archive/Tar/Check.hs: -------------------------------------------------------------------------------- 1 | ----------------------------------------------------------------------------- 2 | -- | 3 | -- Module : Codec.Archive.Tar 4 | -- Copyright : (c) 2008-2012 Duncan Coutts 5 | -- 2011 Max Bolingbroke 6 | -- License : BSD3 7 | -- 8 | -- Maintainer : duncan@community.haskell.org 9 | -- Portability : portable 10 | -- 11 | -- Perform various checks on tar file entries. 
12 | -- 13 | ----------------------------------------------------------------------------- 14 | 15 | {-# LANGUAGE DeriveDataTypeable #-} 16 | 17 | module Codec.Archive.Tar.Check ( 18 | 19 | -- * Security 20 | checkSecurity, 21 | FileNameError(..), 22 | 23 | -- * Tarbombs 24 | checkTarbomb, 25 | TarBombError(..), 26 | 27 | -- * Portability 28 | checkPortability, 29 | PortabilityError(..), 30 | PortabilityPlatform, 31 | ) where 32 | 33 | import Codec.Archive.Tar.Types 34 | 35 | import Data.Typeable (Typeable) 36 | import Control.Exception (Exception) 37 | import Control.Monad (MonadPlus(mplus)) 38 | import qualified System.FilePath as FilePath.Native 39 | ( splitDirectories, isAbsolute, isValid ) 40 | 41 | import qualified System.FilePath.Windows as FilePath.Windows 42 | import qualified System.FilePath.Posix as FilePath.Posix 43 | 44 | 45 | -------------------------- 46 | -- Security 47 | -- 48 | 49 | -- | This function checks a sequence of tar entries for file name security 50 | -- problems. It checks that: 51 | -- 52 | -- * file paths are not absolute 53 | -- 54 | -- * file paths do not contain any path components that are \"@..@\" 55 | -- 56 | -- * file names are valid 57 | -- 58 | -- These checks are from the perspective of the current OS. That means we check 59 | -- for \"@C:\blah@\" files on Windows and \"\/blah\" files on Unix. For archive 60 | -- entry types 'HardLink' and 'SymbolicLink' the same checks are done for the 61 | -- link target. A failure in any entry terminates the sequence of entries with 62 | -- an error. 63 | -- 64 | checkSecurity :: Entries e -> Entries (Either e FileNameError) 65 | checkSecurity = checkEntries checkEntrySecurity 66 | 67 | checkEntrySecurity :: Entry -> Maybe FileNameError 68 | checkEntrySecurity entry = case entryContent entry of 69 | HardLink link -> check (entryPath entry) 70 | `mplus` check (fromLinkTarget link) 71 | SymbolicLink link -> check (entryPath entry) 72 | `mplus` check (fromLinkTarget link) 73 | _ -> check (entryPath entry) 74 | 75 | where 76 | check name 77 | | FilePath.Native.isAbsolute name 78 | = Just $ AbsoluteFileName name 79 | 80 | | not (FilePath.Native.isValid name) 81 | = Just $ InvalidFileName name 82 | 83 | | any (=="..") (FilePath.Native.splitDirectories name) 84 | = Just $ InvalidFileName name 85 | 86 | | otherwise = Nothing 87 | 88 | -- | Errors arising from tar file names being in some way invalid or dangerous 89 | data FileNameError 90 | = InvalidFileName FilePath 91 | | AbsoluteFileName FilePath 92 | deriving (Typeable) 93 | 94 | instance Show FileNameError where 95 | show = showFileNameError Nothing 96 | 97 | instance Exception FileNameError 98 | 99 | showFileNameError :: Maybe PortabilityPlatform -> FileNameError -> String 100 | showFileNameError mb_plat err = case err of 101 | InvalidFileName path -> "Invalid" ++ plat ++ " file name in tar archive: " ++ show path 102 | AbsoluteFileName path -> "Absolute" ++ plat ++ " file name in tar archive: " ++ show path 103 | where plat = maybe "" (' ':) mb_plat 104 | 105 | 106 | -------------------------- 107 | -- Tarbombs 108 | -- 109 | 110 | -- | This function checks a sequence of tar entries for being a \"tar bomb\". 111 | -- This means that the tar file does not follow the standard convention that 112 | -- all entries are within a single subdirectory, e.g. a file \"foo.tar\" would 113 | -- usually have all entries within the \"foo/\" subdirectory. 114 | -- 115 | -- Given the expected subdirectory, this function checks all entries are within 116 | -- that subdirectory. 
117 | -- 118 | -- Note: This check must be used in conjunction with 'checkSecurity'. 119 | -- 120 | checkTarbomb :: FilePath -> Entries e -> Entries (Either e TarBombError) 121 | checkTarbomb expectedTopDir = checkEntries (checkEntryTarbomb expectedTopDir) 122 | 123 | checkEntryTarbomb :: FilePath -> Entry -> Maybe TarBombError 124 | checkEntryTarbomb expectedTopDir entry = 125 | case FilePath.Native.splitDirectories (entryPath entry) of 126 | (topDir:_) | topDir == expectedTopDir -> Nothing 127 | _ -> Just $ TarBombError expectedTopDir 128 | 129 | -- | An error that occurs if a tar file is a \"tar bomb\" that would extract 130 | -- files outside of the intended directory. 131 | data TarBombError = TarBombError FilePath 132 | deriving (Typeable) 133 | 134 | instance Exception TarBombError 135 | 136 | instance Show TarBombError where 137 | show (TarBombError expectedTopDir) 138 | = "File in tar archive is not in the expected directory " ++ show expectedTopDir 139 | 140 | 141 | -------------------------- 142 | -- Portability 143 | -- 144 | 145 | -- | This function checks a sequence of tar entries for a number of portability 146 | -- issues. It will complain if: 147 | -- 148 | -- * The old \"Unix V7\" or \"gnu\" formats are used. For maximum portability 149 | -- only the POSIX standard \"ustar\" format should be used. 150 | -- 151 | -- * A non-portable entry type is used. Only ordinary files, hard links, 152 | -- symlinks and directories are portable. Device files, pipes and others are 153 | -- not portable between all common operating systems. 154 | -- 155 | -- * Non-ASCII characters are used in file names. There is no agreed portable 156 | -- convention for Unicode or other extended character sets in file names in 157 | -- tar archives. 158 | -- 159 | -- * File names that would not be portable to both Unix and Windows. This check 160 | -- includes characters that are valid in both systems and the \'/\' vs \'\\\' 161 | -- directory separator conventions. 
162 | -- 163 | checkPortability :: Entries e -> Entries (Either e PortabilityError) 164 | checkPortability = checkEntries checkEntryPortability 165 | 166 | checkEntryPortability :: Entry -> Maybe PortabilityError 167 | checkEntryPortability entry 168 | | entryFormat entry `elem` [V7Format, GnuFormat] 169 | = Just $ NonPortableFormat (entryFormat entry) 170 | 171 | | not (portableFileType (entryContent entry)) 172 | = Just NonPortableFileType 173 | 174 | | not (all portableChar posixPath) 175 | = Just $ NonPortableEntryNameChar posixPath 176 | 177 | | not (FilePath.Posix.isValid posixPath) 178 | = Just $ NonPortableFileName "unix" (InvalidFileName posixPath) 179 | | not (FilePath.Windows.isValid windowsPath) 180 | = Just $ NonPortableFileName "windows" (InvalidFileName windowsPath) 181 | 182 | | FilePath.Posix.isAbsolute posixPath 183 | = Just $ NonPortableFileName "unix" (AbsoluteFileName posixPath) 184 | | FilePath.Windows.isAbsolute windowsPath 185 | = Just $ NonPortableFileName "windows" (AbsoluteFileName windowsPath) 186 | 187 | | any (=="..") (FilePath.Posix.splitDirectories posixPath) 188 | = Just $ NonPortableFileName "unix" (InvalidFileName posixPath) 189 | | any (=="..") (FilePath.Windows.splitDirectories windowsPath) 190 | = Just $ NonPortableFileName "windows" (InvalidFileName windowsPath) 191 | 192 | | otherwise = Nothing 193 | 194 | where 195 | posixPath = fromTarPathToPosixPath (entryTarPath entry) 196 | windowsPath = fromTarPathToWindowsPath (entryTarPath entry) 197 | 198 | portableFileType ftype = case ftype of 199 | NormalFile {} -> True 200 | HardLink {} -> True 201 | SymbolicLink {} -> True 202 | Directory -> True 203 | _ -> False 204 | 205 | portableChar c = c <= '\127' 206 | 207 | -- | Potential portability issues in a tar archive 208 | data PortabilityError 209 | = NonPortableFormat Format 210 | | NonPortableFileType 211 | | NonPortableEntryNameChar FilePath 212 | | NonPortableFileName PortabilityPlatform FileNameError 213 | deriving (Typeable) 214 | 215 | -- | The name of a platform that portability issues arise from 216 | type PortabilityPlatform = String 217 | 218 | instance Exception PortabilityError 219 | 220 | instance Show PortabilityError where 221 | show (NonPortableFormat format) = "Archive is in the " ++ fmt ++ " format" 222 | where fmt = case format of V7Format -> "old Unix V7 tar" 223 | UstarFormat -> "ustar" -- I never generate this but a user might 224 | GnuFormat -> "GNU tar" 225 | show NonPortableFileType = "Non-portable file type in archive" 226 | show (NonPortableEntryNameChar posixPath) 227 | = "Non-portable character in archive entry name: " ++ show posixPath 228 | show (NonPortableFileName platform err) 229 | = showFileNameError (Just platform) err 230 | 231 | 232 | -------------------------- 233 | -- Utils 234 | -- 235 | 236 | checkEntries :: (Entry -> Maybe e') -> Entries e -> Entries (Either e e') 237 | checkEntries checkEntry = 238 | mapEntries (\entry -> maybe (Right entry) Left (checkEntry entry)) 239 | -------------------------------------------------------------------------------- /src/lib/Codec/Archive/Tar/Entry.hs: -------------------------------------------------------------------------------- 1 | ----------------------------------------------------------------------------- 2 | -- | 3 | -- Module : Codec.Archive.Tar.Entry 4 | -- Copyright : (c) 2007 Bjorn Bringert, 5 | -- 2008 Andrea Vezzosi, 6 | -- 2008-2009 Duncan Coutts 7 | -- License : BSD3 8 | -- 9 | -- Maintainer : duncan@community.haskell.org 10 | -- Portability : portable 11 | 
-- 12 | -- Types and functions to manipulate tar entries. 13 | -- 14 | -- While the "Codec.Archive.Tar" module provides only the simple high level 15 | -- API, this module provides full access to the details of tar entries. This 16 | -- lets you inspect all the meta-data, construct entries and handle error cases 17 | -- more precisely. 18 | -- 19 | -- This module uses common names and so is designed to be imported qualified: 20 | -- 21 | -- > import qualified Codec.Archive.Tar as Tar 22 | -- > import qualified Codec.Archive.Tar.Entry as Tar 23 | -- 24 | ----------------------------------------------------------------------------- 25 | module Codec.Archive.Tar.Entry ( 26 | 27 | -- * Tar entry and associated types 28 | Entry(..), 29 | --TODO: should be the following with the Entry constructor not exported, 30 | -- but haddock cannot document that properly 31 | -- see http://trac.haskell.org/haddock/ticket/3 32 | --Entry(filePath, fileMode, ownerId, groupId, fileSize, modTime, 33 | -- fileType, linkTarget, headerExt, fileContent), 34 | entryPath, 35 | EntryContent(..), 36 | Ownership(..), 37 | 38 | FileSize, 39 | Permissions, 40 | EpochTime, 41 | DevMajor, 42 | DevMinor, 43 | TypeCode, 44 | Format(..), 45 | 46 | -- * Constructing simple entry values 47 | simpleEntry, 48 | fileEntry, 49 | directoryEntry, 50 | 51 | -- * Standard file permissions 52 | -- | For maximum portability when constructing archives use only these file 53 | -- permissions. 54 | ordinaryFilePermissions, 55 | executableFilePermissions, 56 | directoryPermissions, 57 | 58 | -- * Constructing entries from disk files 59 | packFileEntry, 60 | packDirectoryEntry, 61 | getDirectoryContentsRecursive, 62 | 63 | -- * TarPath type 64 | TarPath, 65 | toTarPath, 66 | fromTarPath, 67 | fromTarPathToPosixPath, 68 | fromTarPathToWindowsPath, 69 | 70 | -- * LinkTarget type 71 | LinkTarget, 72 | toLinkTarget, 73 | fromLinkTarget, 74 | fromLinkTargetToPosixPath, 75 | fromLinkTargetToWindowsPath, 76 | 77 | ) where 78 | 79 | import Codec.Archive.Tar.Types 80 | import Codec.Archive.Tar.Pack 81 | -------------------------------------------------------------------------------- /src/lib/Codec/Archive/Tar/Pack.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE CPP #-} 2 | ----------------------------------------------------------------------------- 3 | -- | 4 | -- Module : Codec.Archive.Tar 5 | -- Copyright : (c) 2007 Bjorn Bringert, 6 | -- 2008 Andrea Vezzosi, 7 | -- 2008-2009 Duncan Coutts 8 | -- License : BSD3 9 | -- 10 | -- Maintainer : duncan@community.haskell.org 11 | -- Portability : portable 12 | -- 13 | ----------------------------------------------------------------------------- 14 | module Codec.Archive.Tar.Pack ( 15 | pack, 16 | packFileEntry, 17 | packDirectoryEntry, 18 | 19 | getDirectoryContentsRecursive, 20 | ) where 21 | 22 | import Codec.Archive.Tar.Types 23 | 24 | import qualified Data.ByteString.Lazy as BS 25 | import System.FilePath 26 | ( (</>) ) 27 | import qualified System.FilePath as FilePath.Native 28 | ( addTrailingPathSeparator, hasTrailingPathSeparator ) 29 | import System.Directory 30 | ( getDirectoryContents, doesDirectoryExist, getModificationTime 31 | , Permissions(..), getPermissions ) 32 | #if MIN_VERSION_directory(1,2,0) 33 | -- The directory package switched to the new time package 34 | import Data.Time.Clock.POSIX 35 | ( utcTimeToPOSIXSeconds ) 36 | #else 37 | import System.Time 38 | ( ClockTime(..) )
) 39 | #endif 40 | import System.IO 41 | ( IOMode(ReadMode), openBinaryFile, hFileSize ) 42 | import System.IO.Unsafe (unsafeInterleaveIO) 43 | 44 | -- | Creates a tar archive from a list of directories or files. Any directories 45 | -- specified will have their contents included recursively. Paths in the 46 | -- archive will be relative to the given base directory. 47 | -- 48 | -- This is a portable implementation of packing suitable for portable archives. 49 | -- In particular it only constructs 'NormalFile' and 'Directory' entries. Hard 50 | -- links and symbolic links are treated like ordinary files. It cannot be used 51 | -- to pack directories containing recursive symbolic links. Special files like 52 | -- FIFOs (named pipes), sockets or device files will also cause problems. 53 | -- 54 | -- An exception will be thrown for any file names that are too long to 55 | -- represent as a 'TarPath'. 56 | -- 57 | -- * This function returns results lazily. Subdirectories are scanned 58 | -- and files are read one by one as the list of entries is consumed. 59 | -- 60 | pack :: FilePath -- ^ Base directory 61 | -> [FilePath] -- ^ Files and directories to pack, relative to the base dir 62 | -> IO [Entry] 63 | pack baseDir paths0 = preparePaths baseDir paths0 >>= packPaths baseDir 64 | 65 | preparePaths :: FilePath -> [FilePath] -> IO [FilePath] 66 | preparePaths baseDir paths = 67 | fmap concat $ interleave 68 | [ do isDir <- doesDirectoryExist (baseDir </> path) 69 | if isDir 70 | then do entries <- getDirectoryContentsRecursive (baseDir </> path) 71 | let entries' = map (path </>) entries 72 | dir = FilePath.Native.addTrailingPathSeparator path 73 | if null path then return entries' 74 | else return (dir : entries') 75 | else return [path] 76 | | path <- paths ] 77 | 78 | packPaths :: FilePath -> [FilePath] -> IO [Entry] 79 | packPaths baseDir paths = 80 | interleave 81 | [ do tarpath <- either fail return (toTarPath isDir relpath) 82 | if isDir then packDirectoryEntry filepath tarpath 83 | else packFileEntry filepath tarpath 84 | | relpath <- paths 85 | , let isDir = FilePath.Native.hasTrailingPathSeparator filepath 86 | filepath = baseDir </> relpath ] 87 | 88 | interleave :: [IO a] -> IO [a] 89 | interleave = unsafeInterleaveIO . go 90 | where 91 | go [] = return [] 92 | go (x:xs) = do 93 | x' <- x 94 | xs' <- interleave xs 95 | return (x':xs') 96 | 97 | -- | Construct a tar 'Entry' based on a local file. 98 | -- 99 | -- This sets the entry size, the data contained in the file and the file's 100 | -- modification time. If the file is executable then that information is also 101 | -- preserved. File ownership and detailed permissions are not preserved. 102 | -- 103 | -- * The file contents are read lazily. 104 | -- 105 | packFileEntry :: FilePath -- ^ Full path to find the file on the local disk 106 | -> TarPath -- ^ Path to use for the tar Entry in the archive 107 | -> IO Entry 108 | packFileEntry filepath tarpath = do 109 | mtime <- getModTime filepath 110 | perms <- getPermissions filepath 111 | file <- openBinaryFile filepath ReadMode 112 | size <- hFileSize file 113 | content <- BS.hGetContents file 114 | return (simpleEntry tarpath (NormalFile content (fromIntegral size))) { 115 | entryPermissions = if executable perms then executableFilePermissions 116 | else ordinaryFilePermissions, 117 | entryTime = mtime 118 | } 119 | 120 | -- | Construct a tar 'Entry' based on a local directory (but not its contents). 121 | -- 122 | -- The only attribute of the directory that is used is its modification time. 123 | -- Directory ownership and detailed permissions are not preserved. 124 | -- 125 | packDirectoryEntry :: FilePath -- ^ Full path to find the directory on the local disk 126 | -> TarPath -- ^ Path to use for the tar Entry in the archive 127 | -> IO Entry 128 | packDirectoryEntry filepath tarpath = do 129 | mtime <- getModTime filepath 130 | return (directoryEntry tarpath) { 131 | entryTime = mtime 132 | } 133 |
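-- A short usage sketch tying 'pack' to the 'write' function from the sibling
-- Write module (the base directory, the "src" directory and the "archive.tar"
-- file name are made-up examples, not part of this library):
--
-- > import qualified Codec.Archive.Tar as Tar
-- > import qualified Data.ByteString.Lazy as BS
-- >
-- > -- pack ./src recursively and serialise it; both steps are lazy, so
-- > -- file contents are only read as the archive bytes are written out
-- > createTar :: IO ()
-- > createTar = Tar.pack "." ["src"] >>= BS.writeFile "archive.tar" . Tar.write
--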
134 | -- | This is a utility function, much like 'getDirectoryContents'. The 135 | -- difference is that it includes the contents of subdirectories. 136 | -- 137 | -- The paths returned are all relative to the top directory. Directory paths 138 | -- are distinguishable by having a trailing path separator 139 | -- (see 'FilePath.Native.hasTrailingPathSeparator'). 140 | -- 141 | -- All directories are listed before the files that they contain. Amongst the 142 | -- contents of a directory, subdirectories are listed after normal files. The 143 | -- overall result is that files within a directory will be together in a single 144 | -- contiguous group. This tends to improve file layout and IO performance when 145 | -- creating or extracting tar archives. 146 | -- 147 | -- * This function returns results lazily. Subdirectories are not scanned 148 | -- until the file entries in the parent directory have been consumed. 149 | -- 150 | getDirectoryContentsRecursive :: FilePath -> IO [FilePath] 151 | getDirectoryContentsRecursive dir0 = 152 | fmap tail (recurseDirectories dir0 [""]) 153 | 154 | recurseDirectories :: FilePath -> [FilePath] -> IO [FilePath] 155 | recurseDirectories _ [] = return [] 156 | recurseDirectories base (dir:dirs) = unsafeInterleaveIO $ do 157 | (files, dirs') <- collect [] [] =<< getDirectoryContents (base </> dir) 158 | 159 | files' <- recurseDirectories base (dirs' ++ dirs) 160 | return (dir : files ++ files') 161 | 162 | where 163 | collect files dirs' [] = return (reverse files, reverse dirs') 164 | collect files dirs' (entry:entries) | ignore entry 165 | = collect files dirs' entries 166 | collect files dirs' (entry:entries) = do 167 | let dirEntry = dir </> entry 168 | dirEntry' = FilePath.Native.addTrailingPathSeparator dirEntry 169 | isDirectory <- doesDirectoryExist (base </> dirEntry) 170 | if isDirectory 171 | then collect files (dirEntry':dirs') entries 172 | else collect (dirEntry:files) dirs' entries 173 | 174 | ignore ['.'] = True 175 | ignore ['.', '.'] = True 176 | ignore _ = False 177 | 178 | getModTime :: FilePath -> IO EpochTime 179 | getModTime path = do 180 | #if MIN_VERSION_directory(1,2,0) 181 | -- The directory package switched to the new time package 182 | t <- getModificationTime path 183 | return . floor . utcTimeToPOSIXSeconds $ t 184 | #else 185 | (TOD s _) <- getModificationTime path 186 | return $!
fromIntegral s 187 | #endif 188 | -------------------------------------------------------------------------------- /src/lib/Codec/Archive/Tar/Read.hs: -------------------------------------------------------------------------------- 1 | ----------------------------------------------------------------------------- 2 | -- | 3 | -- Module : Codec.Archive.Tar.Read 4 | -- Copyright : (c) 2007 Bjorn Bringert, 5 | -- 2008 Andrea Vezzosi, 6 | -- 2008-2009 Duncan Coutts, 7 | -- 2011 Max Bolingbroke 8 | -- License : BSD3 9 | -- 10 | -- Maintainer : duncan@community.haskell.org 11 | -- Portability : portable 12 | -- 13 | ----------------------------------------------------------------------------- 14 | 15 | {-# LANGUAGE DeriveDataTypeable #-} 16 | 17 | module Codec.Archive.Tar.Read (read, FormatError(..)) where 18 | 19 | import Codec.Archive.Tar.Types 20 | 21 | import Data.Char (ord) 22 | import Data.Int (Int64) 23 | import Numeric (readOct) 24 | import Control.Exception (Exception) 25 | import Data.Typeable (Typeable) 26 | import Control.Monad 27 | import Control.Applicative 28 | 29 | import qualified Data.ByteString.Lazy as BS 30 | import qualified Data.ByteString.Lazy.Char8 as BS.Char8 31 | import Data.ByteString.Lazy (ByteString) 32 | 33 | import Prelude hiding (read) 34 | 35 | 36 | -- | Errors that can be encountered when parsing a Tar archive. 37 | data FormatError 38 | = TruncatedArchive 39 | | ShortTrailer 40 | | BadTrailer 41 | | TrailingJunk 42 | | ChecksumIncorrect 43 | | NotTarFormat 44 | | UnrecognisedTarFormat 45 | | HeaderBadNumericEncoding 46 | deriving (Typeable) 47 | 48 | instance Show FormatError where 49 | show TruncatedArchive = "truncated tar archive" 50 | show ShortTrailer = "short tar trailer" 51 | show BadTrailer = "bad tar trailer" 52 | show TrailingJunk = "tar file has trailing junk" 53 | show ChecksumIncorrect = "tar checksum error" 54 | show NotTarFormat = "data is not in tar format" 55 | show UnrecognisedTarFormat = "tar entry not in a recognised format" 56 | show HeaderBadNumericEncoding = "tar header is malformed (bad numeric encoding)" 57 | 58 | instance Exception FormatError 59 | 60 | 61 | -- | Convert a data stream in the tar file format into an internal data 62 | -- structure. Decoding errors are reported by the 'Fail' constructor of the 63 | -- 'Entries' type. 64 | -- 65 | -- * The conversion is done lazily. 66 | -- 67 | read :: ByteString -> Entries FormatError 68 | read = unfoldEntries getEntry 69 | 70 | getEntry :: ByteString -> Either FormatError (Maybe (Entry, ByteString)) 71 | getEntry bs 72 | | BS.length header < 512 = Left TruncatedArchive 73 | 74 | -- Tar files end with at least two blocks of all '0'. Checking this serves 75 | -- two purposes. It checks the format but also forces the tail of the data 76 | -- which is necessary to close the file if it came from a lazily read file. 
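-- Concretely, the end-of-archive marker is 1024 NUL bytes (two 512-byte
-- blocks), so a standalone test for it could look like this sketch
-- (reusing the lazy ByteString imports above; not used by this module):
--
-- > isEndMarker :: ByteString -> Bool
-- > isEndMarker bs = case BS.splitAt 1024 bs of
-- >   (end, _) -> BS.length end == 1024 && BS.all (== 0) end
--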
77 | | BS.head bs == 0 = case BS.splitAt 1024 bs of 78 | (end, trailing) 79 | | BS.length end /= 1024 -> Left ShortTrailer 80 | | not (BS.all (== 0) end) -> Left BadTrailer 81 | | not (BS.all (== 0) trailing) -> Left TrailingJunk 82 | | otherwise -> Right Nothing 83 | 84 | | otherwise = partial $ do 85 | 86 | case (chksum_, format_) of 87 | (Ok chksum, _ ) | correctChecksum header chksum -> return () 88 | (Ok _, Ok _) -> Error ChecksumIncorrect 89 | _ -> Error NotTarFormat 90 | 91 | -- These fields are partial, have to check them 92 | format <- format_; mode <- mode_; 93 | uid <- uid_; gid <- gid_; 94 | size <- size_; mtime <- mtime_; 95 | devmajor <- devmajor_; devminor <- devminor_; 96 | 97 | let content = BS.take size (BS.drop 512 bs) 98 | padding = (512 - size) `mod` 512 99 | bs' = BS.drop (512 + size + padding) bs 100 | 101 | entry = Entry { 102 | entryTarPath = TarPath name prefix, 103 | entryContent = case typecode of 104 | '\0' -> NormalFile content size 105 | '0' -> NormalFile content size 106 | '1' -> HardLink (LinkTarget linkname) 107 | '2' -> SymbolicLink (LinkTarget linkname) 108 | '3' -> CharacterDevice devmajor devminor 109 | '4' -> BlockDevice devmajor devminor 110 | '5' -> Directory 111 | '6' -> NamedPipe 112 | '7' -> NormalFile content size 113 | _ -> OtherEntryType typecode content size, 114 | entryPermissions = mode, 115 | entryOwnership = Ownership uname gname uid gid, 116 | entryTime = mtime, 117 | entryFormat = format 118 | } 119 | 120 | return (Just (entry, bs')) 121 | 122 | where 123 | header = BS.take 512 bs 124 | 125 | name = getString 0 100 header 126 | mode_ = getOct 100 8 header 127 | uid_ = getOct 108 8 header 128 | gid_ = getOct 116 8 header 129 | size_ = getOct 124 12 header 130 | mtime_ = getOct 136 12 header 131 | chksum_ = getOct 148 8 header 132 | typecode = getByte 156 header 133 | linkname = getString 157 100 header 134 | magic = getChars 257 8 header 135 | uname = getString 265 32 header 136 | gname = getString 297 32 header 137 | devmajor_ = getOct 329 8 header 138 | devminor_ = getOct 337 8 header 139 | prefix = getString 345 155 header 140 | -- trailing = getBytes 500 12 header 141 | 142 | format_ = case magic of 143 | "\0\0\0\0\0\0\0\0" -> return V7Format 144 | "ustar\NUL00" -> return UstarFormat 145 | "ustar \NUL" -> return GnuFormat 146 | "ustar\0\0\0" -> return GnuFormat 147 | _ -> Error UnrecognisedTarFormat 148 | 149 | correctChecksum :: ByteString -> Int -> Bool 150 | correctChecksum header checksum = checksum == checksum' 151 | where 152 | -- sum of all 512 bytes in the header block, 153 | -- treating each byte as an 8-bit unsigned value 154 | checksum' = BS.Char8.foldl' (\x y -> x + ord y) 0 header' 155 | -- treating the 8 bytes of chksum as blank characters. 156 | header' = BS.concat [BS.take 148 header, 157 | BS.Char8.replicate 8 ' ', 158 | BS.drop 156 header] 159 | 160 | -- * TAR format primitive input 161 | 162 | getOct :: Integral a => Int64 -> Int64 -> ByteString -> Partial FormatError a 163 | getOct off len = parseOct 164 | . BS.Char8.unpack 165 | . BS.Char8.takeWhile (\c -> c /= '\NUL' && c /= ' ') 166 | . BS.Char8.dropWhile (== ' ') 167 | . getBytes off len 168 | where 169 | parseOct "" = return 0 170 | -- As a star extension, octal fields can hold a base-256 value if the high 171 | -- bit of the initial character is set. 
The initial character can be: 172 | -- 0x80 ==> trailing characters hold a positive base-256 value 173 | -- 0xFF ==> trailing characters hold a negative base-256 value 174 | -- 175 | -- In both cases, there won't be a trailing NUL/space. 176 | -- 177 | -- GNU tar seems to contain a half-implementation of code that deals with 178 | -- extra bits in the first character, but I don't think it works and the 179 | -- docs I can find on star seem to suggest that these will always be 0, 180 | -- which is what I will assume. 181 | parseOct ('\128':xs) = return (readBytes xs) 182 | parseOct ('\255':xs) = return (negate (readBytes xs)) 183 | parseOct s = case readOct s of 184 | [(x,[])] -> return x 185 | _ -> Error HeaderBadNumericEncoding 186 | 187 | readBytes = go 0 188 | where go acc [] = acc 189 | go acc (x:xs) = go (acc * 256 + fromIntegral (ord x)) xs 190 | 191 | getBytes :: Int64 -> Int64 -> ByteString -> ByteString 192 | getBytes off len = BS.take len . BS.drop off 193 | 194 | getByte :: Int64 -> ByteString -> Char 195 | getByte off bs = BS.Char8.index bs off 196 | 197 | getChars :: Int64 -> Int64 -> ByteString -> String 198 | getChars off len = BS.Char8.unpack . getBytes off len 199 | 200 | getString :: Int64 -> Int64 -> ByteString -> String 201 | getString off len = BS.Char8.unpack . BS.Char8.takeWhile (/='\0') . getBytes off len 202 | 203 | data Partial e a = Error e | Ok a 204 | 205 | partial :: Partial e a -> Either e a 206 | partial (Error msg) = Left msg 207 | partial (Ok x) = Right x 208 | 209 | instance Functor (Partial e) where 210 | fmap = liftM 211 | 212 | instance Applicative (Partial e) where 213 | pure = return 214 | (<*>) = ap 215 | 216 | instance Monad (Partial e) where 217 | return = Ok 218 | Error m >>= _ = Error m 219 | Ok x >>= k = k x 220 | fail = error "fail @(Partial e)" 221 | -------------------------------------------------------------------------------- /src/lib/Codec/Archive/Tar/Unpack.hs: -------------------------------------------------------------------------------- 1 | ----------------------------------------------------------------------------- 2 | -- | 3 | -- Module : Codec.Archive.Tar 4 | -- Copyright : (c) 2007 Bjorn Bringert, 5 | -- 2008 Andrea Vezzosi, 6 | -- 2008-2009 Duncan Coutts 7 | -- License : BSD3 8 | -- 9 | -- Maintainer : duncan@community.haskell.org 10 | -- Portability : portable 11 | -- 12 | ----------------------------------------------------------------------------- 13 | module Codec.Archive.Tar.Unpack ( 14 | unpack, 15 | ) where 16 | 17 | import Codec.Archive.Tar.Types 18 | import Codec.Archive.Tar.Check 19 | 20 | import qualified Data.ByteString.Lazy as BS 21 | import System.FilePath 22 | ( (</>) ) 23 | import qualified System.FilePath as FilePath.Native 24 | ( takeDirectory ) 25 | import System.Directory 26 | ( createDirectoryIfMissing, copyFile ) 27 | import Control.Exception 28 | ( Exception, throwIO ) 29 | 30 | -- | Create local files and directories based on the entries of a tar archive. 31 | -- 32 | -- This is a portable implementation of unpacking suitable for portable 33 | -- archives. It handles 'NormalFile' and 'Directory' entries and has simulated 34 | -- support for 'SymbolicLink' and 'HardLink' entries. Links are implemented by 35 | -- copying the target file. This therefore works on Windows as well as Unix. 36 | -- All other entry types are ignored, that is they are not unpacked and no 37 | -- exception is raised. 38 | -- 39 | -- If the 'Entries' ends in an error then it is raised as an exception.
Any 40 | -- files or directories that have been unpacked before the error was 41 | -- encountered will not be deleted. For this reason you may want to unpack 42 | -- into an empty directory so that you can easily clean up if unpacking fails 43 | -- part-way. 44 | -- 45 | -- On its own, this function only checks for security (using 'checkSecurity'). 46 | -- You can do other checks by applying checking functions to the 'Entries' that 47 | -- you pass to this function. For example: 48 | -- 49 | -- > unpack dir (checkTarbomb expectedDir entries) 50 | -- 51 | -- If you care about the priority of the reported errors then you may want to 52 | -- use 'checkSecurity' before 'checkTarbomb' or other checks. 53 | -- 54 | unpack :: Exception e => FilePath -> Entries e -> IO () 55 | unpack baseDir entries = unpackEntries [] (checkSecurity entries) 56 | >>= emulateLinks 57 | 58 | where 59 | -- We're relying here on 'checkSecurity' to make sure we're not scribbling 60 | -- files all over the place. 61 | 62 | unpackEntries _ (Fail err) = either throwIO throwIO err 63 | unpackEntries links Done = return links 64 | unpackEntries links (Next entry es) = case entryContent entry of 65 | NormalFile file _ -> extractFile path file 66 | >> unpackEntries links es 67 | Directory -> extractDir path 68 | >> unpackEntries links es 69 | HardLink link -> (unpackEntries $! saveLink path link links) es 70 | SymbolicLink link -> (unpackEntries $! saveLink path link links) es 71 | _ -> unpackEntries links es --ignore other file types 72 | where 73 | path = entryPath entry 74 | 75 | extractFile path content = do 76 | -- Note that tar archives do not make sure each directory is created 77 | -- before files they contain, indeed we may have to create several 78 | -- levels of directory. 79 | createDirectoryIfMissing True absDir 80 | BS.writeFile absPath content 81 | where 82 | absDir = baseDir </> FilePath.Native.takeDirectory path 83 | absPath = baseDir </> path 84 | 85 | extractDir path = createDirectoryIfMissing True (baseDir </> path) 86 | 87 | saveLink path link links = seq (length path) 88 | $ seq (length link') 89 | $ (path, link'):links 90 | where link' = fromLinkTarget link 91 | 92 | emulateLinks = mapM_ $ \(relPath, relLinkTarget) -> 93 | let absPath = baseDir </> relPath 94 | absTarget = FilePath.Native.takeDirectory absPath </> relLinkTarget 95 | in copyFile absTarget absPath 96 | -------------------------------------------------------------------------------- /src/lib/Codec/Archive/Tar/Write.hs: -------------------------------------------------------------------------------- 1 | ----------------------------------------------------------------------------- 2 | -- | 3 | -- Module : Codec.Archive.Tar.Write 4 | -- Copyright : (c) 2007 Bjorn Bringert, 5 | -- 2008 Andrea Vezzosi, 6 | -- 2008-2009 Duncan Coutts 7 | -- License : BSD3 8 | -- 9 | -- Maintainer : duncan@community.haskell.org 10 | -- Portability : portable 11 | -- 12 | ----------------------------------------------------------------------------- 13 | module Codec.Archive.Tar.Write (write) where 14 | 15 | import Codec.Archive.Tar.Types 16 | 17 | import Data.Char (ord) 18 | import Data.List (foldl') 19 | import Numeric (showOct) 20 | 21 | import qualified Data.ByteString.Lazy as BS 22 | import qualified Data.ByteString.Lazy.Char8 as BS.Char8 23 | import Data.ByteString.Lazy (ByteString) 24 | 25 | 26 | -- | Create the external representation of a tar archive by serialising a list 27 | -- of tar entries. 28 | -- 29 | -- * The conversion is done lazily.
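-- A round-trip sketch combining this with 'read' (the file names are
-- assumptions for illustration; any format error is simply re-thrown):
--
-- > import qualified Codec.Archive.Tar as Tar
-- > import qualified Data.ByteString.Lazy as BS
-- >
-- > -- parse an archive and serialise it back out again
-- > rewriteTar :: FilePath -> FilePath -> IO ()
-- > rewriteTar src dst = BS.readFile src >>= BS.writeFile dst
-- >   . Tar.write . Tar.foldEntries (:) [] (error . show) . Tar.read
--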
30 | -- 31 | write :: [Entry] -> ByteString 32 | write es = BS.concat $ map putEntry es ++ [BS.replicate (512*2) 0] 33 | 34 | putEntry :: Entry -> ByteString 35 | putEntry entry = case entryContent entry of 36 | NormalFile content size -> BS.concat [ header, content, padding size ] 37 | OtherEntryType _ content size -> BS.concat [ header, content, padding size ] 38 | _ -> header 39 | where 40 | header = putHeader entry 41 | padding size = BS.replicate paddingSize 0 42 | where paddingSize = fromIntegral (negate size `mod` 512) 43 | 44 | putHeader :: Entry -> ByteString 45 | putHeader entry = 46 | BS.Char8.pack $ take 148 block 47 | ++ putOct 7 checksum 48 | ++ ' ' : drop 156 block 49 | -- ++ putOct 8 checksum 50 | -- ++ drop 156 block 51 | where 52 | block = putHeaderNoChkSum entry 53 | checksum = foldl' (\x y -> x + ord y) 0 block 54 | 55 | putHeaderNoChkSum :: Entry -> String 56 | putHeaderNoChkSum Entry { 57 | entryTarPath = TarPath name prefix, 58 | entryContent = content, 59 | entryPermissions = permissions, 60 | entryOwnership = ownership, 61 | entryTime = modTime, 62 | entryFormat = format 63 | } = 64 | 65 | concat 66 | [ putString 100 $ name 67 | , putOct 8 $ permissions 68 | , putOct 8 $ ownerId ownership 69 | , putOct 8 $ groupId ownership 70 | , putOct 12 $ contentSize 71 | , putOct 12 $ modTime 72 | , fill 8 $ ' ' -- dummy checksum 73 | , putChar8 $ typeCode 74 | , putString 100 $ linkTarget 75 | ] ++ 76 | case format of 77 | V7Format -> 78 | fill 255 '\NUL' 79 | UstarFormat -> concat 80 | [ putString 8 $ "ustar\NUL00" 81 | , putString 32 $ ownerName ownership 82 | , putString 32 $ groupName ownership 83 | , putOct 8 $ deviceMajor 84 | , putOct 8 $ deviceMinor 85 | , putString 155 $ prefix 86 | , fill 12 $ '\NUL' 87 | ] 88 | GnuFormat -> concat 89 | [ putString 8 $ "ustar \NUL" 90 | , putString 32 $ ownerName ownership 91 | , putString 32 $ groupName ownership 92 | , putGnuDev 8 $ deviceMajor 93 | , putGnuDev 8 $ deviceMinor 94 | , putString 155 $ prefix 95 | , fill 12 $ '\NUL' 96 | ] 97 | where 98 | (typeCode, contentSize, linkTarget, 99 | deviceMajor, deviceMinor) = case content of 100 | NormalFile _ size -> ('0' , size, [], 0, 0) 101 | Directory -> ('5' , 0, [], 0, 0) 102 | SymbolicLink (LinkTarget link) -> ('2' , 0, link, 0, 0) 103 | HardLink (LinkTarget link) -> ('1' , 0, link, 0, 0) 104 | CharacterDevice major minor -> ('3' , 0, [], major, minor) 105 | BlockDevice major minor -> ('4' , 0, [], major, minor) 106 | NamedPipe -> ('6' , 0, [], 0, 0) 107 | OtherEntryType code _ size -> (code, size, [], 0, 0) 108 | 109 | putGnuDev w n = case content of 110 | CharacterDevice _ _ -> putOct w n 111 | BlockDevice _ _ -> putOct w n 112 | _ -> replicate w '\NUL' 113 | 114 | -- * TAR format primitive output 115 | 116 | type FieldWidth = Int 117 | 118 | putString :: FieldWidth -> String -> String 119 | putString n s = take n s ++ fill (n - length s) '\NUL' 120 | 121 | --TODO: check integer widths, eg for large file sizes 122 | putOct :: (Integral a, Show a) => FieldWidth -> a -> String 123 | putOct n x = 124 | let octStr = take (n-1) $ showOct x "" 125 | in fill (n - length octStr - 1) '0' 126 | ++ octStr 127 | ++ putChar8 '\NUL' 128 | 129 | putChar8 :: Char -> String 130 | putChar8 c = [c] 131 | 132 | fill :: FieldWidth -> Char -> String 133 | fill n c = replicate n c 134 | -------------------------------------------------------------------------------- /src/lib/Freenet.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE 
OverloadedStrings #-} 3 | 4 | module Freenet ( 5 | Freenet, mkFreenet, shutdownFreenet 6 | ) where 7 | 8 | import Control.Concurrent ( forkIO ) 9 | import Control.Concurrent.STM 10 | import Control.Monad ( void ) 11 | import qualified Data.Configurator as CFG 12 | import qualified Data.Configurator.Types as CFG 13 | import qualified Data.Text as T 14 | 15 | import Freenet.Chk 16 | import Freenet.Companion 17 | import Freenet.Ssk 18 | import Freenet.Store 19 | import Freenet.Types 20 | import Node 21 | 22 | data Freenet a = Freenet 23 | { fnNode :: Node a 24 | , fnCompanion :: Maybe Companion 25 | , fnChkStore :: StoreFile ChkBlock 26 | , fnSskStore :: StoreFile SskBlock 27 | } 28 | 29 | mkFreenet 30 | :: Node a 31 | -> Maybe Companion 32 | -> StoreFile ChkBlock 33 | -> StoreFile SskBlock 34 | -> STM (Freenet a) 35 | mkFreenet node comp chk ssk = do 36 | return $ Freenet node comp chk ssk 37 | 38 | shutdownFreenet :: Freenet a -> IO () 39 | shutdownFreenet fn = do 40 | shutdownStore $ fnChkStore fn 41 | shutdownStore $ fnSskStore fn 42 | 43 | {- 44 | -- | initializes Freenet subsystem 45 | initFn :: CFG.Config -> IO (Freenet a) 46 | initFn cfg = do 47 | 48 | chkIncoming <- newBroadcastTChanIO 49 | sskIncoming <- newBroadcastTChanIO 50 | 51 | let fn = FN Nothing chkIncoming sskIncoming 52 | 53 | -- companion 54 | let ccfg = CFG.subconfig "companion" cfg 55 | chost <- CFG.lookup ccfg "host" :: IO (Maybe String) 56 | case chost of 57 | Nothing -> return fn 58 | Just _ -> do 59 | comp <- FC.initCompanion ccfg (offerChk fn) (offerSsk fn) 60 | return $ fn { fnCompanion = Just comp } 61 | -} 62 | 63 | -------------------------------------------------------------------------------- /src/lib/Freenet/Archive.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE OverloadedStrings #-} 3 | 4 | module Freenet.Archive ( 5 | Archive, fetchArchive, 6 | ArchiveCache, mkArchiveCache 7 | ) where 8 | 9 | import qualified Codec.Archive.Tar as TAR 10 | import qualified Codec.Archive.Zip as ZIP 11 | import Control.Applicative ( (<$>) ) 12 | import Control.Concurrent ( forkIO ) 13 | import Control.Concurrent.STM 14 | import qualified Control.Exception as CE 15 | import Control.Monad ( when, void ) 16 | import qualified Data.ByteString.Lazy as BSL 17 | import qualified Data.Cache.LRU as LRU 18 | import qualified Data.HashMap.Strict as Map 19 | import qualified Data.Text as T 20 | import System.Log.Logger 21 | 22 | import Freenet.Metadata 23 | import Freenet.SplitFile 24 | import Freenet.URI 25 | import Requests 26 | import Utils 27 | 28 | -- | 29 | -- An Archive is just a map from file names to file contents. 
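-- Looking up one file from a fetched archive is then just a map lookup
-- (a sketch; "index.html" is an assumed entry name, not part of this API):
--
-- > lookupIndex :: Archive -> Maybe BSL.ByteString
-- > lookupIndex = Map.lookup "index.html"
--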
30 | type Archive = Map.HashMap String BSL.ByteString 31 | 32 | logD :: String -> IO () 33 | logD m = debugM "freenet.archive" m 34 | 35 | newtype ArchiveKey = ArchiveKey (RedirectTarget, ArchiveType) deriving ( Eq, Ord ) 36 | 37 | newtype ArchiveProgress = ArchiveProgress { unProgress :: TMVar (Either T.Text Archive) } 38 | 39 | data ArchiveCache = ArchiveCache 40 | { acLru :: TVar (LRU.LRU ArchiveKey ArchiveProgress) 41 | } 42 | 43 | mkArchiveCache :: Integer -> IO ArchiveCache 44 | mkArchiveCache size = do 45 | lru <- newTVarIO $ LRU.newLRU $ Just size 46 | return $ ArchiveCache lru 47 | 48 | fetchArchive :: UriFetch a => a -> ArchiveCache -> RedirectTarget -> ArchiveType -> IO (Either T.Text Archive) 49 | fetchArchive fetch ac tgt tp = do 50 | let ak = ArchiveKey (tgt, tp) 51 | 52 | (prog, needStart) <- atomically $ do 53 | lru <- readTVar (acLru ac) 54 | let (lru', mprog) = LRU.lookup ak lru 55 | 56 | case mprog of 57 | Just p -> writeTVar (acLru ac) lru' >> return (p, False) 58 | Nothing -> do 59 | p <- ArchiveProgress <$> newEmptyTMVar 60 | writeTVar (acLru ac) $ LRU.insert ak p lru 61 | return (p, True) 62 | 63 | when needStart $ void $ forkIO $ do 64 | arch <- fetchArchive' fetch tgt tp 65 | atomically $ do 66 | putTMVar (unProgress prog) arch 67 | -- drop failures from cache so they can be fetched again if the user dares 68 | case arch of 69 | Left _ -> modifyTVar' (acLru ac) $ \lru -> let (lru', _) = LRU.delete ak lru in lru' 70 | _ -> return () 71 | 72 | 73 | atomically $ readTMVar (unProgress prog) 74 | 75 | fetchArchive' :: UriFetch a => a -> RedirectTarget -> ArchiveType -> IO (Either T.Text Archive) 76 | fetchArchive' fetch tgt tp = do 77 | logD $ "fetching archive " ++ show tgt 78 | 79 | arch <- fetchRedirect' fetch tgt 80 | 81 | let 82 | parseZip zbs = CE.catch (return $ Right go) handler where 83 | handler :: CE.ErrorCall -> IO (Either T.Text Archive) 84 | handler e = return $ Left $ T.pack $ show e 85 | 86 | entries = ZIP.zEntries $ ZIP.toArchive zbs 87 | 88 | go = Map.fromList $ map (\e -> (ZIP.eRelativePath e, ZIP.fromEntry e)) entries 89 | 90 | case arch of 91 | Left e -> return $ Left e 92 | Right bs -> case tp of 93 | TAR -> return $ Right $ TAR.foldEntries 94 | (\e m -> case TAR.entryContent e of 95 | TAR.NormalFile ebs _ -> Map.insert (TAR.entryPath e) ebs m 96 | _ -> m 97 | ) Map.empty (const Map.empty) $ TAR.read bs 98 | ZIP -> parseZip bs 99 | 100 | fetchRedirect' :: UriFetch a => a -> RedirectTarget -> IO (Either T.Text BSL.ByteString) 101 | fetchRedirect' fetch (RedirectKey _ uri) = do 102 | logD $ "fetch redirect' to " ++ show uri 103 | 104 | mbs <- getUriData fetch uri 105 | case mbs of 106 | Left e -> return $ Left $ "fetchRedirect': error with requestNodeData: " `T.append` e 107 | Right (bs, len) -> case parseMetadata (BSL.take (fromIntegral len) $ bsFromStrict bs) of 108 | Left _ -> return $ Right $ BSL.take (fromIntegral len) $ bsFromStrict bs 109 | Right md -> case md of 110 | ArchiveManifest (RedirectSplitFile sf) _ _ _ -> fetchSplitFile fetch sf 111 | _ -> return $ Left $ "fetchRedirect': what shall I do with metadata: " `T.append` (T.pack $ show md) 112 | 113 | fetchRedirect' fetch (RedirectSplitFile sf) = fetchSplitFile fetch sf 114 | -------------------------------------------------------------------------------- /src/lib/Freenet/Base64.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE OverloadedStrings #-} 3 | 4 | -- | Freenet's modified Base64 encoding 5 | module Freenet.Base64 ( 6 | 
FreenetBase64(..), 7 | 8 | -- * Integer utilities 9 | i2bs, bs2i, bsToPosI, posIToBs 10 | ) where 11 | 12 | import Data.Bits ( shiftL, shiftR ) 13 | import qualified Data.ByteString as B 14 | import qualified Data.ByteString.Lazy as BL 15 | import qualified Data.ByteString.Lazy.Char8 as BLC 16 | import Data.ByteString.Base64 17 | import Data.Monoid ( (<>) ) 18 | import qualified Data.Text as T 19 | import Data.Word 20 | 21 | import Utils 22 | 23 | -- | 24 | -- things that have a special base64 representation in Freenet 25 | class FreenetBase64 a where 26 | fromBase64' :: T.Text -> Either T.Text a 27 | toBase64' :: a -> T.Text 28 | 29 | toStd :: Word8 -> Word8 30 | toStd x 31 | | x == (toEnum $ fromEnum '~') = toEnum $ fromEnum '+' 32 | | x == (toEnum $ fromEnum '-') = toEnum $ fromEnum '/' 33 | | otherwise = x 34 | 35 | fromStd :: Char -> Char 36 | fromStd x 37 | | x == '+' = '~' 38 | | x == '/' = '-' 39 | | otherwise = x 40 | 41 | toStandardAlphabet :: B.ByteString -> B.ByteString 42 | toStandardAlphabet = B.map toStd 43 | 44 | fromBase64Bs :: T.Text -> Either T.Text B.ByteString 45 | fromBase64Bs s = case dec of 46 | Left e -> Left $ T.pack e 47 | Right b -> Right b 48 | where 49 | dec = decode $ toStandardAlphabet bs 50 | bs = bsToStrict $ BL.concat 51 | [ BLC.pack $ T.unpack s 52 | , BL.replicate pad $ toEnum $ fromEnum '=' 53 | ] 54 | pad 55 | | T.length s `rem` 4 == 0 = 0 56 | | otherwise = fromIntegral $ 4 - (T.length s `rem` 4) 57 | 58 | -- | convert bytes to Freenet's modified base64 59 | toBase64Bs :: B.ByteString -> T.Text 60 | toBase64Bs b = T.takeWhile (/= '=') $ 61 | T.map fromStd $ T.pack $ map (toEnum . fromEnum) $ B.unpack $ encode b 62 | 63 | instance FreenetBase64 B.ByteString where 64 | fromBase64' = fromBase64Bs 65 | toBase64' = toBase64Bs 66 | 67 | instance FreenetBase64 Word8 where 68 | toBase64' = toBase64Bs . B.singleton 69 | fromBase64' t = case fromBase64Bs t of 70 | Left e -> Left e 71 | Right bs -> if B.length bs == 1 72 | then Right $ B.index bs 0 73 | else Left "expected 1 byte" 74 | 75 | ------------------------------------------------------------------------ 76 | -- base64 encoded integers 77 | ------------------------------------------------------------------------ 78 | 79 | bs2i :: B.ByteString -> Integer 80 | bs2i b 81 | | B.length b' == 0 = 0 82 | | sign = go b' - 2 ^ (B.length b' * 8) 83 | | otherwise = go b' 84 | where 85 | b' = b -- B.dropWhile (==0) b -- drop leading 0s 86 | sign = B.index b' 0 > 127 87 | go = B.foldl' (\i bb -> (i `shiftL` 8) + fromIntegral bb) 0 88 | 89 | bsToPosI :: B.ByteString -> Integer 90 | bsToPosI = B.foldl' (\i bb -> (i `shiftL` 8) + fromIntegral bb) 0 91 | 92 | posIToBs :: Integer -> B.ByteString 93 | posIToBs x = B.reverse $ B.unfoldr go x where 94 | go i = if i == 0 95 | then Nothing 96 | else Just (fromIntegral i, i `shiftR` 8) 97 | 98 | i2bs :: Integer -> B.ByteString 99 | i2bs x 100 | | x == 0 = B.singleton 0 101 | | x < 0 = (\xx -> B.reverse $ B.unfoldr go xx) $ 2 ^ (8 * bytes) + x 102 | | otherwise = if B.index posRes 0 > 127 then B.singleton 0 <> posRes else posRes 103 | where 104 | posRes = B.reverse $ B.unfoldr go x 105 | bytes = (integerLogBase 2 (abs x) + 1) `quot` 8 + 1 106 | go i = if i == 0 then Nothing 107 | else Just (fromIntegral i, i `shiftR` 8) 108 | 109 | -- Compute the (floor of the) log of i in base b. 110 | -- Simplest way would be just divide i by b until it's smaller than b, 111 | -- but that would be very slow! We are just slightly more clever.
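-- For example:
--
-- > integerLogBase 2 1000 == 9 -- since 2^9 = 512 <= 1000 < 1024 = 2^10
-- > integerLogBase 256 65536 == 2 -- since 256^2 = 65536
--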
112 | integerLogBase :: Integer -> Integer -> Int 113 | integerLogBase b i = 114 | if i < b then 115 | 0 116 | else 117 | -- Try squaring the base first to cut down the number of divisions. 118 | let l = 2 * integerLogBase (b*b) i 119 | doDiv :: Integer -> Int -> Int 120 | doDiv ii ll = if ii < b then ll else doDiv (ii `div` b) (ll+1) 121 | in doDiv (i `div` (b^l)) l 122 | 123 | instance FreenetBase64 Integer where 124 | fromBase64' s = fromBase64' s >>= \bs -> Right (bsToPosI bs) 125 | toBase64' = toBase64' . i2bs 126 | -------------------------------------------------------------------------------- /src/lib/Freenet/Chk.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE OverloadedStrings #-} 3 | 4 | module Freenet.Chk ( 5 | -- * Working with CHKs 6 | ChkRequest(..), ChkBlock(..), mkChkBlock, decompressChk, 7 | encryptChk, chkBlockUri, chkDataSize, 8 | decryptChk, 9 | 10 | -- * CHK Headers 11 | ChkHeader, mkChkHeader, unChkHeader, chkHeaderHashId, 12 | chkHeaderHash, chkHeaderCipherLen 13 | ) where 14 | 15 | import Control.Applicative ( (<$>), (<*>) ) 16 | import Control.Monad ( mzero ) 17 | import Control.Monad.ST ( runST ) 18 | import Crypto.Cipher.AES 19 | import Data.Aeson 20 | import Data.Binary 21 | import Data.Binary.Get 22 | import Data.Binary.Put ( putByteString, putWord16be, runPut ) 23 | import Data.Bits ( (.&.), shiftL ) 24 | import qualified Data.ByteString as BS 25 | import qualified Data.ByteString.Lazy as BSL 26 | import Data.Digest.Pure.SHA 27 | import qualified Data.Text as T 28 | 29 | import Freenet.Base64 30 | import Freenet.Compression 31 | import Freenet.Pcfb 32 | import qualified Freenet.Rijndael as RD 33 | import Freenet.Types 34 | import Freenet.URI 35 | import Utils 36 | 37 | ------------------------------------------------------------------------------ 38 | -- CHK headers 39 | ------------------------------------------------------------------------------ 40 | 41 | -- | 42 | -- The header required to verify a CHK data block, consisting of 43 | -- 0 2 word16, hash id, always 1 for SHA256 44 | -- 2 32 SHA256(payload) 45 | -- 34 2 encrypted(word16) true payload size, before padding to 32kiB block size 46 | -- ------ 47 | -- 36 bytes total 48 | newtype ChkHeader = ChkHeader { unChkHeader :: BS.ByteString } deriving ( Eq ) 49 | 50 | -- | 51 | -- The size of a CHK block's header, which is 36 bytes. 52 | chkHeaderSize :: Int 53 | chkHeaderSize = 36 54 | 55 | instance Show ChkHeader where 56 | show (ChkHeader bs) = T.unpack $ toBase64' bs 57 | 58 | instance Binary ChkHeader where 59 | put (ChkHeader h) = putByteString h 60 | get = ChkHeader <$> getByteString chkHeaderSize 61 | 62 | -- | 63 | -- Creates the header from a bytestring of exactly 36 bytes. 64 | mkChkHeader :: BS.ByteString -> Either T.Text ChkHeader 65 | mkChkHeader bs 66 | | BS.length bs == chkHeaderSize = Right $ ChkHeader bs 67 | | otherwise = Left $ "CHK header length must be 36 bytes" 68 | 69 | -- | 70 | -- Returns the payload's SHA256 digest. 71 | chkHeaderHash :: ChkHeader -> BS.ByteString 72 | chkHeaderHash = BS.take 32 . BS.drop 2 . unChkHeader 73 | 74 | -- | 75 | -- The length of the CHK plaintext is stored encrypted 76 | -- in its header, and this function extracts it. 77 | chkHeaderCipherLen :: ChkHeader -> BS.ByteString 78 | chkHeaderCipherLen = BS.drop 34 . unChkHeader 79 | 80 | -- | 81 | -- The hash algorithm used to generate the digest at offset 2, 82 | -- only a value of 1, indicating SHA256, is currently used.
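-- In terms of this module's accessors, the 36-byte layout works out like
-- this for any well-formed header h (a recap of the table above, not extra API):
--
-- > chkHeaderHashId h == 1 -- bytes 0..1, SHA256 is the only id in use
-- > BS.length (chkHeaderHash h) == 32 -- bytes 2..33, SHA256 of the payload
-- > BS.length (chkHeaderCipherLen h) == 2 -- bytes 34..35, encrypted true length
--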
83 | chkHeaderHashId :: ChkHeader -> Word16 84 | chkHeaderHashId = runGet get . bsFromStrict . unChkHeader 85 | 86 | data ChkBlock = ChkBlock 87 | { chkBlockKey :: ! Key -- ^ location 88 | , chkBlockCrypto :: ! Word8 -- ^ crypto algorithm, 2 -> AES_PCFB_256_SHA256, 3 -> AES_CTR_256_SHA256 89 | , chkBlockHeader :: ! ChkHeader -- ^ headers 90 | , chkBlockData :: ! BS.ByteString -- ^ data 91 | } 92 | 93 | instance Show ChkBlock where 94 | show (ChkBlock k _ h d) = "ChkFound {k=" ++ show k ++ ", h=" ++ (show h) ++ ", len=" ++ (show $ BS.length d) ++ "}" 95 | 96 | instance ToJSON ChkBlock where 97 | toJSON (ChkBlock k c h d) = object 98 | [ "location" .= k 99 | , "algorithm" .= c 100 | , "header" .= (toJSON . toBase64' . unChkHeader) h 101 | , "data" .= (toJSON . toBase64') d 102 | ] 103 | 104 | -- | 105 | -- Size of the CHK payload, which is 32kiB. 106 | chkDataSize :: Num a => a 107 | chkDataSize = 32768 108 | 109 | instance StorePersistable ChkBlock where 110 | storeSize = const $ 32 + chkHeaderSize + chkDataSize + 1 -- first is key and last is crypto 111 | storePut = \(ChkBlock k calg h d) -> put k >> put calg >> put h >> putByteString d 112 | storeGet = do 113 | (k, calg, h, d) <- (,,,) <$> get <*> get <*> get <*> getByteString chkDataSize 114 | case mkChkBlock k h d calg of 115 | Right df -> return df 116 | Left e -> fail $ T.unpack e 117 | 118 | -- | find the routing key for a DataFound 119 | instance DataBlock ChkBlock where 120 | dataBlockLocation (ChkBlock k c _ _) = freenetLocation k $ (1 `shiftL` 8) + (fromIntegral $ c .&. 0xff) 121 | decryptDataBlock = decryptChk 122 | 123 | instance Binary ChkBlock where 124 | put = storePut 125 | get = storeGet 126 | 127 | chkBlockUri 128 | :: ChkBlock 129 | -> Key 130 | -> URI 131 | chkBlockUri blk ck = CHK (chkBlockKey blk) ck (mkChkExtra 3 (-1) False) [] 132 | 133 | -- | 134 | -- creates a @ChkBlock@ from it's ingredients, verifying the hash and size of 135 | -- the data block 136 | mkChkBlock :: Key -> ChkHeader -> BS.ByteString -> Word8 -> Either T.Text ChkBlock 137 | mkChkBlock k h d calg 138 | | hash /= (bsFromStrict $ unKey k) = Left "hash mismatch" 139 | | BS.length d /= chkDataSize = Left "CHK data must be 32kiB" 140 | | otherwise = Right $ ChkBlock k calg h d 141 | where 142 | hash = bytestringDigest $ sha256 $ BSL.fromChunks [unChkHeader h, d] 143 | 144 | -- | 145 | -- given the secret crypto key (second part of URIs), data found can be 146 | -- decrypted to get the source data back 147 | decryptChk 148 | :: ChkBlock -- ^ the encrypted data together with their headers 149 | -> Key -- ^ the secret crypto key (second part of URIs) 150 | -> Either T.Text (BS.ByteString, Int) -- ^ (decrypted payload, original length) 151 | decryptChk (ChkBlock _ calg header ciphertext) key 152 | | calg == 3 = decryptChkAesCtr header ciphertext key 153 | | calg == 2 = decryptChkAesPcfb header ciphertext key 154 | | otherwise = Left $ T.pack $ "unknown CHK crypto algorithm " ++ show calg 155 | 156 | decryptChkAesPcfb :: ChkHeader -> BS.ByteString -> Key -> Either T.Text (BS.ByteString, Int) 157 | decryptChkAesPcfb header ciphertext key 158 | | predIv /= iv = Left "CHK hash mismatch" 159 | | len > BS.length plaintext' = Left $ T.pack $ "invalid CHK length " ++ show len 160 | | otherwise = Right (plaintext, len) 161 | where 162 | (plaintext', headers') = runST $ do 163 | pcfb <- mkPCFB (RD.initKey 32 $ unKey key) $ BS.replicate 32 0 -- ^ the IV is all zero, but 164 | h' <- pcfbDecipher pcfb $ BS.drop 2 $ unChkHeader header -- ^ this is said to serve as IV 165 
| p <- pcfbDecipher pcfb ciphertext 166 | return (p, h') 167 | plaintext = plaintext' -- BS.take len plaintext' 168 | iv = BS.take 32 headers' 169 | predIv = bsToStrict $ bytestringDigest $ sha256 (bsFromStrict $ unKey key) 170 | len = fromIntegral $ runGet getWord16be $ bsFromStrict $ BS.drop 32 headers' 171 | 172 | decryptChkAesCtr :: ChkHeader -> BS.ByteString -> Key -> Either T.Text (BS.ByteString, Int) 173 | decryptChkAesCtr header ciphertext key 174 | | len > BS.length plaintext' = Left "invalid length" 175 | | mac /= bsFromStrict hash = Left "mac mismatch when verifying CHK payload" 176 | | otherwise = Right (plaintext', len) 177 | where 178 | hash = chkHeaderHash header 179 | cipherLen = chkHeaderCipherLen header 180 | iv = BS.take 16 hash 181 | aes = initAES $ unKey key 182 | plaintext'' = decryptCTR aes iv $ BS.concat [ciphertext, cipherLen] -- TODO get rid of the concat 183 | (plaintext', lenbytes) = BS.splitAt (BS.length ciphertext) plaintext'' 184 | len = fromIntegral $ runGet getWord16be $ bsFromStrict lenbytes 185 | mac = bytestringDigest (hmacSha256 (bsFromStrict $ unKey key) (bsFromStrict plaintext'')) 186 | 187 | -- | 188 | -- encrypts some data (which must be <= chkDataSize in length) using some encryption 189 | -- key, and returns the resulting @ChkBlock@ 190 | encryptChk :: BS.ByteString -> Key -> ChkBlock 191 | encryptChk d k 192 | | payloadSize > chkDataSize = error "CHK payload > 32kiB" 193 | | otherwise = ChkBlock loc 3 hdr ciphertext 194 | where 195 | hdr = ChkHeader $ bsToStrict $ runPut $ putWord16be 1 >> putByteString mac >> putByteString cipherLen 196 | payloadSize = BS.length d 197 | padding = BS.replicate (chkDataSize - payloadSize) 0 198 | plaintext = BS.concat [d, padding, bsToStrict $ runPut $ putWord16be $ fromIntegral payloadSize] 199 | (ciphertext, cipherLen) = BS.splitAt chkDataSize $ encryptCTR aes iv plaintext 200 | aes = initAES $ unKey k 201 | iv = BS.take 16 mac 202 | mac = bsToStrict $ bytestringDigest $ hmacSha256 (bsFromStrict $ unKey k) (bsFromStrict plaintext) 203 | loc = mkKey' $ bsToStrict $ bytestringDigest $ sha256 $ BSL.fromChunks [unChkHeader hdr, ciphertext] 204 | 205 | ------------------------------------------------------------------------------------------------- 206 | -- Requesting CHKs 207 | ------------------------------------------------------------------------------------------------- 208 | 209 | data ChkRequest = ChkRequest 210 | { chkReqLocation :: ! Key -- ^ the location of the data 211 | , chkReqHashAlg :: ! Word8 -- ^ the hash algorithm to use 212 | } deriving ( Show ) 213 | 214 | instance Binary ChkRequest where 215 | put (ChkRequest l h) = put l >> put h 216 | get = ChkRequest <$> get <*> get 217 | 218 | instance FromJSON ChkRequest where 219 | parseJSON (Object v) = ChkRequest 220 | <$> v .: "location" 221 | <*> v .: "algorithm" 222 | parseJSON _ = mzero 223 | 224 | instance DataRequest ChkRequest where 225 | dataRequestLocation (ChkRequest l a) = freenetLocation l $ (1 `shiftL` 8) + (fromIntegral $ a .&. 
0xff) 226 | 227 | decompressChk 228 | :: CompressionCodec -- ^ codec to use 229 | -> BS.ByteString -- ^ compressed data 230 | -> Int -- ^ true compressed data length 231 | -> IO (Either T.Text (BS.ByteString, Int)) 232 | decompressChk codec inp inpl 233 | | codec == None = return $ Right (inp, inpl) 234 | | otherwise = do 235 | dec <- decompress codec $ BSL.drop 4 $ bsFromStrict (BS.take inpl inp) 236 | case dec of 237 | Left e -> return $ Left $ "error decompressing data: " `T.append` e 238 | Right dec' -> return $ Right (bsToStrict dec', fromIntegral $ BSL.length dec') 239 | -------------------------------------------------------------------------------- /src/lib/Freenet/Companion.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE OverloadedStrings #-} 3 | 4 | module Freenet.Companion ( 5 | Companion, initCompanion, 6 | 7 | getChk, getSsk 8 | ) where 9 | 10 | import Control.Applicative ( (<$>) ) 11 | import Control.Concurrent ( forkIO ) 12 | import qualified Control.Concurrent.Lock as Lock 13 | import Control.Monad ( forever, void ) 14 | import qualified Data.ByteString as BS 15 | import qualified Data.Configurator as CFG 16 | import qualified Data.Configurator.Types as CFG 17 | import qualified Data.Text as T 18 | import Data.Text.Encoding ( decodeUtf8, encodeUtf8 ) 19 | import Network 20 | import System.IO 21 | 22 | import Freenet.Base64 23 | import Freenet.Chk 24 | import Freenet.Ssk 25 | import Freenet.Types 26 | 27 | data Companion = Companion 28 | { cHandle :: Handle 29 | , cLock :: Lock.Lock 30 | } 31 | 32 | initCompanion :: CFG.Config -> (ChkBlock -> IO ()) -> (SskBlock -> IO ()) -> IO Companion 33 | initCompanion cfg chkHandler sskHandler = do 34 | host <- CFG.require cfg "host" 35 | port <- CFG.require cfg "port" :: IO Int 36 | 37 | handle <- connectTo host (PortNumber $ fromIntegral port) 38 | hSetBuffering handle LineBuffering 39 | 40 | let breakSpace s = let (a, b) = T.break (== ' ') s in (a, T.stripStart b) 41 | 42 | -- parse companion's responses and offer them to Freenet core 43 | void $ forkIO $ forever $ do 44 | (what, args) <- breakSpace . 
decodeUtf8 <$> BS.hGetLine handle 45 | 46 | case what of 47 | "chk" -> do 48 | let 49 | (ktxt, rest) = breakSpace args 50 | (cstr, rest') = breakSpace rest 51 | (hstr, rest'') = breakSpace rest' 52 | (dstr, _) = breakSpace rest'' 53 | df = do 54 | key <- fromBase64' ktxt >>= mkKey 55 | calg <- fromBase64' cstr 56 | hdr <- fromBase64' hstr >>= mkChkHeader 57 | d <- fromBase64' dstr 58 | 59 | mkChkBlock key hdr d calg 60 | 61 | case df of 62 | Left e -> putStrLn $ "could not parse CHK found response: " ++ T.unpack e 63 | Right d -> chkHandler d 64 | 65 | "ssk" -> do 66 | let 67 | (ktxt, rest) = breakSpace args 68 | (pktxt, rest') = breakSpace rest 69 | (hstr, rest'') = breakSpace rest' 70 | df = do 71 | loc <- fromBase64' ktxt >>= mkKey 72 | pubkey <- fromBase64' pktxt >>= mkPubKey 73 | hdr <- fromBase64' hstr >>= mkSskHeader 74 | d <- fromBase64' rest'' 75 | mkSskBlock loc hdr d pubkey 76 | 77 | case df of 78 | Left e -> putStrLn $ "could not parse SSK found response: " ++ T.unpack e 79 | Right d -> sskHandler d 80 | 81 | x -> print $ "strange companion response " ++ T.unpack x 82 | 83 | lck <- Lock.new 84 | return $ Companion handle lck 85 | 86 | getChk :: Companion -> ChkRequest -> IO () 87 | getChk comp (ChkRequest k a) = 88 | let msg = T.intercalate " " ["getchk", toBase64' $ unKey k, T.pack $ show a, "\n"] 89 | in Lock.with (cLock comp) $ BS.hPut (cHandle comp) $ encodeUtf8 msg 90 | 91 | getSsk :: Companion -> SskRequest -> IO () 92 | getSsk comp (SskRequest hpk ehd e) = Lock.with (cLock comp) $ BS.hPut (cHandle comp) $ encodeUtf8 msg where 93 | msg = T.intercalate " " ["getssk", k $ hpk, k ehd, T.pack $ show e, "\n"] 94 | k = toBase64' . unKey 95 | -------------------------------------------------------------------------------- /src/lib/Freenet/Compression.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE OverloadedStrings #-} 3 | 4 | module Freenet.Compression ( 5 | CompressionCodec(..), decompress 6 | ) where 7 | 8 | import qualified Codec.Compression.BZip as Bzip 9 | import qualified Codec.Compression.GZip as Gzip 10 | import qualified Control.Exception as CE 11 | import qualified Data.ByteString as BS 12 | import qualified Data.ByteString.Internal as BSI 13 | import qualified Data.ByteString.Lazy as BSL 14 | import Data.Void ( Void ) 15 | import Data.Word ( Word8 ) 16 | import Foreign.C.Types 17 | import Foreign.ForeignPtr 18 | import Foreign.Ptr 19 | import Foreign.Storable 20 | import qualified Data.Text as T 21 | 22 | import Utils 23 | 24 | -- | 25 | -- Supported compression algorithms 26 | data CompressionCodec = None | Gzip | Bzip2 | LZMA | LZMA_NEW deriving ( Eq, Ord, Show ) 27 | 28 | decompress :: CompressionCodec -> BSL.ByteString -> IO (Either T.Text BSL.ByteString) 29 | decompress comp cdata = case comp of 30 | None -> return $ Right $ cdata 31 | Bzip2 -> CE.catch (return $ Right $ Bzip.decompress $ BSL.pack [0x42, 0x5a] `BSL.append` cdata) ehandler 32 | Gzip -> CE.catch (return $ Right $ Gzip.decompress cdata) ehandler 33 | LZMA -> do 34 | lzma <- initLzma $ BS.pack [0x5d, 0x00, 0x00, 0x10, 0x00] 35 | dec <- decodeLzma lzma $ bsToStrict cdata 36 | return $ Right $ bsFromStrict dec 37 | LZMA_NEW -> do 38 | let (hdr, cd) = BSL.splitAt 5 cdata 39 | lzma <- initLzma $ bsToStrict hdr 40 | dec <- decodeLzma lzma $ bsToStrict cd 41 | return $ Right $ bsFromStrict dec 42 | 43 | ehandler :: CE.ErrorCall -> IO (Either T.Text BSL.ByteString) 44 | ehandler e = return $ Left $ "decompression failed: " `T.append` (T.pack $ show 
e) 45 | 46 | ---------------------------------------------------------------------------------- 47 | -- LZMA 48 | ---------------------------------------------------------------------------------- 49 | 50 | newtype LzmaDec = LzmaDec (ForeignPtr Void) 51 | 52 | initLzma :: BS.ByteString -> IO LzmaDec 53 | {-# NOINLINE initLzma #-} 54 | initLzma props 55 | | BS.length props /= 5 = error "props must be 5 bytes" 56 | | otherwise = do 57 | let (pfptr, poff, _) = BSI.toForeignPtr props 58 | dec <- withForeignPtr pfptr $ \pptr -> do 59 | c_lzma_dec_init (pptr `plusPtr` poff) 60 | fptr <- newForeignPtr c_lzma_dec_free dec 61 | return $ LzmaDec fptr 62 | 63 | decodeLzma :: LzmaDec -> BS.ByteString -> IO BS.ByteString 64 | {-# NOINLINE decodeLzma #-} 65 | decodeLzma (LzmaDec fpdec) input = withForeignPtr fpdec $ \dec -> do 66 | let 67 | (ifptr, ioff, ilen) = BSI.toForeignPtr input 68 | osize <- mallocForeignPtr 69 | od <- withForeignPtr ifptr $ \iptr -> 70 | withForeignPtr osize $ \os -> do 71 | c_lzma_decode dec (iptr `plusPtr` ioff) (fromIntegral ilen) os 72 | 73 | ofptr <- newForeignPtr BSI.c_free_finalizer od 74 | osize' <- withForeignPtr osize peek 75 | return $ BSI.fromForeignPtr ofptr 0 (fromIntegral osize') 76 | 77 | foreign import ccall "lzma.h lzma_dec_init" 78 | c_lzma_dec_init :: Ptr a -> IO (Ptr Void) 79 | 80 | foreign import ccall "lzma.h &lzma_dec_free" 81 | c_lzma_dec_free :: FunPtr (Ptr Void -> IO ()) 82 | 83 | -- void* lzma_decode(struct lzma_dec_state *state, void *src, size_t src_len, size_t *dest_len) { 84 | foreign import ccall "lzma.h lzma_decode" 85 | c_lzma_decode :: Ptr Void 86 | -> Ptr Word8 -> CSize 87 | -> Ptr CSize 88 | -> IO (Ptr Word8) 89 | 90 | -------------------------------------------------------------------------------- /src/lib/Freenet/Fetch.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE OverloadedStrings #-} 3 | 4 | -- | 5 | -- High level data fetching. This includes splitfile reassembly, 6 | -- handling of archives and retries. 7 | module Freenet.Fetch ( 8 | Resolver, fetchUri 9 | ) where 10 | 11 | import qualified Data.ByteString.Lazy as BSL 12 | import qualified Data.HashMap.Strict as Map 13 | import qualified Data.Text as T 14 | import System.Log.Logger 15 | 16 | import Freenet.Archive 17 | import Freenet.Metadata 18 | import Freenet.SplitFile 19 | import Freenet.URI 20 | import Requests 21 | import Utils 22 | 23 | data Resolver f = Resolver 24 | { resArchiveCache :: ! 
ArchiveCache 25 | , resFetch :: f 26 | } 27 | 28 | logI :: String -> IO () 29 | logI = infoM "freenet.fetch" 30 | 31 | logD :: String -> IO () 32 | logD = debugM "freenet.fetch" 33 | 34 | -- | 35 | -- Tries to fetch the specified URI, parses metadata if it's 36 | -- a control document, goes on fetching the referenced data, 37 | -- and finally returns everything 38 | fetchUri :: UriFetch f => Resolver f -> URI -> IO (Either T.Text BSL.ByteString) 39 | fetchUri res uri = do 40 | logI $ "fetching " ++ show uri 41 | 42 | db <- getUriData (resFetch res) uri 43 | 44 | case db of 45 | Left e -> return $ Left e 46 | Right (pt, ptl) -> let pt' = BSL.take (fromIntegral ptl) $ bsFromStrict pt 47 | in if isControlDocument uri 48 | then case parseMetadata pt' of 49 | Left e -> return $ Left e 50 | Right md -> resolvePath res (uriPath uri) md Nothing 51 | else return $ Right pt' 52 | 53 | -- | FIXME: watch out for infinite redirects 54 | resolvePath 55 | :: UriFetch f 56 | => Resolver f 57 | -> [T.Text] -- ^ path elements to be resolved 58 | -> Metadata -- ^ the metadata where we try to locate the entries in 59 | -> Maybe Archive -- ^ archive to resolve AIR etc. against 60 | -> IO (Either T.Text BSL.ByteString) -- ^ either we fail, or we locate the final redirect step 61 | 62 | -- redirect to default entry in manifest, which has a name of "" 63 | resolvePath res [] md@(Manifest _) arch = logI "redirecting to default entry" >> resolvePath res [""] md arch 64 | 65 | -- resolve in archive 66 | resolvePath _ _ (ArchiveInternalRedirect tgt _) (Just amap) = do 67 | logD $ "resolving AIR to " ++ show tgt 68 | case Map.lookup (T.unpack tgt) amap of 69 | Nothing -> return $ Left $ "could not locate \"" `T.append` tgt `T.append` "\" in archive" 70 | Just bs -> do 71 | logD $ "found " ++ show tgt ++ " in archive" 72 | return $ Right bs 73 | 74 | resolvePath fn ps (ArchiveMetadataRedirect tgt) (Just archive) = 75 | case Map.lookup (T.unpack tgt) archive of 76 | Nothing -> return $ Left $ "could not locate metadata " `T.append` tgt `T.append` " in archive" 77 | Just bs -> case parseMetadata bs of 78 | Left e -> return $ Left $ "error parsing metadata from archive: " `T.append` e 79 | Right md -> resolvePath fn ps md (Just archive) 80 | 81 | -- follow simple redirects 82 | resolvePath fn ps (SimpleRedirect _ (RedirectKey _ uri)) arch = do 83 | logD $ "following simple (key) redirect to " ++ show uri ++ " for " ++ show ps 84 | 85 | db <- getUriData (resFetch fn) uri 86 | 87 | case db of 88 | Left e -> return $ Left $ "failed to fetch data while following key redirect: " `T.append` e 89 | Right (pt, ptl) -> let pt' = BSL.take (fromIntegral ptl) $ bsFromStrict pt 90 | in if isControlDocument uri 91 | then case parseMetadata pt' of 92 | Left e -> return $ Left $ "error parsing metadata while following key redirect: " `T.append` e 93 | Right md -> resolvePath fn (uriPath uri ++ ps) md arch 94 | else return $ Right pt' 95 | 96 | resolvePath fn [] (SimpleRedirect _ (RedirectSplitFile sf)) _ = fetchSplitFile (resFetch fn) sf 97 | 98 | -- resolve path in manifest 99 | resolvePath fn (p:ps) md@(Manifest me) arch = do 100 | logD $ "resolving " ++ show p ++ " in manifest" 101 | 102 | case lookup p me of 103 | Nothing -> return $ Left $ "could not locate \"" `T.append` p `T.append` "\" in manifest" 104 | Just me' -> case me' of 105 | SymbolicShortlink tgt -> do 106 | logD $ "following SSL to " ++ show tgt 107 | resolvePath fn (tgt:ps) md arch 108 | x -> resolvePath fn ps x arch 109 | 110 | resolvePath fn ps (ArchiveManifest atgt 
atype _ _) _ = do 111 | logD $ "resolving archive manifest " ++ show atgt 112 | 113 | arch <- fetchArchive (resFetch fn) (resArchiveCache fn) atgt atype 114 | 115 | case arch of 116 | Left e -> return $ Left e 117 | Right emap -> do 118 | logD $ "archive entries: " ++ show (Map.keys emap) 119 | 120 | case Map.lookup ".metadata" emap of 121 | Nothing -> return $ Left $ T.pack $ "no .metadata entry found in TAR archive: " ++ show ps 122 | Just mdbs -> case parseMetadata mdbs of 123 | Right md -> resolvePath fn ps md (Just emap) 124 | x -> return $ Left $ "error parsing manifest from TAR archive: " `T.append` T.pack (show x) 125 | 126 | resolvePath fn ps (MultiLevel sf) arch = do 127 | sfc <- fetchSplitFile (resFetch fn) sf 128 | 129 | case sfc of 130 | Left e -> return $ Left $ "error fetching splitfile for MultiLevel metadata: " `T.append` e 131 | Right bs -> case parseMetadata bs of 132 | Left e -> return $ Left $ "error parsing MultiLevel metadata: " `T.append` e 133 | Right md -> resolvePath fn ps md arch 134 | 135 | -- give up resolving this path 136 | resolvePath _ ps md arch = return $ Left $ T.concat [ 137 | "cannot locate ", T.pack (show ps), " in ", T.pack (show md), " with archive ", T.pack $ show arch] 138 | 139 | -------------------------------------------------------------------------------- /src/lib/Freenet/Fproxy.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE OverloadedStrings #-} 3 | 4 | module Freenet.Fproxy ( 5 | fproxy 6 | ) where 7 | 8 | import qualified Data.Text as T 9 | import Data.Text.Encoding ( encodeUtf8 ) 10 | import Network.HTTP.Types.Status 11 | import qualified Network.Wai as WAI 12 | 13 | import Freenet.Fetch 14 | import Freenet.URI 15 | import Requests 16 | import Utils 17 | 18 | -- | error response 19 | er :: T.Text -> WAI.Response 20 | er msg = WAI.responseLBS status500 [] (bsFromStrict $ encodeUtf8 msg) 21 | 22 | fproxy :: UriFetch f => Resolver f -> WAI.Application 23 | fproxy fn req respond = do 24 | let 25 | path = T.intercalate "/" $ WAI.pathInfo req 26 | 27 | case parseUri path of 28 | Left e -> respond $ er e 29 | Right uri -> do 30 | doc <- fetchUri fn uri 31 | 32 | case doc of 33 | Left e -> respond $ er e 34 | Right bs -> do 35 | let 36 | headers = [] 37 | 38 | respond $ WAI.responseLBS status200 headers bs 39 | -------------------------------------------------------------------------------- /src/lib/Freenet/Insert.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE OverloadedStrings #-} 3 | 4 | module Freenet.Insert ( 5 | InsertTarget(..), InsertData(..), insert 6 | ) where 7 | 8 | import Control.Concurrent ( forkIO ) 9 | import Control.Monad ( void ) 10 | import qualified Data.Binary as BIN 11 | import qualified Data.ByteString as BS 12 | import qualified Data.ByteString.Lazy as BSL 13 | import Data.Digest.Pure.SHA 14 | import qualified Data.Text as T 15 | 16 | import Freenet.Chk 17 | import Freenet.Metadata 18 | import Freenet.Types 19 | import Freenet.URI 20 | import Requests 21 | import Utils 22 | 23 | data InsertTarget 24 | = InsertCHK -- ^ CHK insert 25 | 26 | data InsertData 27 | = InsertDirect 28 | { insertDirectData :: BSL.ByteString 29 | , insertDirectMime :: T.Text 30 | } 31 | 32 | data InsertProgress = IP 33 | { chkTodo :: ! 
[ChkBlock]
  , _ipUri  :: URI
  } deriving ( Show )

-- |
-- Prepares some data for insert, either creating a single CHK
-- block if it's 32kiB or less in size, or by creating a splitfile for
-- larger chunks.
packChk
  :: BSL.ByteString               -- ^ the data to insert
  -> ([ChkBlock], BS.ByteString)  -- ^ the CHK blocks and the SHA256 of the data;
                                  --   the *first* block contains the metadata head, if any
packChk d
  | BSL.length d <= chkDataSize = let blk = encryptChk (bsToStrict d) key in ([blk], unKey key)
  | otherwise = error "can't pack splitfiles yet"
  where
    key = mkKey' $ bsToStrict $ bytestringDigest $ sha256 d

insert :: a -> InsertTarget -> InsertData -> IO InsertProgress
insert _ InsertCHK (InsertDirect payload mime) = do
  let
    (blks, hash) = packChk payload
    duri = chkBlockUri (head blks) (mkKey' hash)
    md = Manifest [("", SimpleRedirect [(SHA256, hash)] (RedirectKey (Just mime) duri))]
    (mdblks, mdh) = packChk $ BIN.encode md
    mduri = CHK (chkBlockKey $ head mdblks) (mkKey' mdh) (mkChkExtra 3 (-1) True) []
    ip = IP (blks ++ mdblks) mduri

  -- void $ forkIO $ mapM_ (insertChk ins) (chkTodo ip)
  error "Insert.insert is not implemented"
  return ip

-------------------------------------------------------------------------------- /src/lib/Freenet/Pcfb.hs: --------------------------------------------------------------------------------

module Freenet.Pcfb (

  -- * PCFB (Periodic Cipher Feed Back) Mode
  mkPCFB, pcfbEncipher, pcfbEncipherWord8,
  pcfbDecipher, pcfbDecipherWord8
  ) where

import Control.Applicative ( (<*>), (<$>) )
import Control.Monad ( when )
import Control.Monad.ST.Safe
import Data.Bits ( xor )
import qualified Data.ByteString as B
import Data.STRef
import qualified Data.Vector.Unboxed as UV
import qualified Data.Vector.Unboxed.Mutable as UMV
import Data.Word

import qualified Freenet.Rijndael as RD
-- import Utils

--------------------------------------------------------------------------------
-- PCFB (Periodic Cipher Feed Back) mode
--------------------------------------------------------------------------------

data PCFB s = MkPCFB
  { _pcfbCipher   :: RD.Key
  , _pcfbFeedback :: UMV.MVector s Word8
  , _pcfbIdx      :: STRef s Int
  }

mkPCFB
  :: RD.Key       -- ^ the underlying cipher to use
  -> B.ByteString -- ^ the IV (initialization vector)
  -> ST s (PCFB s)
mkPCFB c iv
  | B.length iv /= 32 = error "mkPCFB: IV length must be 32"
  | otherwise = MkPCFB c <$> UV.thaw (bsToVec iv) <*> newSTRef 32

pcfbRefill :: PCFB s -> ST s ()
pcfbRefill (MkPCFB c f i) = do
  pos <- readSTRef i
  when (UMV.length f == pos) $ do
    ff  <- UV.freeze f
    fm' <- UV.thaw $ bsToVec $ RD.encipher c $ vecToBs ff
    UMV.copy f fm'
    writeSTRef i 0

pcfbDecipherWord8 :: PCFB s -> Word8 -> ST s Word8
pcfbDecipherWord8 pc@(MkPCFB _ f i) x = do
  pcfbRefill pc
  fpos <- readSTRef i
  ff <- UMV.read f fpos
  UMV.write f fpos x
  modifySTRef i (+1)
  return $!
x `xor` ff 57 | 58 | pcfbDecipher :: PCFB s -> B.ByteString -> ST s B.ByteString 59 | pcfbDecipher pcfb b = B.pack <$> mapM (pcfbDecipherWord8 pcfb) (B.unpack b) 60 | 61 | pcfbEncipherWord8 :: PCFB s -> Word8 -> ST s Word8 62 | pcfbEncipherWord8 pc@(MkPCFB _ f i) x = do 63 | pcfbRefill pc 64 | fpos <- readSTRef i 65 | ff <- UMV.read f fpos 66 | modifySTRef i (+1) 67 | let result = x `xor` ff 68 | UMV.write f fpos result 69 | return $! result 70 | 71 | pcfbEncipher :: PCFB s -> B.ByteString -> ST s B.ByteString 72 | pcfbEncipher pcfb b = B.pack <$> mapM (pcfbEncipherWord8 pcfb) (B.unpack b) 73 | 74 | -- | converts a ByteString to an unboxed vector 75 | bsToVec :: B.ByteString -> UV.Vector Word8 76 | bsToVec bs = UV.generate (B.length bs) (B.index bs) 77 | 78 | vecToBs :: UV.Vector Word8 -> B.ByteString 79 | vecToBs v = B.pack $ UV.toList v 80 | -------------------------------------------------------------------------------- /src/lib/Freenet/Rijndael.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE ForeignFunctionInterface #-} 2 | 3 | module Freenet.Rijndael ( 4 | initRijndael, 5 | Key, initKey, encipher 6 | ) where 7 | 8 | import qualified Data.ByteString as B 9 | import qualified Data.ByteString.Base16 as HEX 10 | import Data.ByteString.Internal 11 | import Data.ByteString.Unsafe 12 | import Foreign.C.String 13 | import Foreign.C.Types 14 | import Foreign.ForeignPtr 15 | import Foreign.Marshal.Alloc 16 | import Foreign.Ptr 17 | import System.IO.Unsafe ( unsafePerformIO ) 18 | 19 | newtype Key = Key B.ByteString 20 | 21 | instance Show Key where 22 | show (Key k) = show $ HEX.encode k 23 | 24 | bsToRows :: Int -> CUInt 25 | bsToRows bs 26 | | bs == 16 = 4 27 | | bs == 24 = 6 28 | | bs == 32 = 8 29 | | otherwise = error "unsupported block size (must be 16, 24 or 32 bytes)" 30 | 31 | -- | 32 | -- initializes some Rijndael tables and *must* be run before 33 | -- using any other functions from this module 34 | initRijndael :: IO () 35 | initRijndael = c_init_tables 36 | 37 | -- | initialize key for en- and decryption use 38 | initKey 39 | :: Int -- ^ block size (16, 24 or 32 bytes) 40 | -> B.ByteString -- ^ key (16, 24 or 32 bytes) 41 | -> Key -- ^ the resulting key schedule 42 | {-# NOINLINE initKey #-} 43 | initKey bs b 44 | | len == 16 = doInit 4 45 | | len == 24 = doInit 6 46 | | len == 32 = doInit 8 47 | | otherwise = error "initKey: wrong key size (must be 16, 24 or 32 bytes)" 48 | where 49 | len = B.length b :: Int 50 | doInit nk = unsafePerformIO $ unsafeUseAsCString b $ \ikey -> do 51 | keySize <- c_rijndael_sched_key_size 52 | ptr <- mallocBytes keySize 53 | c_rijn_sched_key ptr (castPtr ikey) (bsToRows bs) nk 54 | fptr <- newForeignPtr c_free_finalizer (castPtr ptr) 55 | return $ Key $ fromForeignPtr fptr 0 keySize 56 | 57 | encipher :: Key -> B.ByteString -> B.ByteString 58 | {-# NOINLINE encipher #-} 59 | encipher (Key k) b 60 | | B.length b /= 32 = error "encipher: wrong block size (must be 32 bytes)" 61 | | otherwise = unsafePerformIO $ create 32 $ \rp -> do 62 | let (kp, off, _) = toForeignPtr k in withForeignPtr kp $ \kpf -> 63 | let (ip, ioff, _) = toForeignPtr b in withForeignPtr ip $ \ipf -> 64 | c_rijn_encrypt (castPtr $ kpf `plusPtr` off) (castPtr $ ipf `plusPtr` ioff) (castPtr rp) 65 | 66 | foreign import ccall "rijndael.h init_tables" 67 | c_init_tables :: IO () 68 | 69 | foreign import ccall "rijndael.h rijndael_sched_key_size" 70 | c_rijndael_sched_key_size :: IO Int 71 | 72 | foreign import ccall "rijndael.h 
rijndael_init_key"
  c_rijn_sched_key :: Ptr Key -> CString -> CUInt -> CUInt -> IO ()

foreign import ccall "rijndael.h encrypt_block"
  c_rijn_encrypt :: Ptr Key -> CString -> CString -> IO ()

-------------------------------------------------------------------------------- /src/lib/Freenet/SplitFile.hs: --------------------------------------------------------------------------------

{-# LANGUAGE OverloadedStrings #-}

module Freenet.SplitFile (
  SplitFile(..), SplitFileSegment(..),
  fetchSplitFile
  ) where

import qualified Codec.FEC as FEC
import Control.Concurrent ( forkIO )
import Control.Concurrent.STM
import qualified Control.Exception as CE
import Control.Monad ( replicateM_ )
import Data.Word
import qualified Data.ByteString.Lazy as BSL
import Data.Function ( on )
import Data.List ( sortBy )
import qualified Data.Text as T
import System.Log.Logger
import System.Random ( newStdGen, randomR )

import Freenet.Compression
import Freenet.Mime
import Freenet.URI
import Requests

data SplitFileSegment
  = SplitFileSegment
    { sfUri   :: ! URI  -- ^ the URI where this segment can be fetched
    , sfsData :: ! Bool -- ^ True if this is a data block, False if it's a check block
    }
    deriving ( Eq, Ord, Show )

data SplitFile = SplitFile
  { sfCompression    :: CompressionCodec   -- ^ the compression codec used by this splitfile
  , sfCompressedSize :: Word64             -- ^ size of compressed data, equals original size if not compressed
  , sfOriginalSize   :: Word64             -- ^ size of original data before compression was applied
  , sfSegments       :: [SplitFileSegment] -- ^ the segments this splitfile consists of
  , sfMime           :: Maybe Mime         -- ^ MIME type of the target data
  } deriving ( Eq, Ord, Show )

logD :: String -> IO ()
logD = debugM "freenet.splitfile"

-- |
-- Fetch the contents of a SplitFile. A SplitFile consists of @k@
-- data blocks plus a number of FEC check blocks. We accomplish this by
-- fetching a random subset of @k@ blocks, and then running the FEC codec to
-- reassemble the original data.
fetchSplitFile :: (UriFetch a) => a -> SplitFile -> IO (Either T.Text BSL.ByteString)
fetchSplitFile fn (SplitFile comp dlen _ segs _) = do -- TODO: we're not returning the MIME and ignoring the original length

  let
    total = length segs
    k     = length $ filter (\(SplitFileSegment _ isd) -> isd) segs -- # of primary blocks
    blist = zip [0..]
$ map (\(SplitFileSegment uri _) -> uri) segs 57 | 58 | logD $ "fetch split file " ++ show total ++ " " ++ show k 59 | 60 | todo <- newTVarIO blist 61 | done <- newTVarIO [] 62 | rng <- newStdGen >>= newTVarIO 63 | running <- newTVarIO k 64 | 65 | -- try to fetch k randomly chosen blocks 66 | let 67 | download = do 68 | -- choose block to fetch 69 | next <- atomically $ do 70 | todo' <- readTVar todo 71 | done' <- readTVar done 72 | rng' <- readTVar rng 73 | 74 | if null todo' || length done' == k 75 | then return Nothing 76 | else let (idx, rng'') = randomR (0, length todo' - 1) rng' 77 | (ys, zs) = splitAt idx todo' 78 | in do 79 | writeTVar todo $ ys ++ tail zs 80 | writeTVar rng rng'' 81 | return $ Just $ head zs 82 | 83 | case next of 84 | Nothing -> return () 85 | Just (idx, uri) -> do 86 | result <- getUriData fn uri 87 | case result of 88 | Left _ -> return () 89 | Right (blk, _) -> atomically $ modifyTVar' done ((:) (idx, blk)) 90 | 91 | download 92 | 93 | replicateM_ k $ forkIO $ download >> atomically (modifyTVar' running pred) 94 | 95 | -- wait until k blocks have been downloaded 96 | 97 | fetched <- atomically $ do 98 | done' <- readTVar done 99 | if length done' >= k 100 | then return $ Right (sortBy (compare `on` fst) done') 101 | else do 102 | running' <- readTVar running 103 | todo' <- readTVar todo 104 | 105 | if length done' + length todo' + running' >= k 106 | then retry 107 | else return $ Left $ "could not download enough blocks " `T.append` T.pack (show (length done', length todo', running')) 108 | 109 | case fetched of 110 | Left e -> return $ Left e 111 | Right bs -> do 112 | fec <- CE.catch 113 | (return $ Right $ FEC.decode (FEC.fec k total) $ take k bs) 114 | (\e -> return $ Left $ T.pack $ show ( e :: CE.ErrorCall)) 115 | 116 | case fec of 117 | Left e -> return $ Left e 118 | Right bs'' -> decompress comp $ BSL.take (fromIntegral dlen) $ BSL.fromChunks bs'' 119 | -------------------------------------------------------------------------------- /src/lib/Freenet/Ssk.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE MultiParamTypeClasses, OverloadedStrings #-} 3 | 4 | module Freenet.Ssk ( 5 | SskRequest(..), SskBlock(..), mkSskBlock, 6 | sskLocation, sskLocation', sskEncryptDocname, 7 | decompressSsk, 8 | 9 | -- * SSK Headers 10 | SskHeader, mkSskHeader, sskDataSize, sskHeaderSize, 11 | 12 | -- * DSA 13 | PubKey, mkPubKey, hashPubKey, pubKeySize 14 | ) where 15 | 16 | import Control.Applicative ( (<$>), (<*>) ) 17 | import Control.Monad ( mzero ) 18 | import Control.Monad.ST ( runST ) 19 | import qualified Data.Aeson as JSON 20 | import Data.Binary 21 | import Data.Binary.Get 22 | import Data.Bits ( (.&.), (.|.), shiftL ) 23 | import qualified Data.ByteString as BS 24 | import qualified Data.ByteString.Lazy as BSL 25 | import Data.Digest.Pure.SHA 26 | import Data.Monoid ( (<>) ) 27 | import qualified Data.Text as T 28 | import Data.Text.Encoding ( encodeUtf8 ) 29 | 30 | import qualified Crypto.PubKey.DSA as DSA 31 | import Data.Binary.Put 32 | 33 | import Freenet.Base64 34 | import Freenet.Compression 35 | import Freenet.Pcfb 36 | import qualified Freenet.Rijndael as RD 37 | import Freenet.Types 38 | import Utils 39 | 40 | -------------------------------------------------------------------------------- 41 | -- Header 42 | -------------------------------------------------------------------------------- 43 | 44 | -- | 45 | -- the header of an SSK data block, consisting of: 46 | -- 0 word16 : hash algorithm, 
must be SHA256 47 | -- 2 word16 : symmetric cipher identifier 48 | -- 4 32 bytes : E(H(docname)) 49 | -- 36 36 bytes : encrypted part of the headers 50 | -- 72 32 bytes : signature parameter R 51 | -- 104 32 bytes : signature parameter S 52 | -- 136 : bytes total length 53 | newtype SskHeader = SskHeader { unSskHeader :: BS.ByteString } deriving ( Eq ) 54 | 55 | sskHeaderSize :: Int 56 | sskHeaderSize = 136 57 | 58 | instance Show SskHeader where 59 | show (SskHeader bs) = T.unpack $ toBase64' bs 60 | 61 | instance Binary SskHeader where 62 | put (SskHeader h) = putByteString h 63 | get = SskHeader <$> getByteString sskHeaderSize 64 | 65 | mkSskHeader :: BS.ByteString -> Either T.Text SskHeader 66 | mkSskHeader bs 67 | | BS.length bs == sskHeaderSize = Right $ SskHeader bs 68 | | otherwise = Left $ "SSK header length must be 136 bytes, got " `T.append` T.pack (show $ BS.length bs) 69 | 70 | sskHeaderHashId :: SskHeader -> Word16 71 | sskHeaderHashId h = runGet getWord16be $ bsFromStrict $ unSskHeader h 72 | 73 | -- | 74 | -- Returns the signature (r, s) parameter for verifying the payload. 75 | sskHeaderRS :: SskHeader -> (Integer, Integer) 76 | sskHeaderRS h = (r, s) where 77 | r = bsToPosI $ BS.take 32 $ BS.drop 72 $ unSskHeader h 78 | s = bsToPosI $ BS.take 32 $ BS.drop 104 $ unSskHeader h 79 | 80 | -- | 81 | -- Returns the encrypted part of the header, which holds the 82 | -- symmetric key needed to decrypt the data, and also the original 83 | -- length of the plaintext. 84 | sskHeaderEncrypted :: SskHeader -> BS.ByteString 85 | sskHeaderEncrypted h = BS.take 36 $ BS.drop 36 $ unSskHeader h 86 | 87 | sskHeaderEHDocname :: SskHeader -> BS.ByteString 88 | sskHeaderEHDocname h = BS.take 32 $ BS.drop 4 $ unSskHeader h 89 | 90 | -------------------------------------------------------------------------------------- 91 | -- Data 92 | -------------------------------------------------------------------------------------- 93 | -- | size of the SSK payload 94 | sskDataSize :: Int 95 | sskDataSize = 1024 96 | 97 | data SskBlock = SskBlock !Key !PubKey !SskHeader !BS.ByteString 98 | 99 | instance Show SskBlock where 100 | show (SskBlock k _ h d) = "SskBlock {k=" ++ show k ++ ", h=" ++ (show h) ++ ", len=" ++ (show $ BS.length d) ++ "}" 101 | 102 | instance StorePersistable SskBlock where 103 | storeSize = \_ -> 32 + pubKeySize + sskHeaderSize + sskDataSize 104 | storePut = \(SskBlock k pk h d) -> put k >> put pk >> put h >> putByteString d 105 | storeGet = do 106 | (k, pk, h, d) <- (,,,) <$> get <*> get <*> get <*> getByteString sskDataSize 107 | case mkSskBlock k h d pk of 108 | Right df -> return df 109 | Left e -> fail $ T.unpack e 110 | 111 | instance DataBlock SskBlock where 112 | dataBlockLocation (SskBlock loc _ _ _) = freenetLocation loc $ (2 `shiftL` 8) + 2 113 | decryptDataBlock = decryptSskBlock 114 | 115 | instance Binary SskBlock where 116 | put = storePut 117 | get = storeGet 118 | 119 | instance JSON.ToJSON SskBlock where 120 | toJSON (SskBlock l pk h d) = JSON.object 121 | [ "location" JSON..= l 122 | , "header" JSON..= (toBase64' . 
unSskHeader) h 123 | , "data" JSON..= (toBase64') d 124 | , "pubKey" JSON..= pk 125 | ] 126 | 127 | mkSskBlock 128 | :: Key -- ^ location 129 | -> SskHeader -- ^ header 130 | -> BS.ByteString -- ^ payload 131 | -> PubKey -- ^ public key needed for verifying the signature 132 | -> Either T.Text SskBlock 133 | mkSskBlock k h d pk 134 | | sskHeaderHashId h /= 1 = Left "hash must be SHA-256" 135 | | DSA.verify dsaMod (unPublicKey pk) sig overallHash = result -- yes, we really have to try two times because Freenet has this 136 | | DSA.verify id (unPublicKey pk) sig overallHash = result -- strange bug I don't really understand. :-/ 137 | | otherwise = Left "signature did not verify" 138 | where 139 | result = Right $ SskBlock k pk h d 140 | overallHash = bsToStrict $ bytestringDigest $ sha256 $ BSL.fromChunks [hashHeader, dataHash] 141 | dataHash = bsToStrict $ bytestringDigest $ sha256 $ bsFromStrict d 142 | hashHeader = BS.take 72 $ unSskHeader h 143 | sig = uncurry DSA.Signature $ sskHeaderRS h 144 | 145 | decryptSskBlock :: SskBlock -> Key -> Either T.Text (BS.ByteString, Int) 146 | decryptSskBlock (SskBlock _ _ h d) key 147 | -- | calg /= 2 = Left $ T.pack $ "unknown SSK crypto algorithm " ++ show calg 148 | | dataLength < (fromIntegral origDataLength) = Left $ "data length mismatch" 149 | | otherwise = Right (plaintext, fromIntegral origDataLength) -- BSL.take (fromIntegral origDataLength) plaintext 150 | where 151 | dataLength = BS.length plaintext 152 | plaintext = runST $ do 153 | pcfb <- mkPCFB docKey docIv 154 | pcfbDecipher pcfb d 155 | 156 | docKey = RD.initKey 32 $ BS.take 32 plainHeader 157 | docIv = BS.take 32 plainHeader -- TODO: is this really a good idea? Freenet does so, we have no choice anyway, but still 158 | origDataLength = (runGet getWord16be $ bsFromStrict $ BS.take 2 $ BS.drop 32 plainHeader) .&. 0x7fff 159 | 160 | plainHeader = runST $ do 161 | pcfb <- mkPCFB headerKey headerIv 162 | pcfbDecipher pcfb $ sskHeaderEncrypted h 163 | 164 | headerKey = RD.initKey 32 $ unKey key 165 | headerIv = sskHeaderEHDocname h 166 | 167 | -- | 168 | -- for SSKs, the routing key is determined by 169 | -- H(PK) and the encrypted document name's hash E(H(docname)) 170 | sskLocation 171 | :: Key -- ^ the public key hash 172 | -> Key -- ^ the crypto key (required to encrypt the docname) 173 | -> T.Text -- ^ the document name 174 | -> Key -- ^ the resulting routing key 175 | sskLocation hpk ckey docname = sskLocation' hpk ehd where 176 | ehd = sskEncryptDocname ckey docname 177 | 178 | -- | 179 | -- determines the location for a SSK document 180 | sskLocation' 181 | :: Key -- ^ hash (public key) 182 | -> Key -- ^ encrypt ( hash ( docname ) ) 183 | -> Key -- ^ routing key 184 | sskLocation' hpk ehd = mkKey' $ bsToStrict $ bytestringDigest $ sha256 $ BSL.fromChunks [unKey ehd, unKey hpk] 185 | 186 | -- | 187 | -- encrypts the hash of an SSK document name. 
this is needed 188 | -- to determine the location of an SSK document 189 | sskEncryptDocname 190 | :: Key -- ^ the crypto key (second part of the SSK URI) 191 | -> T.Text -- ^ the document name (first path element of SSK URI) 192 | -> Key -- ^ the encrypted document name 193 | sskEncryptDocname ckey docname = mkKey' $ RD.encipher rjk dnh where 194 | rjk = RD.initKey 32 $ unKey ckey -- prepare encryption key 195 | dnh = bsToStrict $ bytestringDigest $ sha256 $ bsFromStrict (encodeUtf8 docname) 196 | 197 | ----------------------------------------------------------------------------------------------- 198 | -- DSA 199 | ----------------------------------------------------------------------------------------------- 200 | 201 | putMpi :: Integer -> Put 202 | putMpi i = putWord16be (len * 8 - 8) >> putByteString bs where 203 | bs = i2bs i 204 | len = fromIntegral $ BS.length bs 205 | 206 | getMpi :: Get Integer 207 | getMpi = do 208 | len <- (\x -> (x + 8) `div` 8) <$> getWord16be 209 | bs <- getByteString $ fromIntegral len 210 | return $ bs2i bs 211 | 212 | putGroup :: DSA.Params -> Put 213 | putGroup (DSA.Params p g q) = putMpi p >> putMpi q >> putMpi g 214 | 215 | getGroup :: Get DSA.Params 216 | getGroup = do 217 | p <- getMpi 218 | q <- getMpi 219 | g <- getMpi 220 | return $ DSA.Params p g q 221 | 222 | newtype PubKey = PK { unPublicKey :: DSA.PublicKey } deriving ( Show ) 223 | 224 | instance Binary PubKey where 225 | put = putPk 226 | get = do 227 | before <- bytesRead 228 | grp <- getGroup 229 | y <- getMpi 230 | after <- bytesRead 231 | skip $ pubKeySize - (fromIntegral $ after - before) 232 | return $ PK (DSA.PublicKey grp y) 233 | 234 | instance JSON.ToJSON PubKey where 235 | toJSON (PK (DSA.PublicKey (DSA.Params p g q) y)) = 236 | JSON.object [ "p" JSON..= toBase64' p 237 | , "g" JSON..= toBase64' g 238 | , "q" JSON..= toBase64' q 239 | , "y" JSON..= toBase64' y 240 | ] 241 | 242 | mkPubKey :: BS.ByteString -> Either T.Text PubKey 243 | mkPubKey bs = case decodeOrFail (bsFromStrict bs) of 244 | Left (_, _, e) -> Left $ T.pack e 245 | Right (_, _, pk) -> Right pk 246 | 247 | pubKeySize :: Int 248 | pubKeySize = 1024 249 | 250 | -- | 251 | -- put without padding 252 | putPublicKey :: PubKey -> Put 253 | putPublicKey (PK (DSA.PublicKey grp y)) = putGroup grp >> putMpi y 254 | 255 | hashPubKey :: PubKey -> Key 256 | hashPubKey pk = mkKey' $ bsToStrict $ bytestringDigest $ sha256 $ runPut $ putPublicKey pk 257 | 258 | putPk :: PubKey -> Put 259 | putPk pk = putLazyByteString d >> putLazyByteString pad where 260 | pad = BSL.replicate (fromIntegral $ pubKeySize - (fromIntegral $ BSL.length d)) 0 261 | d = runPut $ putPublicKey pk 262 | 263 | dsaMod :: BS.ByteString -> BS.ByteString 264 | dsaMod bs = padBs (BS.length bs) (i2bs i') where 265 | i' = i .&. mask 266 | i = bs2posI bs 267 | mask = 2 ^ (255 :: Integer) - 1 268 | 269 | padBs :: Int -> BS.ByteString -> BS.ByteString 270 | padBs pl b 271 | | l == pl = b 272 | | l > pl = error "already bigger than padded length" 273 | | otherwise = BS.replicate (pl - l) 0 <> b 274 | where 275 | l = BS.length b 276 | 277 | bs2posI :: BS.ByteString -> Integer 278 | bs2posI = BS.foldl' (\a b -> (256 * a) .|. (fromIntegral b)) 0 279 | 280 | data SskRequest = SskRequest 281 | { sskReqPkh :: ! Key 282 | , sskReqEhd :: ! Key 283 | , sskReqAlg :: ! 
Word8 284 | } deriving ( Show ) 285 | 286 | instance Binary SskRequest where 287 | put (SskRequest pk eh a) = put pk >> put eh >> put a 288 | get = SskRequest <$> get <*> get <*> get 289 | 290 | instance DataRequest SskRequest where 291 | dataRequestLocation (SskRequest pkh ehd alg) = freenetLocation (sskLocation' pkh ehd) ((2 `shiftL` 8) + (fromIntegral alg)) 292 | 293 | instance JSON.FromJSON SskRequest where 294 | parseJSON (JSON.Object v) = SskRequest 295 | <$> v JSON..: "pubKeyHash" 296 | <*> v JSON..: "ehDocName" 297 | <*> v JSON..: "algorithm" 298 | parseJSON _ = mzero 299 | 300 | decompressSsk :: CompressionCodec -> BSL.ByteString -> IO (Either T.Text BSL.ByteString) 301 | decompressSsk codec inp = decompress codec $ BSL.drop 2 inp 302 | -------------------------------------------------------------------------------- /src/lib/Freenet/Store.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE OverloadedStrings, PackageImports, ScopedTypeVariables #-} 3 | 4 | module Freenet.Store ( 5 | StoreFile, mkStoreFile, shutdownStore, 6 | putData, getData 7 | ) where 8 | 9 | import qualified Control.Concurrent.ReadWriteLock as Lock 10 | import Control.Concurrent.STM 11 | import Control.Monad ( liftM, unless, void, when ) 12 | import Data.Aeson 13 | import Data.Binary 14 | import Data.Binary.Get ( runGetOrFail ) 15 | import Data.Binary.Put ( runPut ) 16 | import qualified Data.ByteString as BS 17 | import qualified Data.ByteString.Internal as BSI 18 | import qualified Data.ByteString.Unsafe as BSU 19 | import Data.Hashable 20 | import Foreign.Ptr ( castPtr, plusPtr ) 21 | import System.Directory ( renameFile ) 22 | import System.IO.Error ( catchIOError ) 23 | import System.Log.Logger 24 | import System.Posix.Files ( setFdSize ) 25 | import System.Posix.IO ( OpenMode(..), defaultFileFlags, openFd, closeFd ) 26 | import "unix-bytestring" System.Posix.IO.ByteString ( fdPreadBuf, fdPwriteBuf ) 27 | import System.Posix.Types ( ByteCount, Fd, FileOffset ) 28 | import System.Random ( randomRIO ) 29 | 30 | import Freenet.Types 31 | import Statistics 32 | import Types 33 | import Utils 34 | 35 | ---------------------------------------------------------------- 36 | -- store types 37 | ---------------------------------------------------------------- 38 | 39 | type ReadOffset f = FileOffset -> IO (Maybe f) 40 | type WriteOffset f = FileOffset -> f -> IO () 41 | 42 | data StoreFile f = StoreFile 43 | { sfLock :: ! Lock.RWLock -- ^ for accessing the handle 44 | , sfFileName :: ! FilePath 45 | , sfEntrySize :: ! Int 46 | , sfEntryCount :: ! Int 47 | , sfReads :: ! (TVar Word64) -- ^ total number of reads 48 | , sfReadSuccess :: ! (TVar Word64) -- ^ number of successful reads 49 | , sfHistogram :: ! THistogram 50 | , sfFd :: ! Fd -- ^ file descriptor 51 | , sfReadOffset :: ! (ReadOffset f) -- ^ try to read data from the given offset 52 | , sfWriteOffset :: ! (WriteOffset f) -- ^ actually write data to the given offset 53 | } 54 | 55 | instance ToStateJSON (StoreFile f) where 56 | toStateJSON sf = do 57 | r <- readTVar $ sfReads sf 58 | s <- readTVar $ sfReadSuccess sf 59 | h <- toStateJSON $ sfHistogram sf 60 | 61 | return $ object 62 | [ "entrySize" .= sfEntrySize sf 63 | , "capacity" .= sfEntryCount sf 64 | , "readRequests" .= r 65 | , "readSuccess" .= s 66 | , "histogram" .= h 67 | ] 68 | 69 | logI :: String -> IO () 70 | logI = infoM "freenet.store" 71 | 72 | mkStoreFile :: forall f. 
StorePersistable f => FilePath -> Int -> IO (StoreFile f)
mkStoreFile fileName count = do

  let
    entrySize = 1 + storeSize (undefined :: f)
    fileSize  = count * fromIntegral entrySize

  handle <- openFd fileName ReadWrite (Just 0o600) defaultFileFlags -- rw for the owner only
  setFdSize handle $ fromIntegral fileSize

  rds <- newTVarIO 0
  scs <- newTVarIO 0
  lck <- Lock.new
  (needScan, hist) <- readStats fileName

  let sf = StoreFile lck fileName entrySize count rds scs hist handle
             (readOffset sf) (writeOffset sf)

  when needScan $ void $ scanStore sf

  return sf

readStats :: FilePath -> IO (Bool, THistogram)
readStats fp = do
  hist <- catchIOError doRead handler >>= \(ns, x) -> do
    h <- atomically $ thawHistogram x
    return (ns, h)

  catchIOError (renameFile fname $ fname ++ ".bak") $ const $ return ()
  return hist
  where
    fname  = fp ++ "-histogram"
    doRead = decodeFile fname >>= \x -> return (False, x)
    handler e = do
      logI $ "error reading histogram: " ++ show e
      return (True, mkHistogram 256)

shutdownStore :: StoreFile f -> IO ()
shutdownStore sf = do
  logI "shutting down store"

  closeFd $ sfFd sf
  hist <- atomically $ freezeHistogram (sfHistogram sf)
  encodeFile (sfFileName sf ++ "-histogram") hist

scanStore :: (DataBlock f) => StoreFile f -> IO ()
scanStore sf = mapM_ checkOffset offsets
  where
    ecount  = fromIntegral $ sfEntryCount sf
    esize   = fromIntegral $ sfEntrySize sf
    offsets = [0, esize .. (ecount - 1) * esize] :: [FileOffset]

    checkOffset o = do
      r <- sfReadOffset sf o

      case r of
        Nothing -> return ()
        Just df -> atomically $ histInc (sfHistogram sf) $ dataBlockLocation df

locOffsets :: StoreFile f -> Key -> [FileOffset]
locOffsets sf loc = map (\i -> (fromIntegral i `rem` fromIntegral count) * fromIntegral entrySize) [idx .. idx + 5]
  where
    idx       = (fromIntegral $ hash loc :: Word32) `rem` fromIntegral count
    count     = sfEntryCount sf
    entrySize = sfEntrySize sf

pwriteBS :: Fd -> BS.ByteString -> FileOffset -> IO ()
pwriteBS fd bs o = BSU.unsafeUseAsCStringLen bs $ \(buf, len) -> go buf len 0 where
  go buf len done = fdPwriteBuf fd woff remain roff >>= \written -> unless
    ((done + fromIntegral written) == fromIntegral len)
    (go buf len $ done + fromIntegral written)
    where
      woff   = castPtr buf `plusPtr` done
      remain = fromIntegral len - fromIntegral done
      roff   = o + fromIntegral done

preadBS :: Fd -> FileOffset -> ByteCount -> IO BS.ByteString
preadBS fd o len = BSI.create (fromIntegral len) $ \buf -> do
  let
    go done = fdPreadBuf fd (castPtr buf `plusPtr` fromIntegral done) (len - done) (o + fromIntegral done) >>= \rd ->
      if rd == 0
        then error "hit EOF while reading"
        else unless ((done + fromIntegral rd) == len) $
               go (done + fromIntegral rd)

  go 0

-- | Actually write data to the given offset and update stats.
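-- (Slot layout, read off doPut here and doGet in readOffset below: every
-- slot starts with one flag byte, where 1 marks an occupied slot, so the
-- zero-filled fresh file reads as all-empty, followed by the block's
-- storePut encoding. This is also why mkStoreFile above sizes entries as
-- 1 + storeSize.)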
160 | writeOffset :: StorePersistable f => StoreFile f -> FileOffset -> f -> IO () 161 | writeOffset sf o df = do 162 | Lock.withWrite (sfLock sf) $ pwriteBS (sfFd sf) (bsToStrict $ runPut doPut) o 163 | logI $ show loc ++ " written at " ++ show o 164 | atomically $ histInc (sfHistogram sf) loc 165 | where 166 | loc = dataBlockLocation df 167 | doPut = putWord8 1 >> storePut df 168 | 169 | putData :: StorePersistable f => StoreFile f -> f -> IO () 170 | putData sf df = go [] (locOffsets sf loc) where 171 | loc = dataBlockLocation df 172 | -- there are no free slots, we must decide if and which we want to overwrite 173 | go olds [] = do 174 | p <- randomRIO (0, 1) :: IO Double 175 | unless (p > 0.1) $ do -- TODO: use something clever instead of 0.1, maybe try to match the incoming req. distribution? 176 | -- we want to overwrite 177 | (o, loc') <- liftM (olds !!) $ randomRIO (0, length olds - 1) 178 | atomically $ histDec (sfHistogram sf) loc' 179 | sfWriteOffset sf o df 180 | 181 | -- we still have some candidate slots which are possibly empty, check them 182 | go olds (o:os) = sfReadOffset sf o >>= \old -> case old of 183 | Nothing -> sfWriteOffset sf o df -- place data in previously empty slot 184 | Just df' -> let loc' = dataBlockLocation df' -- we're done if the data is already there 185 | in unless (loc == loc') $ go ((o, loc'):olds) os 186 | 187 | readOffset :: StorePersistable f => StoreFile f -> FileOffset -> IO (Maybe f) 188 | readOffset sf offset = do 189 | d <- Lock.withRead (sfLock sf) $ preadBS (sfFd sf) offset (fromIntegral $ sfEntrySize sf) 190 | 191 | case runGetOrFail doGet (bsFromStrict d) of 192 | Left (_, _, _) -> return Nothing 193 | Right (_, _, df) -> return $ Just df 194 | 195 | where 196 | doGet = getWord8 >>= \flags -> case flags of 197 | 1 -> storeGet 198 | _ -> fail "empty slot" 199 | 200 | getData :: StorePersistable f => StoreFile f -> Key -> IO (Maybe f) 201 | getData sf key = doRead key >>= \result -> countRead result >> return result where 202 | 203 | countRead r = atomically $ modifyTVar' (sfReads sf) (+1) >> case r of 204 | Nothing -> return () 205 | Just _ -> modifyTVar' (sfReadSuccess sf) (+1) 206 | 207 | doRead loc = go (locOffsets sf loc) where 208 | go [] = return Nothing -- no offsets left, terminate 209 | go (o:os) = do 210 | d <- sfReadOffset sf o 211 | 212 | case d of 213 | Nothing -> go os 214 | Just df -> if loc == dataBlockLocation df 215 | then return $ Just df 216 | else go os 217 | -------------------------------------------------------------------------------- /src/lib/Freenet/Types.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE OverloadedStrings, MultiParamTypeClasses #-} 3 | 4 | module Freenet.Types ( 5 | Key(..), mkKey, mkKey', unKey, 6 | 7 | DataRequest(..), 8 | DataBlock(..), 9 | freenetLocation, 10 | 11 | StorePersistable(..) 
) where

import Control.Applicative ( (<$>), pure )
import Control.Monad ( mzero, replicateM )
import qualified Data.Aeson as JSON
import Data.Binary
import Data.Bits ( shiftL, testBit, xor )
import qualified Data.ByteString as BS
import qualified Data.ByteString.Lazy as BSL
import Data.Digest.Pure.SHA ( bytestringDigest, sha256 )
import Data.Hashable
import qualified Data.Text as T
import Test.QuickCheck

import Freenet.Base64
import Types
import Utils

newtype Key = Key { unKey' :: Id } deriving ( Eq, Ord )

unKey :: Key -> BS.ByteString
unKey = unId . unKey'

keySize :: Int
keySize = 32

instance Show Key where
  show (Key i) = T.unpack $ toBase64' $ unId i

instance HasLocation Key where
  hasLocToInteger k = hasLocToInteger $ unKey' k
  hasLocMax = Key $ (hasLocMax :: Id)

instance Hashable Key where
  hashWithSalt s k = hashWithSalt s (unId $ unKey' k)

instance Binary Key where
  put (Key i) = put i
  get = Key <$> get

instance JSON.FromJSON Key where
  parseJSON (JSON.String s) = case fromBase64' s of
    Left e   -> fail $ T.unpack e
    Right bs -> case mkKey bs of
      Right k -> pure k
      Left e  -> fail $ T.unpack e

  parseJSON _ = mzero

instance JSON.ToJSON Key where
  toJSON = JSON.toJSON . toBase64' . unKey

instance HasId Key where
  getId = unKey'

instance Arbitrary Key where
  arbitrary = mkKey' . BS.pack <$> replicateM 32 arbitrary

mkKey :: BS.ByteString -> Either T.Text Key
mkKey bs = if BS.length bs == keySize
           then Right $ Key $ mkId' bs
           else Left $ "keys must be 32 bytes"

mkKey' :: BS.ByteString -> Key
mkKey' bs = Key $ mkId' bs

-----------------------------------------------------------------------------------------
-- data blocks, requests and routing them
-----------------------------------------------------------------------------------------

-- |
-- A still encrypted, but verified, chunk of data.
class DataBlock f where

  -- |
  -- Use @freenetLocation@ to implement this. Really.
  dataBlockLocation :: f -> Key

  -- |
  -- Decrypts the data block. Be aware that the resulting data may still
  -- be compressed. If and how to decompress is specific to the data block.
  decryptDataBlock
    :: f
    -> Key                                -- ^ the secret key
    -> Either T.Text (BS.ByteString, Int) -- ^ either an error or (decrypted payload, original length)

class DataBlock a => StorePersistable a where
  storeSize :: a -> Int
  storePut  :: a -> Put
  storeGet  :: Get a

-- |
-- Gives the true location (aka "routing key") of a data block. When it comes to actually
-- routing data, Freenet does not directly use the routing key stored with the blocks, but
-- does another round of SHA256 using the location and the key type (a Word16) as input.
-- The top 64 bits of the result are then interpreted as a signed long, which gets its sign bit
-- reset, giving the true routing destination. It's in freenet.keys.Key.toNormalizedDouble().
-- Sigh.
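-- (For a concrete instance of this scheme, see Freenet.Ssk above: SskBlock's
-- dataBlockLocation is freenetLocation loc ((2 `shiftL` 8) + 2), and
-- SskRequest's dataRequestLocation builds the same Word16 from the key type
-- in the high byte and the crypto algorithm in the low byte.)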
110 | -- 111 | -- What this function aims for: 112 | -- * we want locations close in Freenet keyspace to be close in our keyspace as well (and vice versa) 113 | -- * we do not want to lose 203 bits of precision which are caused by 114 | -- Freenet's conversion to a double 115 | freenetLocation 116 | :: Key -- ^ the original location stored with the request / block 117 | -> Word16 -- ^ the "type", see freenet.keys.Key.getType() 118 | -> Key -- ^ the modified location 119 | freenetLocation key tp = mkKey' padded where 120 | digest = bsToStrict $ bytestringDigest $ sha256 $ BSL.fromChunks [unKey key, bsToStrict $ encode tp] 121 | digest' = let (msb, lsb) = BS.splitAt 8 digest in bsToPosI $ (BS.reverse msb) `BS.append` lsb 122 | digest'' 123 | | testBit digest' 255 = (digest' `xor` (0xffffffffffffffff `shiftL` 192)) `shiftL` 1 124 | | otherwise = digest' `shiftL` 1 125 | bs = posIToBs digest'' 126 | padded = (BS.replicate (32 - BS.length bs) 0) `BS.append` bs 127 | 128 | class DataRequest a where 129 | 130 | -- | 131 | -- Use @freenetLocation@ to implement this. 132 | dataRequestLocation :: a -> Key 133 | 134 | -------------------------------------------------------------------------------- /src/lib/Freenet/URI.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE OverloadedStrings #-} 3 | 4 | module Freenet.URI ( 5 | URI(..), parseUri, 6 | isControlDocument, uriPath, uriCryptoKey, 7 | appendUriPath, 8 | 9 | -- * CHKs 10 | ChkExtra, mkChkExtra, chkExtraCrypto, chkExtraCompression, 11 | 12 | -- * SSKs 13 | sskExtraCrypto 14 | ) where 15 | 16 | import Control.Applicative ( (<$>) ) 17 | import Control.Monad ( replicateM ) 18 | import Data.Binary 19 | import Data.Binary.Get 20 | import Data.Binary.Put 21 | import qualified Data.ByteString as BS 22 | import qualified Data.Text as T 23 | import Data.Text.Encoding ( decodeUtf8', encodeUtf8 ) 24 | 25 | import Freenet.Base64 26 | import Freenet.Compression 27 | import Freenet.Types 28 | import Utils 29 | 30 | ----------------------------------------------------------------- 31 | -- inserting / fetching data 32 | ----------------------------------------------------------------- 33 | 34 | data URI 35 | = CHK 36 | { chkLocation :: Key -- ^ the routing key 37 | , chkKey :: Key -- ^ the crypto key 38 | , chkExtra :: ChkExtra -- ^ extra data about algorithms used, always 5 bytes 39 | , chkPath :: [T.Text] -- ^ the path, already split at "/" chars 40 | } 41 | | SSK 42 | { sskPubKeyHash :: Key -- ^ the hash of the public key 43 | , sskKey :: Key -- ^ the crypto key which allows to decrypt the payload 44 | , sskExtra :: SskExtra -- ^ extra information about used algorithms and document type 45 | , sskDocName :: T.Text -- ^ the document name, which is the mandatory first path element 46 | , sskPath :: [T.Text] -- ^ the remainder of the path 47 | } 48 | | USK 49 | { uskPubKeyHash :: Key 50 | , uskKey :: Key 51 | , uskExtra :: SskExtra 52 | , uskDocName :: T.Text 53 | , uskRevision :: Integer 54 | , uskPath :: [T.Text] 55 | } deriving ( Eq, Ord ) 56 | 57 | instance Show URI where 58 | show (CHK l k e p) = 59 | "CHK@" ++ show l ++ "," ++ show k ++ "," ++ show e ++ "/" ++ (T.unpack $ T.intercalate "/" p) 60 | 61 | show (SSK l k e d p) = 62 | "SSK@" ++ show l ++ "," ++ show k ++ "," ++ show e ++ "/" ++ T.unpack d ++ "/" ++ (T.unpack $ T.intercalate "/" p) 63 | 64 | show (USK l k e d r p) = 65 | "USK@" ++ show l ++ "," ++ show k ++ "," ++ show e ++ "/" ++ T.unpack d ++ "/" ++ show r ++ "/" ++ (T.unpack $ 
T.intercalate "/" p)

uriCryptoKey :: URI -> Key
uriCryptoKey (CHK _ k _ _)     = k
uriCryptoKey (SSK _ k _ _ _)   = k
uriCryptoKey (USK _ k _ _ _ _) = k

appendUriPath :: URI -> [T.Text] -> URI
appendUriPath uri@(CHK {}) p = uri { chkPath = (chkPath uri) ++ p }
appendUriPath uri@(SSK {}) p = uri { sskPath = (sskPath uri) ++ p }
appendUriPath uri@(USK {}) p = uri { uskPath = (uskPath uri) ++ p }

-- |
-- this should be compatible with Java's DataOutput.writeUTF(..)
-- method. FIXME: make it so!
getUTF8 :: Get T.Text
getUTF8 = do
  len <- getWord16be
  bs  <- getByteString $ fromIntegral len
  case decodeUtf8' bs of
    Left e  -> fail $ "error in getUTF8: " ++ show e
    Right t -> return t

putUTF8 :: T.Text -> Put
putUTF8 txt
  | len > (fromIntegral (maxBound :: Word16)) = error "putUTF8: string too long"
  | otherwise = putWord16be (fromIntegral len) >> putByteString bs
  where
    len = BS.length bs
    bs  = encodeUtf8 txt

instance Binary URI where
  put (CHK rk ck ex ps) =
    putWord8 1 >> put rk >> put ck >> put ex >> putWord32be (fromIntegral $ length ps) >> mapM_ putUTF8 ps

  put x = error $ "can't put " ++ show x

  get = do
    t <- getWord8

    case t of
      1 -> do -- CHK
        rk <- get
        ck <- get
        ex <- get
        mc <- getWord32be
        ps <- replicateM (fromIntegral mc) $ getUTF8
        return $ CHK rk ck ex ps

      2 -> do -- SSK
        rk <- get
        ck <- get
        ex <- get
        dn <- getUTF8
        mc <- getWord32be
        ps <- replicateM (fromIntegral mc) $ getUTF8
        return $ SSK rk ck ex dn ps

      x -> fail $ "unknown URI type " ++ show x

parseUri :: T.Text -> Either T.Text URI
parseUri str = case T.take 4 str of
  "CHK@" -> parseChk (T.drop 4 str)
  "SSK@" -> parseSsk (T.drop 4 str)
  "USK@" -> parseUsk (T.drop 4 str)
  _      -> Left $ T.concat ["cannot recognize URI type of \"", str, "\""]

parseUsk :: T.Text -> Either T.Text URI
parseUsk str = let (str', path) = T.span (/= '/') str in case T.split (== ',') str' of
  [rstr, cstr, estr] -> do
    rk <- fromBase64' rstr >>= mkKey
    ck <- fromBase64' cstr >>= mkKey
    e  <- fromBase64' estr >>= \eb -> if BS.length eb == 5
                                      then Right $ eb
                                      else Left "USK extra data must be 5 bytes"
    let
      path' = T.drop 1 path
      ps    = if T.null path' then [] else T.split (== '/') path'

    -- for USKs, the first two path elements are the docname and the
    -- revision, and both are mandatory
    if length ps < 2
      then Left "missing USK document name / revision"
      else let
             [docname, revstr] = take 2 ps
             rs = reads $ T.unpack revstr
           in if null rs
              then Left $ "can't parse USK revision"
              else Right $ USK rk ck (SskExtra e) docname (fst $ head rs) (drop 2 ps)

  _ -> Left $ T.concat $ ["expected 3 comma-separated parts in \"", str, "\""]
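-- For orientation, the URI shapes accepted by these parsers look as follows
-- (schematic placeholders, not working keys; the two key parts are the
-- base64 of 32 bytes each, the extra part is the base64 of 5 bytes):
--
--   CHK@<routingKey>,<cryptoKey>,<extra>/some/path
--   SSK@<pubKeyHash>,<cryptoKey>,<extra>/docname/some/path
--   USK@<pubKeyHash>,<cryptoKey>,<extra>/docname/<revision>/some/path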
parseSsk :: T.Text -> Either T.Text URI
parseSsk str = let (str', path) = T.span (/= '/') str in case T.split (== ',') str' of
  [rstr, cstr, estr] -> do
    rk <- fromBase64' rstr >>= mkKey
    ck <- fromBase64' cstr >>= mkKey
    e  <- fromBase64' estr >>= \eb -> if BS.length eb == 5
                                      then Right $ eb
                                      else Left "SSK extra data must be 5 bytes"
    let
      path' = T.drop 1 path
      ps    = if T.null path' then [] else T.split (== '/') path'

    -- for SSKs, the first path element is the docname and mandatory
    if null ps
      then Left "missing SSK document name"
      else Right $ SSK rk ck (SskExtra e) (head ps) (tail ps)
  _ -> Left $ T.concat $ ["expected 3 comma-separated parts in \"", str, "\""]

parseChk :: T.Text -> Either T.Text URI
parseChk str = let (str', path) = T.span (/= '/') str in case T.split (== ',') str' of
  [rstr, cstr, estr] -> do
    rk <- fromBase64' rstr >>= mkKey
    ck <- fromBase64' cstr >>= mkKey
    e  <- fromBase64' estr >>= \eb -> if BS.length eb == 5
                                      then Right $ eb
                                      else Left "CHK extra data must be 5 bytes"
    let
      path' = T.drop 1 path
      ps    = if T.null path' then [] else T.split (== '/') path'

    return $ CHK rk ck (ChkExtra e) ps -- T.split (== '/') (T.drop 1 path)
  _ -> Left $ T.concat $ ["expected 3 comma-separated parts in \"", str, "\""]

-- |
-- Decides whether a URI is expected to point to a metadata block
-- (aka control document). This is usually the case for URIs
-- presented to the user. The control document will generally
-- provide information on how to assemble the original data referenced
-- by the URI and specify a MIME type.
isControlDocument :: URI -> Bool
isControlDocument (CHK _ _ e _) = chkExtraIsControl e
isControlDocument (SSK {})      = True -- to my knowledge, this is always true
isControlDocument (USK {})      = True -- this one, too

uriPath :: URI -> [T.Text]
uriPath (CHK _ _ _ p)     = p
uriPath (SSK _ _ _ _ p)   = p
uriPath (USK _ _ _ _ _ p) = p

--------------------------------------------------------------------------------------
-- CHK extra data (last URI component)
--------------------------------------------------------------------------------------

newtype ChkExtra = ChkExtra { unChkExtra :: BS.ByteString } deriving ( Eq, Ord )

instance Show ChkExtra where
  show (ChkExtra bs) = T.unpack $ toBase64' bs

instance Binary ChkExtra where
  put (ChkExtra bs) = putByteString bs
  get = ChkExtra <$> getByteString 5

-- | construct CHK extra data
mkChkExtra
  :: Word8    -- ^ crypto algorithm
  -> Word16   -- ^ compression algorithm
  -> Bool     -- ^ control document
  -> ChkExtra -- ^ resulting 5 bytes of CHK key "extra" data
mkChkExtra crypt compr contr = ChkExtra $ bsToStrict $
  runPut $ putWord8 0 >> put crypt >> putWord8 ctrl >> put compr
  where
    ctrl = if contr then 2 else 0

-- | extract the crypto algorithm used from a ChkExtra
chkExtraCrypto :: ChkExtra -> Word8
chkExtraCrypto ce = BS.index (unChkExtra ce) 1

chkExtraIsControl :: ChkExtra -> Bool
chkExtraIsControl ce = 2 == BS.index (unChkExtra ce) 2

chkExtraCompression :: ChkExtra -> Either T.Text CompressionCodec
chkExtraCompression ce
  | c > 0x8000 = Right None
  | otherwise  = case c of
      0 -> Right Gzip
      1 -> Right Bzip2
      2 -> Right LZMA
      3 -> Right LZMA_NEW
      x -> Left $ "unknown CHK compression codec: " `T.append` (T.pack $ show x)
  where
    c = decode $ bsFromStrict $ BS.drop 3 $ unChkExtra ce :: Word16

---------------------------------------------------------------------------
-- SSK specifics
---------------------------------------------------------------------------

newtype SskExtra = SskExtra { unSskExtra :: BS.ByteString } deriving ( Eq, Ord )
| 254 | instance Show SskExtra where 255 | show (SskExtra bs) = T.unpack $ toBase64' bs 256 | 257 | instance Binary SskExtra where 258 | put (SskExtra bs) = putByteString bs 259 | get = SskExtra <$> getByteString 5 260 | 261 | -- | 262 | -- extracts the crypto algorithm from SSK extra data, 263 | -- which is at (zero based) index 2 264 | sskExtraCrypto :: SskExtra -> Word8 265 | sskExtraCrypto se = BS.index (unSskExtra se) 2 266 | -------------------------------------------------------------------------------- /src/lib/Logging.hs: -------------------------------------------------------------------------------- 1 | 2 | module Logging ( 3 | initLogging, 4 | 5 | module System.Log.Logger 6 | 7 | ) where 8 | 9 | --import qualified Data.Configurator as CFG 10 | --import qualified Data.Configurator.Types as CFG 11 | import System.IO ( stderr ) 12 | import System.Log.Handler.Simple 13 | import System.Log.Logger 14 | 15 | -- | configure log handlers 16 | initLogging :: IO () 17 | initLogging = do 18 | h <- verboseStreamHandler stderr DEBUG 19 | updateGlobalLogger rootLoggerName $ setHandlers [h] 20 | updateGlobalLogger rootLoggerName $ setLevel INFO 21 | -------------------------------------------------------------------------------- /src/lib/Message.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE MultiParamTypeClasses #-} 3 | 4 | module Message ( 5 | MessagePayload(..), RoutedMessage(..), 6 | MessageSource, MessageSink, MessageIO, 7 | Message(..), Routable(..), 8 | 9 | -- * Message IDs 10 | MessageId, MessageIdGen, mkMessageIdGen, nextMessageId 11 | ) where 12 | 13 | import Control.Applicative ( (<$>), (<*>) ) 14 | import Control.Concurrent ( forkIO ) 15 | import Control.Concurrent.STM 16 | import Control.Monad ( void ) 17 | import qualified Crypto.Random.AESCtr as AESRNG 18 | import Data.Binary 19 | import Data.Conduit 20 | import System.Random ( random ) 21 | 22 | import qualified Freenet.Chk as FN 23 | import qualified Freenet.Ssk as FN 24 | import Types 25 | 26 | -- | 27 | -- A source of messages, which usually would be another node 28 | -- talking to us. 29 | type MessageSource a = Source IO (Message a) 30 | 31 | -- | 32 | -- A sink for outgoing messages to another node. 33 | type MessageSink a = Sink (Message a) IO () 34 | 35 | -- | 36 | -- A (source, sink) pair of messages, suitable for talking to a node. 37 | type MessageIO a = (MessageSource a, MessageSink a) 38 | 39 | ------------------------------------------------------------------------------------- 40 | -- Message IDs 41 | ------------------------------------------------------------------------------------- 42 | 43 | type MessageId = Word64 44 | 45 | newtype MessageIdGen = MessageIdGen { unMessageIdGen :: TBQueue Word64 } 46 | 47 | mkMessageIdGen :: IO MessageIdGen 48 | mkMessageIdGen = do 49 | q <- newTBQueueIO 64 50 | rng <- AESRNG.makeSystem 51 | 52 | let makeId r = let (next, r') = random r 53 | in do 54 | atomically $ writeTBQueue q next 55 | makeId r' 56 | 57 | void $ forkIO $ makeId rng 58 | return $ MessageIdGen q 59 | 60 | nextMessageId :: MessageIdGen -> STM MessageId 61 | nextMessageId = readTBQueue . 
unMessageIdGen 62 | 63 | ------------------------------------------------------------------------------------- 64 | -- Message Payload 65 | ------------------------------------------------------------------------------------- 66 | 67 | -- | 68 | -- Messages are parametrised over the type of Peer addresses used, which could 69 | -- be either hostnames or message queues for simulations. 70 | data MessagePayload a 71 | = Hello (NodeInfo a) 72 | | Ping 73 | | GetPeerList -- ^ request for getting some peers which we might connect to 74 | | PeerList [NodeInfo a] -- ^ response to @GetPeers@ request 75 | | FreenetChkRequest FN.ChkRequest 76 | | FreenetChkBlock FN.ChkBlock 77 | | FreenetSskRequest FN.SskRequest 78 | | FreenetSskBlock FN.SskBlock 79 | | Bye String 80 | | Failed (Maybe String) -- ^ 81 | deriving ( Show ) 82 | 83 | instance (Binary a) => Binary (MessagePayload a) where 84 | put (Hello peer) = putHeader 1 >> put peer 85 | put Ping = putHeader 2 86 | put GetPeerList = putHeader 3 87 | put (PeerList ps) = putHeader 4 >> put ps 88 | put (FreenetChkRequest dr) = putHeader 5 >> put dr 89 | put (FreenetChkBlock blk) = putHeader 6 >> put blk 90 | put (FreenetSskRequest dr) = putHeader 7 >> put dr 91 | put (FreenetSskBlock blk) = putHeader 8 >> put blk 92 | put (Bye msg) = putHeader 9 >> put msg 93 | put (Failed reason) = putHeader 10 >> put reason 94 | 95 | get = do 96 | t <- getWord8 97 | 98 | case t of 99 | 1 -> Hello <$> get 100 | 2 -> return Ping 101 | 3 -> return GetPeerList 102 | 4 -> PeerList <$> get 103 | 5 -> FreenetChkRequest <$> get 104 | 6 -> FreenetChkBlock <$> get 105 | 7 -> FreenetSskRequest <$> get 106 | 8 -> FreenetSskBlock <$> get 107 | 9 -> Bye <$> get 108 | 10 -> Failed <$> get 109 | _ -> fail $ "unknown message type " ++ show t 110 | 111 | -- | 112 | -- a message which should be routed to another peer 113 | data Message a = Routed Bool (RoutedMessage a) -- ^ is this a backtrack step? and the routed message 114 | | Response MessageId (MessagePayload a) 115 | | Direct (MessagePayload a) 116 | deriving (Show) 117 | 118 | instance Binary a => Binary (Message a) where 119 | put (Routed False msg) = putHeader 1 >> put msg 120 | put (Routed True msg) = putHeader 2 >> put msg 121 | put (Response mid msg) = putHeader 3 >> put mid >> put msg 122 | put (Direct msg) = putHeader 4 >> put msg 123 | 124 | get = do 125 | t <- getWord8 126 | case t of 127 | 1 -> Routed False <$> get 128 | 2 -> Routed True <$> get 129 | 3 -> Response <$> get <*> get 130 | 4 -> Direct <$> get 131 | x -> fail $ "unknown message type " ++ show x 132 | 133 | data RoutedMessage a = RoutedMessage 134 | { rmPayload :: MessagePayload a 135 | , rmId :: MessageId 136 | , rmMarked :: [Id] 137 | , rmTarget :: Id 138 | } 139 | deriving ( Show ) 140 | 141 | instance Binary a => Binary (RoutedMessage a) where 142 | put (RoutedMessage p mid ms tgt) = put p >> put mid >> put ms >> put tgt 143 | get = RoutedMessage <$> get <*> get <*> get <*> get 144 | 145 | class HasLocation l => Routable m l where 146 | routeMarked :: m -> l -> Bool -- ^ is the location already marked? 
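  -- (Marking is the loop-avoidance mechanism here: judging from the
  -- RoutedMessage instance below, a message accumulates the locations it
  -- has visited in rmMarked, so routing code can skip peers whose
  -- location was already tried.)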
  routeMark   :: m -> l -> m  -- ^ mark the specified location
  routeTarget :: m -> l       -- ^ where the message should be routed

instance Routable (RoutedMessage a) Id where
  routeTarget = rmTarget
  routeMarked rm l = l `elem` (rmMarked rm)
  routeMark rm l
    | l `elem` (rmMarked rm) = rm
    | otherwise = rm { rmMarked = (l : (rmMarked rm)) }

putHeader :: Word8 -> Put
putHeader t = put t

-------------------------------------------------------------------------------- /src/lib/Net.hs: --------------------------------------------------------------------------------

{-# LANGUAGE OverloadedStrings #-}

-- |
-- TCP/IP Networking
module Net (
  TcpAddress(..), nodeListen
  ) where

import Control.Applicative ( (<$>), (<*>) )
import Control.Concurrent.STM
import Control.Monad ( mzero, void )
import qualified Data.ByteString.Char8 as BSC
import qualified Data.Configurator as CFG
import qualified Data.Configurator.Types as CFG
import Data.String ( fromString )
import Control.Concurrent ( forkIO )
import Data.Aeson
import Data.Binary
import Data.Conduit
import Data.Conduit.Network
import Data.Conduit.Serialization.Binary
import System.IO.Error ( catchIOError )

import Logging
import Message
import Node
import Peers

logD :: String -> IO ()
logD = debugM "net"

logI :: String -> IO ()
logI m = infoM "net" m

data TcpAddress = TcpAddress String Int deriving ( Eq, Show )

instance Binary TcpAddress where
  put (TcpAddress h p) = put h >> put p
  get = TcpAddress <$> get <*> get

instance FromJSON TcpAddress where
  parseJSON (Object v) = TcpAddress <$>
                         v .: "host" <*>
                         v .: "port"
  parseJSON _ = mzero

instance ToJSON TcpAddress where
  toJSON (TcpAddress h p) = object
    [ "host" .= h
    , "port" .= p
    ]

instance PeerAddress TcpAddress where
  connectPeer = tcpConnect

-- |
-- listens on the configured address and accepts incoming peer connections
nodeListen :: CFG.Config -> Node TcpAddress -> IO ()
nodeListen cfg node = do
  host <- CFG.require cfg "host"
  port <- CFG.require cfg "port"

  let
    s = serverSettings port (fromString host)

  void $ forkIO $ runTCPServer s $ \ad -> do
    logI $ "incoming connection from " ++ (show $ appSockAddr ad)
    peerConnecting node (appSource ad $= conduitDecode, conduitEncode =$ appSink ad)

  infoM "net" $ "node listening on " ++ host ++ ":" ++ show port
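-- A quick round-trip sketch for the JSON instances above (hypothetical
-- values; aeson does not guarantee key order in the encoded output):
--
-- >>> decode "{ \"host\" : \"198.51.100.7\", \"port\" : 1234 }" :: Maybe TcpAddress
-- Just (TcpAddress "198.51.100.7" 1234)
-- >>> encode (TcpAddress "198.51.100.7" 1234)
-- "{\"host\":\"198.51.100.7\",\"port\":1234}"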
-- |
-- Connects to a @Peer@ using TCP sockets.
tcpConnect :: Peer TcpAddress -> (Either String (MessageIO TcpAddress) -> IO ()) -> IO ()
tcpConnect peer handler = do
  addrs <- atomically . readTVar $ peerAddresses peer

  let
    tryConnect [] = handler $ Left "no addresses left"
    tryConnect (x@(TcpAddress host port) : xs) = do
      catchIOError
        (do
            logD $ "connecting to " ++ show (peerId peer) ++ " @ " ++ show x
            runTCPClient (clientSettings port $ BSC.pack host) $ \ad -> do
              logI $ "connected to " ++ show x
              handler $ Right (appSource ad $= conduitDecode, conduitEncode =$ appSink ad))
        (\e -> do
            logD $ "error connecting to " ++ show (peerId peer) ++ " @ " ++ show x ++ ": " ++ show e
            tryConnect xs)

  tryConnect addrs

-------------------------------------------------------------------------------- /src/lib/Peers.hs: --------------------------------------------------------------------------------

{-# LANGUAGE OverloadedStrings #-}

module Peers (
  mkNodeInfo,

  PeerAddress(..), readPeers,
  Peer(..), mkPeer, peerFetchDone
  ) where

import Control.Concurrent.STM
import Data.Aeson
import qualified Data.ByteString.Lazy as BSL
import System.FilePath ( (</>) )
import System.IO.Error ( catchIOError )
import System.Log.Logger

import Freenet.Types
import Message
import Statistics
import Types

logI :: String -> IO ()
logI = infoM "peers"

-- |
-- Extracts the persistable node information from a peer.
mkNodeInfo :: Peer a -> STM (NodeInfo a)
mkNodeInfo (Peer pid addrs _) = do
  as <- readTVar addrs
  return $ NodeInfo pid as

----------------------------------------------------------------
-- Peers
----------------------------------------------------------------

class (Show a, Eq a) => PeerAddress a where
  connectPeer :: Peer a -> (Either String (MessageIO a) -> IO ()) -> IO ()

-- |
-- This is the live version of @NodeInfo@, which can be constructed
-- from @NodeInfo@ and converted to @NodeInfo@ for storage and transfer.
data Peer a = Peer
  { peerId         :: ! Id
  , peerAddresses  :: ! (TVar [a])  -- ^ where this peer can be connected
  , peerSuccessEst :: ! TEstimator  -- ^ estimator for P(success) by location
  }

instance Eq (Peer a) where
  (==) p1 p2 = (peerId p1) == (peerId p2)

instance ToJSON a => ToStateJSON (Peer a) where
  toStateJSON (Peer pid adds ps) = do
    adds' <- readTVar adds
    ps'   <- toStateJSON ps

    return $ object
      [ "id"        .= pid
      , "addresses" .= adds'
      , "psuccess"  .= ps'
      ]

mkPeer :: PeerAddress a => NodeInfo a -> STM (Peer a)
mkPeer (NodeInfo nid addrs) = do
  as <- newTVar addrs
  ps <- mkTEstimator 128 0.01 0.5
  return $ Peer nid as ps
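-- A small usage sketch for the helpers above (someId and someKey are
-- hypothetical stand-ins; TcpAddress is the PeerAddress instance from Net):
--
--   peer <- atomically $ mkPeer (NodeInfo someId [TcpAddress "10.0.0.2" 1234])
--   ...
--   atomically $ peerFetchDone peer someKey True  -- or False on failure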
69 | -- |
70 | -- Update the peer statistics after a fetch from that peer has completed.
71 | peerFetchDone
72 | :: Peer a -- ^ the peer whose status gets updated
73 | -> Key -- ^ the key that was fetched
74 | -> Bool -- ^ if the fetch was successful
75 | -> STM ()
76 | peerFetchDone peer key suc = updateTEstimator (peerSuccessEst peer) (toLocation key) suc'
77 | where
78 | suc' = if suc then 1 else 0
79 | 
80 | readPeers
81 | :: (FromJSON a, PeerAddress a)
82 | => FilePath -- ^ app data directory containing the peers file
83 | -> IO (Either String [NodeInfo a])
84 | readPeers dataDir = do
85 | let
86 | kpFile = dataDir </> "peers"
87 | 
88 | logI $ "reading known peers from " ++ kpFile
89 | catchIOError
90 | (fmap eitherDecode $ BSL.readFile kpFile)
91 | (\e -> return $ Left $ show e)
92 | -------------------------------------------------------------------------------- /src/lib/Requests.hs: --------------------------------------------------------------------------------
1 | 
2 | module Requests (
3 | -- * Requesting Data
4 | 
5 | UriFetch(..)
6 | 
7 | -- * Inserting Data
8 | 
9 | ) where
10 | 
11 | import Control.Concurrent ( forkIO )
12 | import Control.Concurrent.STM
13 | import Control.Monad ( void, when )
14 | import qualified Data.ByteString as BS
15 | import qualified Data.HashMap.Strict as HMap
16 | import qualified Data.Text as T
17 | 
18 | import qualified Freenet.Chk as FN
19 | import qualified Freenet.Ssk as FN
20 | import qualified Freenet.Types as FN
21 | import qualified Freenet.URI as FN
22 | import Message
23 | import Node
24 | 
25 | class UriFetch a where
26 | getUriData :: a -> FN.URI -> IO (Either T.Text (BS.ByteString, Int))
27 | 
28 | 
29 | 
30 | -------------------------------------------------------------------------------------------------
31 | -- Organizing data requests
32 | -------------------------------------------------------------------------------------------------
33 | 
34 | data Delayed d = Delayed ! (TMVar (Maybe d))
35 | 
36 | waitDelayed :: Delayed d -> STM (Maybe d)
37 | waitDelayed (Delayed d) = readTMVar d
38 | 
39 | data RequestManager r d = RequestManager
40 | { rmRequests :: ! (TVar (HMap.HashMap FN.Key (Delayed d)))
41 | , rmTimeout :: ! Int
42 | }
43 | 
44 | mkRequestManager :: STM (RequestManager r d)
45 | mkRequestManager = do
46 | reqs <- newTVar HMap.empty
47 | return $! 
RequestManager reqs (30 * 1000 * 1000) 48 | 49 | offer :: (FN.DataBlock d) => d -> RequestManager r d -> STM () 50 | offer db rmgr = do 51 | 52 | let 53 | key = FN.dataBlockLocation db 54 | 55 | rm <- readTVar (rmRequests rmgr) 56 | 57 | case HMap.lookup key rm of 58 | Nothing -> return () 59 | Just (Delayed d) -> do 60 | putTMVar d (Just db) 61 | writeTVar (rmRequests rmgr) $ HMap.delete key rm 62 | 63 | request :: (FN.DataRequest r) => RequestManager r d -> r -> (r -> IO ()) -> IO (Delayed d) 64 | request rmgr dr act = do 65 | let 66 | key = FN.dataRequestLocation dr 67 | checkTimeout (Delayed d) to = orElse 68 | (isEmptyTMVar d >>= \e -> when e retry) 69 | (readTVar to >>= \t -> if t 70 | then putTMVar d Nothing >> modifyTVar' (rmRequests rmgr) (HMap.delete key) 71 | else retry) 72 | 73 | (result, needStart) <- atomically $ do 74 | rm <- readTVar (rmRequests rmgr) 75 | case HMap.lookup key rm of 76 | Just old -> return (old, False) -- request is already running 77 | Nothing -> do -- make new Delayed 78 | b <- newEmptyTMVar 79 | let d = Delayed b 80 | writeTVar (rmRequests rmgr) $ HMap.insert key d rm 81 | return (d, True) 82 | 83 | when needStart $ do 84 | to <- registerDelay $ rmTimeout rmgr 85 | void $ forkIO $ atomically $ checkTimeout result to 86 | act dr 87 | 88 | return result 89 | 90 | ------------------------------------------------------------------------------------------ 91 | -- fetching data 92 | ------------------------------------------------------------------------------------------ 93 | {- 94 | nodeFetchChk :: PeerAddress a => Node a -> FN.ChkRequest -> ((Either T.Text FN.ChkBlock) -> IO b) -> IO b 95 | nodeFetchChk node req k = do 96 | fromStore <- FN.getChk (nodeFreenet node) req 97 | 98 | case fromStore of 99 | Right blk -> k $ Right blk 100 | Left _ -> do 101 | d <- request (nodeChkRequests node) req $ \r -> do 102 | mkRoutedMessage node (FN.dataRequestLocation req) (FreenetChkRequest r) 103 | 104 | result <- atomically $ waitDelayed d 105 | 106 | case result of 107 | Nothing -> k $ Left "timeout waiting for CHK data" 108 | Just blk -> k $ Right blk 109 | 110 | nodeFetchSsk :: PeerAddress a => Node a -> FN.SskRequest -> ((Either T.Text FN.SskBlock) -> IO b) -> IO b 111 | nodeFetchSsk node req k = do 112 | fromStore <- FN.getSsk (nodeFreenet node) req 113 | 114 | case fromStore of 115 | Right blk -> k $ Right blk 116 | Left _ -> do 117 | d <- request (nodeSskRequests node) req $ \r -> do 118 | mkRoutedMessage node (FN.dataRequestLocation req) (FreenetSskRequest r) 119 | 120 | result <- atomically $ waitDelayed d 121 | 122 | case result of 123 | Nothing -> k $ Left "timeout waiting for SSK data" 124 | Just blk -> k $ Right blk 125 | 126 | requestNodeData :: PeerAddress a => Node a -> FN.URI -> IO (Either T.Text (BS.ByteString, Int)) 127 | 128 | requestNodeData n (FN.CHK loc key extra _) = 129 | case FN.chkExtraCompression extra of 130 | Left e -> return $ Left $ "can't decompress CHK: " `T.append` e 131 | Right c -> nodeFetchChk n (FN.ChkRequest loc $ FN.chkExtraCrypto extra) $ \result -> 132 | case result of 133 | Left e -> return $ Left e 134 | Right blk -> decrypt blk where 135 | decrypt b = case FN.decryptDataBlock b key of 136 | Left e -> return $ Left $ "decrypting CHK data block failed: " `T.append` e 137 | Right (p, pl) -> FN.decompressChk c p pl 138 | 139 | requestNodeData n (FN.SSK pkh key extra dn _) = do 140 | let 141 | req = FN.SskRequest pkh (FN.sskEncryptDocname key dn) (FN.sskExtraCrypto extra) 142 | decrypt blk = FN.decryptDataBlock blk key 143 | 144 
| fromStore <- FN.getSsk (nodeFreenet n) req 145 | 146 | case fromStore of 147 | Right blk -> return $ decrypt blk -- (BSL.take (fromIntegral bl) $ BSL.fromStrict blk) 148 | Left _ -> do 149 | d <- request (nodeSskRequests n) req $ \r -> do 150 | mkRoutedMessage n (FN.dataRequestLocation req) (FreenetSskRequest r) 151 | 152 | result <- atomically $ waitDelayed d 153 | 154 | case result of 155 | Nothing -> return $ Left "timeout waiting for SSK data" 156 | Just blk -> return $ decrypt blk 157 | 158 | requestNodeData n (FN.USK pkh key extra dn dr _) = do 159 | let 160 | dn' = dn `T.append` "-" `T.append` T.pack (show dr) 161 | req = FN.SskRequest pkh (FN.sskEncryptDocname key dn') (FN.sskExtraCrypto extra) 162 | decrypt blk = FN.decryptDataBlock blk key 163 | 164 | fromStore <- FN.getSsk (nodeFreenet n) req 165 | 166 | case fromStore of 167 | Right blk -> return $ decrypt blk 168 | Left _ -> do 169 | d <- request (nodeSskRequests n) req $ \r -> do 170 | mkRoutedMessage n 171 | (FN.dataRequestLocation req) 172 | (FreenetSskRequest r) 173 | 174 | result <- atomically $ waitDelayed d 175 | 176 | case result of 177 | Nothing -> return $ Left "timeout waiting for USK data" 178 | Just blk -> return $ decrypt blk 179 | 180 | -} 181 | -------------------------------------------------------------------------------- /src/lib/RestApi.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE OverloadedStrings #-} 3 | 4 | module RestApi ( 5 | startRestApi 6 | ) where 7 | 8 | import Control.Applicative ( (<|>), (<$>) ) 9 | import Control.Concurrent ( forkIO ) 10 | import Control.Concurrent.STM 11 | import Control.Monad ( void ) 12 | import Data.Aeson 13 | import qualified Data.Configurator as CFG 14 | import qualified Data.Configurator.Types as CFG 15 | import Data.String ( fromString ) 16 | import qualified Data.Text as T 17 | import Data.Text.Encoding ( decodeUtf8, encodeUtf8 ) 18 | import Network.HTTP.Types ( status200, status400 ) 19 | import qualified Network.Wai as WAI 20 | import Network.Wai.Application.Static 21 | import Network.Wai.Handler.Warp as Warp 22 | import qualified Network.Wai.Parse as WAI 23 | import Network.Wai.UrlMap 24 | 25 | import Freenet.Insert 26 | import Node 27 | import Peers 28 | import Types 29 | import Utils 30 | 31 | startRestApi :: (PeerAddress a, ToJSON a) => CFG.Config -> Node a -> IO () 32 | startRestApi cfg node = do 33 | host <- CFG.lookup cfg "host" 34 | port <- CFG.lookupDefault 8080 cfg "port" 35 | 36 | case host of 37 | Nothing -> return () 38 | Just h -> do 39 | void $ forkIO $ Warp.runSettings 40 | (Warp.setHost (fromString h) $ Warp.setPort port $ Warp.defaultSettings) 41 | (restApi node) 42 | 43 | restApi :: (PeerAddress a, ToJSON a) => Node a -> WAI.Application 44 | restApi node = mapUrls $ 45 | mount "api" 46 | (-- mount "fetch" 47 | -- ( mount "chk" (fetchChk node) 48 | -- <|> mount "ssk" (fetchSsk node) 49 | -- ) 50 | mount "insert" 51 | ( mount "file" (insertFile node) 52 | ) 53 | <|> mount "status" 54 | ( mount "peers" (connStatus node) 55 | <|> mount "routing" (routeStatus node) 56 | -- <|> mount "store" 57 | -- ( mount "chk" (stateJsonResponse $ nodeChkStore node) 58 | -- <|> mount "ssk" (stateJsonResponse $ nodeSskStore node) 59 | -- ) 60 | ) 61 | ) 62 | <|> mountRoot webUi 63 | 64 | webUi :: WAI.Application 65 | webUi = staticApp $ defaultFileServerSettings "./webUi" 66 | 67 | connStatus :: ToJSON a => Node a -> WAI.Application 68 | connStatus n r resp = nodeConnectStatus n >>= \o -> jsonResponse o r 
resp 69 | 70 | routeStatus :: ToJSON a => Node a -> WAI.Application 71 | routeStatus n r resp = nodeRouteStatus n >>= \o -> jsonResponse o r resp 72 | 73 | stateJsonResponse :: ToStateJSON a => a -> WAI.Application 74 | stateJsonResponse o req respond = 75 | atomically (toStateJSON o) >>= \o' -> jsonResponse o' req respond 76 | 77 | jsonResponse :: ToJSON a => a -> WAI.Application 78 | jsonResponse o _ respond = respond $ WAI.responseLBS status200 headers $ encode o 79 | where 80 | headers = [("Content-Type", "application/json")] 81 | 82 | badRequest :: T.Text -> WAI.Application 83 | badRequest msg _ respond = respond $ WAI.responseLBS status400 headers $ bsFromStrict $ encodeUtf8 msg 84 | where 85 | headers = [("Content-Type", "text/plain; charset=utf-8")] 86 | 87 | {- 88 | fetchChk :: PeerAddress a => Node a -> WAI.Application 89 | fetchChk node req = do 90 | chkReq <- eitherDecode <$> WAI.lazyRequestBody req 91 | 92 | case chkReq of 93 | Left e -> badRequest (T.pack e) req 94 | Right r -> nodeFetchChk node r $ \result -> do 95 | case result of 96 | Left e -> badRequest e req 97 | Right blk -> jsonResponse blk req 98 | 99 | fetchSsk :: PeerAddress a => Node a -> WAI.Application 100 | fetchSsk node req = do 101 | chkReq <- eitherDecode <$> WAI.lazyRequestBody req 102 | 103 | case chkReq of 104 | Left e -> badRequest (T.pack e) req 105 | Right r -> nodeFetchSsk node r $ \result -> do 106 | case result of 107 | Left e -> badRequest e req 108 | Right blk -> jsonResponse blk req 109 | -} 110 | 111 | insertFile :: PeerAddress a => Node a -> WAI.Application 112 | insertFile node req respond = do 113 | case WAI.getRequestBodyType req of 114 | Nothing -> badRequest "must post url encoded or multipart data here" req respond 115 | Just _ -> do 116 | (_, files) <- WAI.parseRequestBody WAI.lbsBackEnd req 117 | uris <- mapM (\(_, fi) -> insert node InsertCHK (InsertDirect (WAI.fileContent fi) (decodeUtf8 $ WAI.fileContentType fi))) files 118 | badRequest (T.pack $ show uris) req respond 119 | 120 | -------------------------------------------------------------------------------- /src/lib/Statistics.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE FlexibleContexts #-} 2 | 3 | module Statistics ( 4 | -- * Histograms 5 | Histogram, mkHistogram, 6 | freezeHistogram, thawHistogram, 7 | 8 | -- ** Transactional Histograms 9 | THistogram, mkTHistogram, histInc, histDec, 10 | 11 | -- * Scale Free Estimators 12 | TEstimator, mkTEstimator, updateTEstimator, 13 | teToList 14 | ) where 15 | 16 | import Control.Applicative ( (<$>) ) 17 | import Control.Concurrent.STM 18 | import Data.Aeson 19 | import Data.Binary 20 | import qualified Data.Array.IArray as IA 21 | import Data.Array.MArray 22 | 23 | import Types 24 | 25 | ----------------------------------------------------------------------- 26 | -- Scale Free Estimator 27 | ----------------------------------------------------------------------- 28 | 29 | data TEstimator = TEstimator 30 | { estPoints :: ! (TArray Int (Double, Double)) 31 | , estFactor :: ! 
Double 32 | } 33 | 34 | instance ToStateJSON TEstimator where 35 | toStateJSON e = do 36 | assocs <- getAssocs $ estPoints e 37 | return $ toJSON assocs 38 | 39 | mkTEstimator 40 | :: Int -- ^ number of data points 41 | -> Double -- ^ scale factor determining how fast the estimator adapts 42 | -> Double -- ^ initial value for each data point 43 | -> STM TEstimator 44 | mkTEstimator cnt f v = do 45 | arr <- newListArray (0, cnt - 1) [(fromIntegral x / (fromIntegral cnt), v) | x <- [0..(cnt-1)]] 46 | return $ TEstimator arr f 47 | 48 | teToList :: TEstimator -> STM [(Double, Double)] 49 | teToList = getElems . estPoints 50 | 51 | wrapLoc :: Double -> Double 52 | wrapLoc l 53 | | l > 1 = l - 1 54 | | l < 0 = 1 + l 55 | | otherwise = l 56 | 57 | moveLoc :: Double -> Double -> Double -> Double 58 | moveLoc from to f 59 | | abs (from - to) > 0.5 = wrapLoc $ from - (1 - (to - from)) 60 | | otherwise = wrapLoc $ from + (to - from) * f 61 | 62 | updateTEstimator 63 | :: TEstimator 64 | -> Location -- ^ where to update the estimator 65 | -> Double -- ^ new value 66 | -> STM () 67 | updateTEstimator est loc val = do 68 | (l, r) <- teIndices est loc 69 | 70 | let 71 | arr = estPoints est 72 | f = estFactor est 73 | loc' = fromRational $ unLocation loc 74 | 75 | readArray arr l >>= \(x0, y0) -> 76 | writeArray arr l (moveLoc x0 loc' f, y0 + (val - y0) * f) 77 | 78 | readArray arr r >>= \(x1, y1) -> 79 | writeArray arr r (moveLoc x1 loc' f, y1 + (val - y1) * f) 80 | 81 | -- | 82 | -- Finds the (left, right) index pair for the given location. 83 | teIndices :: TEstimator -> Location -> STM (Int, Int) 84 | teIndices est loc = getBounds (estPoints est) >>= \(_, maxIdx) -> go maxIdx 0 85 | where 86 | loc' = fromRational $ unLocation loc 87 | go mi n 88 | | n == mi = return (mi, 0) -- wrap around 89 | | otherwise = do 90 | (x1, _) <- readArray (estPoints est) n 91 | (x2, _) <- readArray (estPoints est) $ n + 1 92 | 93 | if (loc' >= x1 && loc' < x2) 94 | then return (n, n + 1) 95 | else go mi $ n + 1 96 | 97 | ----------------------------------------------------------------------- 98 | -- Histograms 99 | ----------------------------------------------------------------------- 100 | 101 | data THistogram = THistogram 102 | { histVals :: TArray Int Word64 103 | } 104 | 105 | instance ToStateJSON THistogram where 106 | toStateJSON h = do 107 | assocs <- getAssocs $ histVals h 108 | return $ toJSON assocs 109 | 110 | -- | 111 | -- Create a new Histogram with the specified number of bins. 112 | mkTHistogram 113 | :: Int -- ^ number of bins 114 | -> STM THistogram 115 | mkTHistogram bins = THistogram <$> newArray (0, bins - 1) 0 116 | 117 | -- | 118 | -- Increment the histogram by 1 at the given location. 119 | histInc :: HasLocation l => THistogram -> l -> STM () 120 | histInc = histMod (\x -> x + 1) 121 | 122 | histDec :: HasLocation l => THistogram -> l -> STM () 123 | histDec = histMod (\x -> x - 1) 124 | 125 | histMod :: HasLocation l => (Word64 -> Word64) -> THistogram -> l -> STM () 126 | histMod f h l = do 127 | (minBin, maxBin) <- getBounds a 128 | 129 | let 130 | l' = unLocation . toLocation $ l 131 | idx = max minBin $ min maxBin $ minBin + (floor $ l' * (fromIntegral maxBin + 1)) 132 | 133 | readArray a idx >>= \v -> writeArray a idx $! 
f v -- strict update ($!), so unevaluated thunks don't pile up in the array
134 | where
135 | a = histVals h
136 | 
137 | newtype Histogram = Histogram { unHistogram :: IA.Array Int Word64 }
138 | 
139 | instance Binary Histogram where
140 | put (Histogram arr) = put arr
141 | get = Histogram <$> get
142 | 
143 | mkHistogram :: Int -> Histogram
144 | mkHistogram bins = Histogram $ IA.listArray (0, bins - 1) $ repeat 0
145 | 
146 | freezeHistogram :: THistogram -> STM Histogram
147 | freezeHistogram th = Histogram <$> (freeze . histVals) th
148 | 
149 | thawHistogram :: Histogram -> STM THistogram
150 | thawHistogram h = THistogram <$> (thaw . unHistogram) h
151 | 
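-- A worked example for the bin arithmetic in histMod above (a sketch;
-- @someKey@ is a made-up @HasLocation@ value): with 8 bins the bounds are
-- (0, 7), so a location of 0.5 lands in bin floor (0.5 * (7 + 1)) = 4, and
-- the min/max clamping keeps locations just below 1.0 inside the top bin:
--
-- > h <- atomically $ mkTHistogram 8
-- > atomically $ histInc h someKey -- increments the bin of (toLocation someKey)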
-------------------------------------------------------------------------------- /src/lib/Time.hs: --------------------------------------------------------------------------------
1 | 
2 | module Time (
3 | Timestamp, getTime,
4 | Timediff, timeDiff, timeDiffSeconds
5 | ) where
6 | 
7 | import Data.Aeson
8 | import Data.Time.Clock.POSIX ( getPOSIXTime )
9 | 
10 | newtype Timestamp = Timestamp { unTs :: Double } deriving ( Show )
11 | 
12 | getTime :: IO Timestamp
13 | getTime = (Timestamp . realToFrac) `fmap` getPOSIXTime
14 | 
15 | newtype Timediff = Timediff { timeDiffSeconds :: Double } deriving ( Show )
16 | 
17 | timeDiff :: Timestamp -> Timestamp -> Timediff
18 | timeDiff ts1 ts2 = Timediff $ (unTs ts2) - (unTs ts1)
19 | 
20 | instance ToJSON Timediff where
21 | toJSON td = toJSON $ timeDiffSeconds td
22 | -------------------------------------------------------------------------------- /src/lib/Types.hs: --------------------------------------------------------------------------------
1 | 
2 | {-# LANGUAGE OverloadedStrings #-}
3 | 
4 | module Types (
5 | Id, unId, mkId', randomId, HasId(..),
6 | NodeInfo(..),
7 | 
8 | -- * Locations
9 | HasLocation(..), Location, mkLocation, toLocation,
10 | unLocation, rightOf,
11 | LocDistance, unDistance, locDist, absLocDist, scaleDist, locMove,
12 | 
13 | -- * state aware serialization
14 | ToStateJSON(..)
15 | ) where
16 | 
17 | import Control.Applicative ( pure, (<$>), (<*>) )
18 | import Control.Concurrent.STM
19 | import Control.Monad ( mzero )
20 | import Data.Aeson
21 | import Data.Binary
22 | import Data.Binary.Get
23 | import Data.Binary.Put
24 | import Data.Bits ( shiftL )
25 | import qualified Data.ByteString as BS
26 | import qualified Data.ByteString.Base16 as HEX
27 | import qualified Data.ByteString.Char8 as BSC
28 | import Data.Ratio ( (%) )
29 | import Data.Text.Encoding ( decodeUtf8, encodeUtf8 )
30 | import System.Random ( RandomGen, random )
31 | import Test.QuickCheck
32 | 
33 | ----------------------------------------------------------------------
34 | -- STM aware serialization
35 | ----------------------------------------------------------------------
36 | 
37 | class ToStateJSON a where
38 | toStateJSON :: a -> STM Value
39 | 
40 | instance ToStateJSON a => ToStateJSON [a] where
41 | toStateJSON xs = toJSON <$> mapM toStateJSON xs
42 | 
43 | instance ToStateJSON a => ToStateJSON (TVar a) where
44 | toStateJSON v = readTVar v >>= toStateJSON
45 | 
46 | ----------------------------------------------------------------------
47 | -- Node IDs
48 | ----------------------------------------------------------------------
49 | 
50 | -- |
51 | -- A 256-bit identifier.
52 | newtype Id = Id { unId :: BS.ByteString } deriving ( Eq, Ord )
53 | 
54 | class HasId a where
55 | getId :: a -> Id
56 | 
57 | instance Binary Id where
58 | put = putByteString . unId
59 | get = Id <$> getByteString 32
60 | 
61 | instance Show Id where
62 | show nid = BSC.unpack (HEX.encode $ unId nid)
63 | 
64 | instance FromJSON Id where
65 | parseJSON (String s) = pure $ Id $ fst (HEX.decode $ encodeUtf8 s)
66 | parseJSON _ = mzero
67 | 
68 | instance ToJSON Id where
69 | toJSON (Id bs) = toJSON $ decodeUtf8 $ HEX.encode bs
70 | 
71 | instance HasLocation Id where
72 | hasLocToInteger = idToInteger
73 | hasLocMax = Id $ BS.replicate 32 255
74 | 
75 | idToInteger :: Id -> Integer
76 | idToInteger (Id bs) = BS.foldl' (\i bb -> (i `shiftL` 8) + fromIntegral bb) 0 bs
77 | 
78 | mkId' :: BS.ByteString -> Id
79 | mkId' bs
80 | | BS.length bs /= 32 = error "mkId': expected 32 bytes"
81 | | otherwise = Id bs
82 | 
83 | randomId :: RandomGen g => g -> (Id, g)
84 | randomId g = let (bs, Just g') = BS.unfoldrN 32 (Just . random) g in (mkId' bs, g')
85 | 
86 | -------------------------------------------------------------------------------------------
87 | -- Locations
88 | -------------------------------------------------------------------------------------------
89 | 
90 | class HasLocation a where
91 | hasLocToInteger :: a -> Integer
92 | hasLocMax :: a
93 | 
94 | newtype Location = Location { unLocation :: Rational } deriving ( Eq, Show )
95 | 
96 | instance ToJSON Location where
97 | toJSON (Location l) = toJSON $ (fromRational l :: Double)
98 | 
99 | instance Arbitrary Location where
100 | arbitrary = do
101 | d <- arbitrary
102 | if d > 0
103 | then choose (0, d - 1) >>= (\n -> return $ Location (n % d))
104 | else return $ Location (0 % 1)
105 | 
106 | mkLocation :: Real a => a -> Location
107 | mkLocation l
108 | | l < 0 || l >= 1 = error "mkLocation: location must be in [0..1)"
109 | | otherwise = Location $ toRational l
110 | 
111 | -- |
112 | -- Determines if the shortest path on the circle goes to the right.
113 | rightOf :: Location -> Location -> Bool
114 | rightOf (Location l1) (Location l2)
115 | | l1 < l2 = l2 - l1 >= (1 % 2)
116 | | otherwise = l1 - l2 < (1 % 2)
117 | 
118 | toLocation :: HasLocation a => a -> Location
119 | toLocation x = Location $ (hasLocToInteger x) % (1 + (hasLocToInteger $ hasLocMax `asTypeOf` x))
120 | 
121 | locMove :: Location -> LocDistance -> Location
122 | locMove (Location l) (LocDistance d)
123 | | l' >= 1 = Location $ l' - 1
124 | | l' < 0 = Location $ l' + 1
125 | | otherwise = Location l'
126 | where
127 | l' = l + d
128 | 
129 | -- |
130 | -- Distance between two locations, always in [-0.5, 0.5].
131 | newtype LocDistance = LocDistance { unDistance :: Rational } deriving ( Eq, Ord )
132 | 
133 | instance Show LocDistance where
134 | show (LocDistance d) = show (fromRational d :: Float)
135 | 
136 | instance Arbitrary LocDistance where
137 | arbitrary = locDist <$> arbitrary <*> arbitrary
138 | 
139 | absLocDist :: Location -> Location -> LocDistance
140 | absLocDist (Location l1) (Location l2) = LocDistance $ min d (1 - d) where
141 | d = if l1 > l2 then l1 - l2 else l2 - l1
142 | 
143 | locDist :: Location -> Location -> LocDistance
144 | locDist ll1@(Location l1) ll2@(Location l2) = LocDistance $ f * min d (1 - d) where
145 | d = if l1 > l2 then l1 - l2 else l2 - l1
146 | f = if ll1 `rightOf` ll2 then 1 else (-1)
147 | 
148 | scaleDist :: LocDistance -> Rational -> LocDistance
149 | scaleDist (LocDistance d) f
150 | | f > 1 = error "scaleDist: factor > 1"
151 | | otherwise = LocDistance $ d * f
152 | 
153 | ----------------------------------------------------------------------
154 | -- Node Info
155 | ----------------------------------------------------------------------
156 | 
157 | -- |
158 | -- The node information which can be exchanged between peers.
159 | data NodeInfo a = NodeInfo
160 | { nodeId :: Id -- ^ the globally unique node ID of 256 bits
161 | , nodeAddresses :: [a]
162 | } deriving ( Eq, Show )
163 | 
164 | instance Binary a => Binary (NodeInfo a) where
165 | put ni = put (nodeId ni) >> put (nodeAddresses ni)
166 | get = NodeInfo <$> get <*> get
167 | 
168 | instance FromJSON a => FromJSON (NodeInfo a) where
169 | parseJSON (Object v) = NodeInfo
170 | <$> v .: "id"
171 | <*> v .: "addresses"
172 | parseJSON _ = mzero
173 | 
174 | instance ToJSON a => ToJSON (NodeInfo a) where
175 | toJSON ni = object
176 | [ "id" .= nodeId ni
177 | , "addresses" .= nodeAddresses ni
178 | ]
179 | 
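-- A worked example for the circle metric above (a sketch; 0.25 and 0.5 are
-- used because both are exact as Doubles): the two ways around the circle
-- are 1/4 and 3/4 apart, the short one wins, and since 0.5 lies rightOf
-- 0.25 the signed distance comes out negative:
--
-- > unDistance (mkLocation (0.25 :: Double) `locDist` mkLocation 0.5)
-- > -- -1 % 4, whereas absLocDist always yields the magnitude, here 1 % 4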
-------------------------------------------------------------------------------- /src/lib/Utils.hs: --------------------------------------------------------------------------------
1 | 
2 | module Utils (
3 | bsFromStrict, bsToStrict
4 | ) where
5 | 
6 | import qualified Data.ByteString as BS
7 | import qualified Data.ByteString.Lazy as BSL
8 | 
9 | -- |
10 | -- Can't use the one from BSL as it's not available with GHC 7.4.
11 | bsFromStrict :: BS.ByteString -> BSL.ByteString
12 | bsFromStrict bs = BSL.fromChunks [bs]
13 | 
14 | -- |
15 | -- Can't use the one from BSL as it's not available with GHC 7.4.
16 | bsToStrict :: BSL.ByteString -> BS.ByteString
17 | bsToStrict = BS.concat . BSL.toChunks
18 | -------------------------------------------------------------------------------- /src/main/Main.hs: --------------------------------------------------------------------------------
1 | 
2 | {-# LANGUAGE OverloadedStrings #-}
3 | 
4 | module Main ( main ) where
5 | 
6 | import Control.Applicative ( (<$>) )
7 | import Control.Concurrent ( forkIO )
8 | import Control.Concurrent.STM
9 | import Control.Monad ( unless, void )
10 | import qualified Data.Aeson as JSON
11 | import qualified Data.ByteString.Lazy as BSL
12 | import qualified Data.Configurator as CFG
13 | import qualified Data.Configurator.Types as CFG
14 | import Network ( withSocketsDo )
15 | import Network.Wai.Handler.Warp as Warp
16 | import System.Directory ( createDirectoryIfMissing, doesFileExist, getAppUserDataDirectory )
17 | import System.Environment ( getArgs )
18 | import System.FilePath ( (</>) )
19 | import System.Posix.Signals (installHandler, Handler(Catch), sigINT, sigTERM)
20 | import System.Random ( getStdRandom )
21 | 
22 | import Paths_ads
23 | 
24 | import Freenet
25 | import Freenet.Chk
26 | import Freenet.Fproxy as FP
27 | import Freenet.Rijndael as RD
28 | import Freenet.Ssk
29 | import Freenet.Store
30 | import Logging as LOG
31 | import Net
32 | import Node
33 | import Peers
34 | import RestApi
35 | import Types
36 | 
37 | logI :: String -> IO ()
38 | logI = infoM "main"
39 | 
40 | logW :: String -> IO ()
41 | logW = warningM "main"
42 | 
43 | logE :: String -> IO ()
44 | logE = errorM "main"
45 | 
46 | sigHandler :: TVar Bool -> IO ()
47 | sigHandler s = do
48 | infoM "main" "shutdown on sigint/sigterm"
49 | atomically $ writeTVar s True
50 | 
51 | mkFn :: Node a -> CFG.Config -> IO (Freenet a)
52 | mkFn node cfg = do
53 | dsdir <- CFG.require cfg "datastore.directory"
54 | createDirectoryIfMissing True dsdir
55 | 
56 | chkCount <- CFG.require cfg "datastore.chk-count"
57 | chkStore <- mkStoreFile (dsdir </> "store-chk") chkCount
58 | 
59 | sskCount <- CFG.require cfg "datastore.ssk-count"
60 | sskStore <- mkStoreFile (dsdir </> "store-ssk") sskCount
61 | 
62 | atomically $ mkFreenet node Nothing chkStore sskStore
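-- The keys required by mkFn above are read from the "freenet" subconfig of
-- the main config file; a configurator-style sketch (the directory and the
-- block counts are made-up values):
--
-- > freenet {
-- >   datastore {
-- >     directory = "fn-datastore"
-- >     chk-count = 100000
-- >     ssk-count = 100000
-- >   }
-- > }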
63 | 
64 | -- |
65 | -- Assembles the HTTP server and starts it up.
66 | startHttpServer :: (PeerAddress a, JSON.ToJSON a) => CFG.Config -> Node a -> IO ()
67 | startHttpServer cfg node = do
68 | -- start HTTP Server
69 | startRestApi (CFG.subconfig "node.http" cfg) node
70 | 
71 | -- start fproxy
72 | fpport <- CFG.lookup cfg "fproxy.port" :: IO (Maybe Int)
73 | case fpport of
74 | Nothing -> return ()
75 | Just p -> error "need resolver for fproxy" -- void $ forkIO $ Warp.run p $ FP.fproxy node
76 | 
77 | main :: IO ()
78 | main = withSocketsDo $ do
79 | infoM "main" "Starting up..."
80 | 
81 | RD.initRijndael
82 | 
83 | -- find and maybe create home directory
84 | args <- getArgs
85 | appDir <- if null args then getAppUserDataDirectory "ads" else return (head args)
86 | createDirectoryIfMissing True appDir
87 | 
88 | -- install signal handler for shutdown handling
89 | shutdown <- newTVarIO False
90 | void $ installHandler sigINT (Catch $ sigHandler shutdown) Nothing
91 | void $ installHandler sigTERM (Catch $ sigHandler shutdown) Nothing
92 | 
93 | LOG.initLogging
94 | 
95 | -- load (and maybe create) configuration file
96 | let
97 | cfgFile = appDir </> "config"
98 | infoFile = appDir </> "identity"
99 | 
100 | doesFileExist cfgFile >>= \e -> unless e $ do
101 | dfile <- getDataFileName "default-config"
102 | BSL.readFile dfile >>= BSL.writeFile cfgFile
103 | 
104 | cfg <- CFG.load [CFG.Required $ appDir </> "config"]
105 | 
106 | -- read (maybe create) identity
107 | mNodeInfo <- doesFileExist infoFile >>= \e ->
108 | if e
109 | then JSON.decode <$> BSL.readFile infoFile
110 | else do
111 | nid <- getStdRandom randomId
112 | let result = NodeInfo nid ([] :: [TcpAddress])
113 | BSL.writeFile infoFile $ JSON.encode result
114 | return $ Just result
115 | 
116 | let fnCfg = CFG.subconfig "freenet" cfg
117 | 
118 | -- start our node
119 | case mNodeInfo of
120 | Nothing -> do
121 | logE $ "problem with " ++ infoFile
122 | 
123 | Just nodeInfo -> do
124 | logI $ "node identity is " ++ (show $ nodeId nodeInfo)
125 | node <- mkNode nodeInfo
126 | fn <- mkFn node fnCfg
127 | 
128 | startHttpServer cfg node
129 | 
130 | readPeers appDir >>= \ps -> case ps of
131 | Left e -> logW ("error parsing peers file: " ++ e)
132 | Right peers -> do
133 | logI ("got " ++ show (length peers) ++ " peers")
134 | atomically $ mapM_ (mergeNodeInfo node) peers
135 | 
136 | nodeListen (CFG.subconfig "node.listen" cfg) node
137 | 
138 | -- wait for shutdown
139 | atomically $ readTVar shutdown >>= check
140 | shutdownFreenet fn
141 | -------------------------------------------------------------------------------- /src/sandbox/Main.hs: --------------------------------------------------------------------------------
1 | 
2 | module Main ( main ) where
3 | 
4 | import Sandbox
5 | 
6 | main :: IO ()
7 | main = runSandbox $ do
8 | n1 <- randomNode
9 | n2 <- randomNode
10 | n1 `addPeer` n2
11 | delay 10000000
12 | -------------------------------------------------------------------------------- /src/sandbox/Network.hs: --------------------------------------------------------------------------------
1 | 
2 | module Network (
3 | MsgPassNet, mkMsgPassNet,
4 | 
5 | MsgPassAddress, mkAddress
6 | ) where
7 | 
8 | import Control.Concurrent ( forkIO )
9 | import Control.Concurrent.STM
10 | import Control.Monad ( liftM2, void )
11 | import Data.Conduit
12 | import Data.Conduit.TQueue
13 | import qualified Data.HashMap.Strict as HMap
14 | import Data.IORef
15 | 
16 | import Message
17 | import Node
18 | import Peers
19 | 
20 | type SBMessage = Message MsgPassAddress
21 | type MpIO = (Source IO SBMessage, Sink SBMessage IO ())
22 | 
23 | data MsgPassAddress = Addr
24 | { aId :: ! Int
25 | , aNet :: ! MsgPassNet
26 | , aNode :: ! (Node MsgPassAddress)
27 | }
28 | 
29 | instance Eq MsgPassAddress where
30 | (==) a1 a2 = aId a1 == aId a2
31 | 
32 | instance Show MsgPassAddress where
33 | show a = "MsgPassAddress { id = " ++ (show $ aId a) ++ " }"
34 | 
35 | instance PeerAddress MsgPassAddress where
36 | connectPeer p k = do
37 | addrs <- atomically . 
readTVar $ peerAddresses p 38 | 39 | case addrs of 40 | [] -> k $ Left "no addresses on peer" 41 | (a : _) -> do 42 | (nsrc, psink) <- entangledPair 2 43 | (psrc, nsink) <- entangledPair 2 44 | void $ forkIO $ peerConnecting (aNode a) (psrc, psink) 45 | k $ Right (nsrc, nsink) 46 | 47 | data MsgPassNet = MsgPassNet 48 | { msgPassAddrs :: IORef (HMap.HashMap Int (IO MpIO)) 49 | , msgPassNextId :: IORef Int 50 | } 51 | 52 | mkMsgPassNet :: IO MsgPassNet 53 | mkMsgPassNet = liftM2 MsgPassNet (newIORef HMap.empty) (newIORef 0) 54 | 55 | mkAddress :: Node MsgPassAddress -> MsgPassNet -> IO MsgPassAddress 56 | mkAddress node net = do 57 | aid <- atomicModifyIORef' (msgPassNextId net) $ \oid -> (oid + 1, oid) 58 | return $! Addr aid net node 59 | -------------------------------------------------------------------------------- /src/sandbox/Sandbox.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE GeneralizedNewtypeDeriving #-} 3 | 4 | module Sandbox ( 5 | Sandbox, runSandbox, delay, 6 | 7 | -- * Nodes 8 | randomNode, addPeer 9 | ) where 10 | 11 | import Control.Concurrent ( threadDelay ) 12 | import Control.Concurrent.STM 13 | import Control.Monad.RWS.Strict 14 | import System.Random ( StdGen, mkStdGen ) 15 | 16 | import Logging 17 | import Network 18 | import Node 19 | import Types 20 | 21 | newtype Sandbox a = Sandbox { unSandbox :: RWST Environment () State IO a } 22 | deriving ( Applicative, Functor, Monad, MonadReader Environment, MonadState State, MonadIO ) 23 | 24 | type SandboxNode = Node MsgPassAddress 25 | 26 | data Environment = Env 27 | { envNet :: ! MsgPassNet 28 | } 29 | 30 | data State = State 31 | { stNodes :: ! [SandboxNode] 32 | , stRng :: ! StdGen 33 | } 34 | 35 | runSandbox :: Sandbox a -> IO a 36 | runSandbox s = do 37 | net <- mkMsgPassNet 38 | initLogging 39 | 40 | let 41 | env = Env net 42 | st = State [] (mkStdGen 23) 43 | 44 | fmap fst $ evalRWST (unSandbox s) env st 45 | 46 | delay :: Int -> Sandbox () 47 | delay d = liftIO $ threadDelay d 48 | 49 | liftSTM :: STM a -> Sandbox a 50 | liftSTM = liftIO . atomically 51 | 52 | randomNode :: Sandbox SandboxNode 53 | randomNode = do 54 | nid <- get >>= \s -> let (i, g') = randomId (stRng s) in put s { stRng = g' } >> return i 55 | node <- liftIO $ mkNode (NodeInfo nid []) 56 | na <- reader envNet >>= liftIO . (mkAddress node) 57 | liftSTM $ modifyTVar' (nodeIdentity node) $ \oid -> oid { nodeAddresses = [na] } 58 | liftIO . 
putStrLn $ "created node " ++ show nid 59 | return node 60 | 61 | addPeer 62 | :: SandboxNode -- ^ the node whose peer list gets the other node added 63 | -> SandboxNode -- ^ the node to announce to the first node 64 | -> Sandbox () 65 | addPeer n1 n2 = liftIO $ atomically $ do 66 | oid <- readTVar $ nodeIdentity n2 67 | mergeNodeInfo n1 oid 68 | -------------------------------------------------------------------------------- /src/tests/Main.hs: -------------------------------------------------------------------------------- 1 | 2 | module Main ( main ) where 3 | 4 | import Test.Framework 5 | 6 | import Properties 7 | import StoreTest 8 | 9 | main :: IO () 10 | main = defaultMain 11 | [ testGroup "properties" testProperties 12 | , storeTests 13 | ] 14 | -------------------------------------------------------------------------------- /src/tests/Properties.hs: -------------------------------------------------------------------------------- 1 | 2 | module Properties ( testProperties ) where 3 | 4 | import Test.Framework 5 | 6 | import Properties.CHK 7 | import Properties.Statistics 8 | import Properties.Types 9 | 10 | testProperties :: [Test] 11 | testProperties = 12 | [ testGroup "CHK" chkTests 13 | , testGroup "stats" statsTests 14 | , testGroup "types" typeTests 15 | ] 16 | 17 | -------------------------------------------------------------------------------- /src/tests/Properties/CHK.hs: -------------------------------------------------------------------------------- 1 | 2 | module Properties.CHK ( chkTests ) where 3 | 4 | import Data.ByteString as BS 5 | import Data.Word ( Word8 ) 6 | import Test.Framework 7 | import Test.Framework.Providers.QuickCheck2 8 | 9 | import Freenet.Chk 10 | import Freenet.Types 11 | 12 | chkTests :: [Test] 13 | chkTests = 14 | [ testProperty "d(e(x)) == x" dex 15 | ] 16 | 17 | dex :: Key -> [Word8] -> Bool 18 | dex key ds = case decryptChk (encryptChk d key) key of 19 | Left _ -> False 20 | Right (d', len) -> d == BS.take len d' 21 | where 22 | d = BS.pack ds 23 | -------------------------------------------------------------------------------- /src/tests/Properties/Statistics.hs: -------------------------------------------------------------------------------- 1 | 2 | module Properties.Statistics ( statsTests ) where 3 | 4 | import Control.Applicative ( (<$>) ) 5 | import Control.Concurrent.STM 6 | import Test.Framework 7 | import Test.Framework.Providers.QuickCheck2 8 | import Test.QuickCheck 9 | import Test.QuickCheck.Monadic 10 | 11 | import Statistics 12 | import Types 13 | 14 | statsTests :: [Test] 15 | statsTests = 16 | [ testProperty "estimator sorted" estSorted 17 | , testProperty "est. 
loc in bounds" estLocInBounds 18 | ] 19 | 20 | estSorted :: [(Location, Double)] -> Property 21 | estSorted input = monadicIO $ do 22 | el <- run $ atomically $ do 23 | est <- mkTEstimator 32 0.2 0.0 24 | mapM_ (uncurry (updateTEstimator est)) input 25 | map fst <$> teToList est 26 | 27 | let 28 | locs = map mkLocation el 29 | right = zipWith rightOf (tail locs) locs 30 | 31 | assert $ and right 32 | 33 | estLocInBounds :: [(Location, Double)] -> Property 34 | estLocInBounds input = monadicIO $ do 35 | el <- run $ atomically $ do 36 | est <- mkTEstimator 32 0.2 0.0 37 | mapM_ (uncurry (updateTEstimator est)) input 38 | map fst <$> teToList est 39 | 40 | assert $ minimum el >= 0.0 41 | assert $ maximum el < 1.0 42 | -------------------------------------------------------------------------------- /src/tests/Properties/Types.hs: -------------------------------------------------------------------------------- 1 | 2 | module Properties.Types ( typeTests ) where 3 | 4 | import Test.Framework 5 | import Test.Framework.Providers.QuickCheck2 6 | import Test.QuickCheck 7 | 8 | import Data.Ratio 9 | 10 | import Types 11 | 12 | typeTests :: [Test] 13 | typeTests = 14 | [ testProperty "distance in bounds" distInBounds 15 | , testProperty "abs dist in bounds" absDistInBounds 16 | , testProperty "loc rightOf" propRightOf 17 | , testProperty "dist sign" distSign 18 | , testProperty "arbitrary distance" arbDist 19 | , testProperty "locMove bounds" locMoveBounds 20 | , testProperty "undo move" locMoveReverse 21 | ] 22 | 23 | distInBounds :: Location -> Location -> Bool 24 | distInBounds l1 l2 = d >= (-1 % 2) && d <= (1 % 2) where 25 | d = unDistance $ l1 `locDist` l2 26 | 27 | propRightOf :: Location -> Location -> Property 28 | propRightOf l1 l2 = l1 /= l2 ==> r1 `xor` r2 where 29 | r1 = l1 `rightOf` l2 30 | r2 = l2 `rightOf` l1 31 | xor p q = (p || q) && not (p && q) 32 | 33 | distSign :: Location -> Location -> Bool 34 | distSign l1 l2 = d >= 0 && (l1 `rightOf` l2) || d <= 0 && (l2 `rightOf` l1) where 35 | d = unDistance $ l1 `locDist` l2 36 | 37 | absDistInBounds :: Location -> Location -> Bool 38 | absDistInBounds l1 l2 = d >= 0 && d <= (1 % 2) where 39 | d = unDistance $ l1 `absLocDist` l2 40 | 41 | arbDist :: LocDistance -> Bool 42 | arbDist d = let d' = unDistance d in d' >= (-0.5) && d' <= 0.5 43 | 44 | locMoveBounds :: Location -> LocDistance -> Property 45 | locMoveBounds l d = counterexample (show $ (l, unDistance d, l')) $ l' >= 0 && l' < 1 where 46 | l' = unLocation $ l `locMove` d 47 | 48 | locMoveReverse :: Location -> LocDistance -> Property 49 | locMoveReverse l d = abs (unDistance d) /= (1 % 2) ==> counterexample 50 | ("d=" ++ show (unDistance d) ++ ", d'=" ++ show (unDistance d')) 51 | (d' == d) 52 | where 53 | l' = l `locMove` d 54 | d' = locDist l' l 55 | 56 | 57 | -------------------------------------------------------------------------------- /src/tests/StoreTest.hs: -------------------------------------------------------------------------------- 1 | 2 | module StoreTest ( storeTests ) where 3 | 4 | import Data.ByteString as BS 5 | import System.IO.Temp 6 | import qualified Test.Framework as TF 7 | import Test.Framework.Providers.HUnit 8 | import Test.HUnit 9 | 10 | import Freenet.Store 11 | import Freenet.Types 12 | 13 | storeTests :: TF.Test 14 | storeTests = TF.testGroup "store" 15 | [ testCase "create store" testCreate 16 | ] 17 | 18 | testCreate :: Assertion 19 | testCreate = withSystemTempDirectory "ads-test" $ \tmpdir -> do 20 | sf <- mkStoreFile (tmpdir ++ "dummy-store") 1000 
:: IO (StoreFile DummyStorable)
21 | return ()
22 | 
23 | dummySize :: Int
24 | dummySize = 12345
25 | 
26 | newtype DummyStorable = DS BS.ByteString
27 | 
28 | instance DataBlock DummyStorable where
29 | 
30 | 
31 | instance StorePersistable DummyStorable where
32 | storeSize _ = dummySize
33 | -------------------------------------------------------------------------------- /webUi/css/node-status.css: --------------------------------------------------------------------------------
1 | 
2 | .bar rect {
3 | fill: steelblue;
4 | shape-rendering: crispEdges;
5 | }
6 | 
7 | .bar text {
8 | fill: #fff;
9 | }
10 | 
11 | .axis path, .axis line {
12 | fill: none;
13 | stroke: #000;
14 | shape-rendering: crispEdges;
15 | }
16 | 
17 | .estimator circle {
18 | fill: none;
19 | stroke: #449;
20 | }
21 | 
22 | .estimator polygon {
23 | fill: #77f;
24 | opacity: 0.5;
25 | }
26 | -------------------------------------------------------------------------------- /webUi/fonts/glyphicons-halflings-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/waldheinz/ads/7695a157d76f35849351b2e039890eae1b419088/webUi/fonts/glyphicons-halflings-regular.eot -------------------------------------------------------------------------------- /webUi/fonts/glyphicons-halflings-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/waldheinz/ads/7695a157d76f35849351b2e039890eae1b419088/webUi/fonts/glyphicons-halflings-regular.ttf -------------------------------------------------------------------------------- /webUi/fonts/glyphicons-halflings-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/waldheinz/ads/7695a157d76f35849351b2e039890eae1b419088/webUi/fonts/glyphicons-halflings-regular.woff -------------------------------------------------------------------------------- /webUi/index.html: -------------------------------------------------------------------------------- (markup not preserved in this dump — of the page only the title "Bootstrap 101 Template" and a "Home" nav entry survive)
-------------------------------------------------------------------------------- /webUi/js/node-status.js: --------------------------------------------------------------------------------
1 | // (the HTML tag arguments to $() were stripped from this dump; the tag names below are reconstructed)
2 | function appendStatus(container) {
3 | container.append($('<h1/>').text('Status'));
4 | 
5 | container.append($('<h2/>').text('Peers'));
6 | var peerStatus = $('<div/>');
7 | container.append(peerStatus);
8 | appendPeerStatus(peerStatus);
9 | 
10 | container.append($('<h2/>').text('Data Stores'));
11 | 
12 | container.append($('<h3/>').text('CHK Store'));
13 | var chkStatus = $('<div/>');
14 | container.append(chkStatus);
15 | appendStoreStatus("api/status/store/chk", chkStatus);
16 | 
17 | container.append($('<h3/>').text('SSK Store'));
18 | var sskStatus = $('<div/>');
19 | container.append(sskStatus);
20 | 
21 | appendStoreStatus("api/status/store/ssk", sskStatus);
22 | }
23 | 
24 | function appendPeerStatus(container) {
25 | var list = $('<ul/>').addClass('list-group');
26 | container.append(list);
27 | 
28 | $.getJSON("api/status/peers").then(function(json) {
29 | var conn = json.connected;
30 | 
31 | for (var i=0; i < conn.length; i++) {
32 | var p = conn[i];
33 | 
34 | var li = $('<li/>').addClass('list-group-item');
35 | list.append(li);
36 | li.append($('<div/>').text(p.peer.id));
37 | li.append($('<div/>').text(
38 | "connected since " + p.connectedFor.toFixed(1) + "s"));
39 | li.append(showEstimator(p.peer.psuccess));
40 | }
41 | });
42 | }
43 | 
44 | function showEstimator(data) {
45 | var width = 128;
46 | var height = 128;
47 | var cx = width / 2;
48 | var cy = height / 2;
49 | 
50 | var result = document.createElementNS(d3.ns.prefix.svg, 'svg');
51 | var svg = d3.select(result);
52 | 
53 | svg
54 | .attr("width", width)
55 | .attr("height", height)
56 | .attr("class", "estimator");
57 | 
58 | 
59 | for (var i=0; i < 5; i++) {
60 | svg.append("circle")
61 | .attr("cx", cx)
62 | .attr("cy", cy)
63 | .attr("r", (i+1) / 10 * width);
64 | }
65 | 
66 | 
67 | var points = "";
68 | 
69 | for (var i=0; i < data.length; i++) {
70 | var pt = data[i][1];
71 | var r = 64 * pt[1];
72 | var px = cx + r * Math.cos(2 * Math.PI * pt[0]);
73 | var py = cy + r * Math.sin(2 * Math.PI * pt[0]);
74 | 
75 | points += " " + px + "," + py;
76 | }
77 | 
78 | svg.append("svg:polygon")
79 | .attr("points", points);
80 | 
81 | return result;
82 | 
83 | }
84 | 
85 | function appendStoreStatus(url, container) {
86 | $.getJSON(url).then(function (json) {
87 | container.append(makeHist(json.histogram));
88 | 
89 | var used = 0;
90 | 
91 | for (var i=0; i < json.histogram.length; i++) {
92 | used += json.histogram[i][1];
93 | }
94 | var pct = 100 * used / json.capacity;
95 | 
96 | var bar = $('<div/>')
97 | .addClass('progress-bar')
98 | .css('width', pct + '%');
99 | 
100 | container.append($('<div/>').addClass('progress')
101 | .append(bar));
102 | });
103 | }
104 | 
105 | function makeHist(data) {
106 | var margin = {top: 10, right: 30, bottom: 30, left: 30},
107 | width = 452 + margin.left + margin.right,
108 | height = 128 - margin.top - margin.bottom;
109 | 
110 | var x = d3.scale.linear()
111 | .domain([0, 1])
112 | .range([0, width]);
113 | 
114 | var y = d3.scale.linear()
115 | .domain([0, d3.max(data, function(d) {
116 | return d[1];
117 | })])
118 | .range([height, 0]);
119 | 
120 | var xAxis = d3.svg.axis()
121 | .scale(x)
122 | .orient("bottom");
123 | 
124 | var result = document.createElementNS(d3.ns.prefix.svg, 'svg');
125 | var svg = d3.select(result);
126 | 
127 | svg.attr("width", width + margin.left + margin.right)
128 | .attr("height", height + margin.top + margin.bottom)
129 | .append("g")
130 | .attr("transform", "translate(" + margin.left + "," + margin.top + ")");
131 | 
132 | var bar = svg.selectAll(".bar")
133 | .data(data)
134 | .enter().append("g")
135 | .attr("class", "bar")
136 | .attr("transform", function(d) {
137 | return "translate(" + x(d[0] / 256) + "," + y(d[1]) + ")";
138 | });
139 | 
140 | bar.append("rect")
141 | .attr("x", 1)
142 | .attr("width", 1)
143 | .attr("height", function(d) {
144 | return height - y(d[1]);
145 | });
146 | 
147 | svg.append("g")
148 | .attr("class", "x axis")
149 | .attr("transform", "translate(0," + height + ")")
150 | .call(xAxis);
151 | 
152 | return result;
153 | }
154 | --------------------------------------------------------------------------------