├── .github └── workflows │ └── go.yml ├── .gitignore ├── LICENSE ├── README.md ├── bench ├── full │ ├── decode_test.go │ ├── encode_test.go │ └── recode_test.go └── systematic │ ├── decoder_test.go │ └── encoder_test.go ├── data.go ├── data_test.go ├── errors.go ├── example ├── full │ └── main.go └── systematic │ └── main.go ├── full ├── decoder.go ├── decoder_test.go ├── encoder.go ├── encoder_test.go ├── recoder.go └── recoder_test.go ├── go.mod ├── go.sum ├── img ├── benchmark_full_decoder.png ├── benchmark_full_encoder.png ├── benchmark_full_recoder.png ├── benchmark_systematic_decoder.png ├── benchmark_systematic_encoder.png ├── logo.png └── systematic_rlnc_example.png ├── matrix ├── decoder_state.go ├── matrix.go ├── matrix_bench_test.go └── matrix_test.go └── systematic ├── decoder.go ├── decoder_test.go ├── encoder.go └── encoder_test.go /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | 11 | build: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v2 15 | 16 | - name: Set up Go 17 | uses: actions/setup-go@v2 18 | with: 19 | go-version: 1.16 20 | 21 | - name: Test 22 | run: go test -v -cover ./... 
23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | recovered.png 17 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Creative Commons Legal Code 2 | 3 | CC0 1.0 Universal 4 | 5 | CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE 6 | LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN 7 | ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS 8 | INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES 9 | REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS 10 | PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM 11 | THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED 12 | HEREUNDER. 13 | 14 | Statement of Purpose 15 | 16 | The laws of most jurisdictions throughout the world automatically confer 17 | exclusive Copyright and Related Rights (defined below) upon the creator 18 | and subsequent owner(s) (each and all, an "owner") of an original work of 19 | authorship and/or a database (each, a "Work"). 
20 | 21 | Certain owners wish to permanently relinquish those rights to a Work for 22 | the purpose of contributing to a commons of creative, cultural and 23 | scientific works ("Commons") that the public can reliably and without fear 24 | of later claims of infringement build upon, modify, incorporate in other 25 | works, reuse and redistribute as freely as possible in any form whatsoever 26 | and for any purposes, including without limitation commercial purposes. 27 | These owners may contribute to the Commons to promote the ideal of a free 28 | culture and the further production of creative, cultural and scientific 29 | works, or to gain reputation or greater distribution for their Work in 30 | part through the use and efforts of others. 31 | 32 | For these and/or other purposes and motivations, and without any 33 | expectation of additional consideration or compensation, the person 34 | associating CC0 with a Work (the "Affirmer"), to the extent that he or she 35 | is an owner of Copyright and Related Rights in the Work, voluntarily 36 | elects to apply CC0 to the Work and publicly distribute the Work under its 37 | terms, with knowledge of his or her Copyright and Related Rights in the 38 | Work and the meaning and intended legal effect of CC0 on those rights. 39 | 40 | 1. Copyright and Related Rights. A Work made available under CC0 may be 41 | protected by copyright and related or neighboring rights ("Copyright and 42 | Related Rights"). Copyright and Related Rights include, but are not 43 | limited to, the following: 44 | 45 | i. the right to reproduce, adapt, distribute, perform, display, 46 | communicate, and translate a Work; 47 | ii. moral rights retained by the original author(s) and/or performer(s); 48 | iii. publicity and privacy rights pertaining to a person's image or 49 | likeness depicted in a Work; 50 | iv. rights protecting against unfair competition in regards to a Work, 51 | subject to the limitations in paragraph 4(a), below; 52 | v. 
rights protecting the extraction, dissemination, use and reuse of data 53 | in a Work; 54 | vi. database rights (such as those arising under Directive 96/9/EC of the 55 | European Parliament and of the Council of 11 March 1996 on the legal 56 | protection of databases, and under any national implementation 57 | thereof, including any amended or successor version of such 58 | directive); and 59 | vii. other similar, equivalent or corresponding rights throughout the 60 | world based on applicable law or treaty, and any national 61 | implementations thereof. 62 | 63 | 2. Waiver. To the greatest extent permitted by, but not in contravention 64 | of, applicable law, Affirmer hereby overtly, fully, permanently, 65 | irrevocably and unconditionally waives, abandons, and surrenders all of 66 | Affirmer's Copyright and Related Rights and associated claims and causes 67 | of action, whether now known or unknown (including existing as well as 68 | future claims and causes of action), in the Work (i) in all territories 69 | worldwide, (ii) for the maximum duration provided by applicable law or 70 | treaty (including future time extensions), (iii) in any current or future 71 | medium and for any number of copies, and (iv) for any purpose whatsoever, 72 | including without limitation commercial, advertising or promotional 73 | purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each 74 | member of the public at large and to the detriment of Affirmer's heirs and 75 | successors, fully intending that such Waiver shall not be subject to 76 | revocation, rescission, cancellation, termination, or any other legal or 77 | equitable action to disrupt the quiet enjoyment of the Work by the public 78 | as contemplated by Affirmer's express Statement of Purpose. 79 | 80 | 3. Public License Fallback. 
Should any part of the Waiver for any reason 81 | be judged legally invalid or ineffective under applicable law, then the 82 | Waiver shall be preserved to the maximum extent permitted taking into 83 | account Affirmer's express Statement of Purpose. In addition, to the 84 | extent the Waiver is so judged Affirmer hereby grants to each affected 85 | person a royalty-free, non transferable, non sublicensable, non exclusive, 86 | irrevocable and unconditional license to exercise Affirmer's Copyright and 87 | Related Rights in the Work (i) in all territories worldwide, (ii) for the 88 | maximum duration provided by applicable law or treaty (including future 89 | time extensions), (iii) in any current or future medium and for any number 90 | of copies, and (iv) for any purpose whatsoever, including without 91 | limitation commercial, advertising or promotional purposes (the 92 | "License"). The License shall be deemed effective as of the date CC0 was 93 | applied by Affirmer to the Work. Should any part of the License for any 94 | reason be judged legally invalid or ineffective under applicable law, such 95 | partial invalidity or ineffectiveness shall not invalidate the remainder 96 | of the License, and in such case Affirmer hereby affirms that he or she 97 | will not (i) exercise any of his or her remaining Copyright and Related 98 | Rights in the Work or (ii) assert any associated claims and causes of 99 | action with respect to the Work, in either case contrary to Affirmer's 100 | express Statement of Purpose. 101 | 102 | 4. Limitations and Disclaimers. 103 | 104 | a. No trademark or patent rights held by Affirmer are waived, abandoned, 105 | surrendered, licensed or otherwise affected by this document. 106 | b. 
Affirmer offers the Work as-is and makes no representations or 107 | warranties of any kind concerning the Work, express, implied, 108 | statutory or otherwise, including without limitation warranties of 109 | title, merchantability, fitness for a particular purpose, non 110 | infringement, or the absence of latent or other defects, accuracy, or 111 | the present or absence of errors, whether or not discoverable, all to 112 | the greatest extent permissible under applicable law. 113 | c. Affirmer disclaims responsibility for clearing rights of other persons 114 | that may apply to the Work or any use thereof, including without 115 | limitation any person's Copyright and Related Rights in the Work. 116 | Further, Affirmer disclaims responsibility for obtaining any necessary 117 | consents, permissions or other rights required for any use of the 118 | Work. 119 | d. Affirmer understands and acknowledges that Creative Commons is not a 120 | party to this document and has no duty or obligation with respect to 121 | this CC0 or use of the Work. 122 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # kodr 2 | 3 | ![kodr-logo](./img/logo.png) 4 | 5 | Random Linear Network Coding 6 | 7 | ## Motivation 8 | 9 | For sometime now I've been exploring **R**andom **L**inear **N**etwork **C**oding & while looking for implementation(s) of RLNC-based schemes I didn't find stable & maintained library in *Golang*, which made me take up this venture of writing **kodr**, which I plan to maintain & keep updated as I keep learning of new RLNC schemes & possible application domains of RLNC. 10 | 11 | For all the RLNC-based schemes I implement in **kodr** i.e. _{full RLNC, on-the-fly RLNC, sparse RLNC, generational RLNC, Caterpillar RLNC ...}_, I write respective examples on how exposed API can be used for *encoding, recoding & decoding* binary data. 
12 | 13 | ## Background 14 | 15 | For learning about RLNC you may want to go through my [post](https://itzmeanjan.in/pages/rlnc-in-depth.html). In **kodr**, I perform all finite field operations on GF(2\*\*8) --- which is seemingly a good fit, because I consider each byte to be a symbol of RLNC, which is a finite field elements --- 256 of them. Also speaking of performance & memory consumption, GF(2\*\*8) keeps a good balance between these. Working on higher field indeed decreases chance of ( randomly ) generating linearly dependent pieces, but requires more costly computation & if finite field operations are implemented in terms of addition/ multiplication tables then memory consumption increases to a great extent. On the other hand, working on GF(2) increases change of generating linearly dependent pieces, though with sophisticated design like Fulcrum codes, they can be proved to be beneficial. Another point is the higher the finite field, higher is the cost of storing random coding vectors --- because each element of coding vector ( read coding coefficient ) is a finite field element. 16 | 17 | ## Installation 18 | 19 | Assuming you have Golang (>=1.16) installed, add **kodr** as an dependency to your project, which uses *GOMOD* for dependency management purposes, by executing 20 | 21 | ```bash 22 | go get -u github.com/itzmeanjan/kodr/... 23 | ``` 24 | 25 | ## Testing 26 | 27 | After you clone this repo, you may want to run test cases 28 | 29 | ```bash 30 | go test -v -cover ./... 
31 | ``` 32 | 33 | ## Benchmarking 34 | 35 | I write required testcases for benchmarking performance of {en, re, de}coder of implemented RLNC schemes, while I also present results after running those on consumer grade machines with configuration 36 | 37 | - `Intel(R) Core(TM) i3-5005U CPU @ 2.00GHz` 38 | - `Intel(R) Core(TM) i5-8279U CPU @ 2.40GHz` 39 | 40 | --- 41 | 42 | ### Full RLNC 43 | 44 | For benchmarking **encoder** of full RLNC, execute 45 | 46 | ```bash 47 | go test -run=xxx -bench=Encoder ./bench/full/ 48 | ``` 49 | 50 | > Coding speed at **~ 290MB/s** 51 | 52 | ![benchmark_full_encoder](./img/benchmark_full_encoder.png) 53 | 54 | Looking at full RLNC **recoder** performance 55 | 56 | ```bash 57 | go test -run=xxx -bench=Recoder ./bench/full/ 58 | ``` 59 | 60 | > Recoding speed at **~ 290MB/s** 61 | 62 | ![benchmark_full_recoder](./img/benchmark_full_recoder.png) 63 | 64 | And **decoder** performance denotes each round of full data reconstruction from N-many coded pieces taking `X second`, on average. 65 | 66 | > Note: It can be clearly understood that decoding complexity keeps increasing very fast, when using full RLNC with large data chunks. For decoding 2MB total chunk which is splitted into 256 pieces of equal sized byte slice, it takes ~6s. 67 | 68 | ![benchmark_full_decoder](./img/benchmark_full_decoder.png) 69 | 70 | --- 71 | 72 | ### Systematic RLNC 73 | 74 | Running benchmark tests on better hardware shows encoder performance improvement to quite a large extent 75 | 76 | > Average encoding speed **~660MB/s** 77 | 78 | ```bash 79 | go test -run=xxx -bench=Encoder ./bench/systematic 80 | ``` 81 | 82 | ![benchmark_systematic_encoder](img/benchmark_systematic_encoder.png) 83 | 84 | Systematic RLNC decoder has an advantage over full RLNC decoder because it may get some pieces which are actually uncoded, just augmented to be coded, so it doesn't need to process those pieces, rather it'll use uncoded pieces to decode other coded pieces faster. 
85 | 86 | ```bash 87 | go test -run=xxx -bench=Decoder ./bench/systematic 88 | ``` 89 | 90 | > For decoding 1MB whole chunk, which is splitted into 512 pieces & coded, it takes quite long time --- growth rate of decoding time is pretty high as piece count keeps increasing. It's better not to increase piece count very much, rather piece size can increase, so that we pay relatively lower decoding cost. 91 | 92 | > Notice how whole chunk size increases to 2MB, but with small piece count decoding time stays afforable. 93 | 94 | ![benchmark_systematic_decoder](img/benchmark_systematic_decoder.png) 95 | 96 | ## Usage 97 | 98 | Examples demonstrating how to use API exposed by **kodr** for _( currently )_ supported RLNC schemes. 99 | 100 | > In each walk through, code snippets are prepended with line numbers, denoting actual line numbers in respective file. 101 | 102 | --- 103 | 104 | ### Full RLNC 105 | 106 | **Example:** `example/full/main.go` 107 | 108 | Let's start by seeding random number generator with current unix timestamp with nanosecond level precision. 109 | 110 | ```go 111 | 22| rand.Seed(time.Now().UnixNano()) 112 | ``` 113 | 114 | I read **kodr** [logo](#kodr), which is a PNG file, into in-memory byte array of length 3965 & compute SHA512 hash : `0xee9ec63a713ab67d82e0316d24ea646f7c5fb745ede9c462580eca5f` 115 | 116 | ```go 117 | 24| img := path.Join("..", "..", "img", "logo.png") 118 | 119 | ... 120 | 121 | 37| log.Printf("SHA512: 0x%s\n\n", hex.EncodeToString(sum)) 122 | ``` 123 | 124 | I decide to split it into 64 pieces ( each of equal length ) & perform full RLNC, resulting into 128 coded pieces. 125 | 126 | ```go 127 | 45| log.Printf("Coding %d pieces together\n", pieceCount) 128 | 46| enc, err := full.NewFullRLNCEncoderWithPieceCount(data, pieceCount) 129 | 130 | ... 131 | 132 | 57| log.Printf("Coded into %d pieces\n", codedPieceCount) 133 | ``` 134 | 135 | Then I randomly drop 32 coded pieces, simulating these are lost/ dropped. 
I've 96 remaining pieces, which I recode into 192 coded pieces. I random shuffle those 192 coded pieces to simulate that their reception order can arbitrarily vary. Then I randomly drop 96 pieces, leaving me with other 96 pieces. 136 | 137 | ```go 138 | 59| for i := 0; i < int(droppedPieceCount); i++ { 139 | 140 | ... 141 | 142 | 98| log.Printf("Dropped %d pieces, remaining %d pieces\n\n", recodedPieceCount/2, len(recodedPieces)) 143 | ``` 144 | 145 | Now I create a decoder which expects to receive 64 linearly independent pieces so that it can fully construct back **kodr** logo. I've 96 pieces, with no idea whether they're linearly independent or not, still I start decoding. 146 | 147 | ```go 148 | 101| dec := full.NewFullRLNCDecoder(pieceCount) 149 | ``` 150 | 151 | Courageously I just add 64 coded pieces into decoder & hope all of those will be linearly independent --- turns out to be so. 152 | 153 | > This is the power of RLNC, where random coding coefficients do same job as other specially crafted codes. 154 | 155 | Just a catch, decoded data's length is more than 3965 bytes. 156 | 157 | ```go 158 | 124| log.Printf("Decoded into %d bytes\n", len(decoded_data)) // 3968 bytes 159 | ``` 160 | 161 | This is due to fact, I asked **kodr** to split original 3965 bytes into 64 pieces & code them together, but turns out 3965 is not properly divisible by 64, which is why **kodr** decided to append 3 extra bytes at end --- making it 3968 bytes. This way **kodr** splitted whole image into 64 equal sized pieces, where each piece size is 62 bytes. 162 | 163 | So, SHA512-ing first 3965 bytes of decoded data slice must be equal to `0xee9ec63a713ab67d82e0316d24ea646f7c5fb745ede9c462580eca5f` --- and it's so. 164 | 165 | ```go 166 | 131| log.Printf("First %d bytes of decoded data matches original %d bytes\n", len(data), len(data)) 167 | 168 | ... 
169 | 170 | 137| log.Printf("SHA512: 0x%s\n", hex.EncodeToString(sum)) 171 | ``` 172 | 173 | Finally I write back reconstructed image into PNG file. 174 | 175 | ```go 176 | 139| if err := os.WriteFile("recovered.png", decoded_data[:len(data)], 0o644); err != nil { 177 | 178 | ... 179 | ``` 180 | 181 | For running this example 182 | 183 | ```bash 184 | # assuming you're in root directory of `kodr` 185 | cd example/full 186 | go run main.go 187 | ``` 188 | 189 | This should generate `example/full/recovered.png`, which is exactly same as `img/logo.png`. 190 | 191 | --- 192 | 193 | ### Systematic RLNC 194 | 195 | **Example: `example/systematic/main.go`** 196 | 197 | I start by seeding random number generator with device's nanosecond precision time 198 | 199 | ```go 200 | 46| rand.Seed(time.Now().UnixNano()) 201 | ``` 202 | 203 | I define one structure for storing randomly generated values, which I serialise to JSON. 204 | 205 | ```go 206 | 17| type Data struct { 207 | . FieldA uint `json:"fieldA"` 208 | . FieldB float64 `json:"fieldB"` 209 | . FieldC bool `json:"fieldC"` 210 | . FieldD []byte `json:"fieldD"` 211 | 22| } 212 | ``` 213 | 214 | For filling up this structure, I invoke random data generator 215 | 216 | ```go 217 | 48| data := randData() 218 | ``` 219 | 220 | I calculate SHA512 hash of JSON serialised data, which turns out to be `0x25c37651f7a567963a884ef04d7dc6df0901ab58ca28aa3eaf31097e5d9155d4` in this run. 221 | 222 | ```go 223 | 56| hasher := sha512.New512_256() 224 | 225 | . 226 | . 227 | 228 | 59| log.Printf("SHA512(original): 0x%s\n", hex.EncodeToString(sum)) 229 | ``` 230 | 231 | I decide to split serialised data into N-many pieces, each of length 8 bytes. 
232 | 233 | ```go 234 | 61| var ( 235 | 62| pieceSize uint = 1 << 3 // in bytes 236 | 63| ) 237 | 238 | 65| enc, err := systematic.NewSystematicRLNCEncoderWithPieceSize(m_data, pieceSize) 239 | 66| if err != nil { /* exit */ } 240 | ``` 241 | 242 | So I've generated 2474 bytes of JSON serialised data, which after splitting into equal sized byte slices ( read original pieces ), I get 310 pieces --- pieces which are to be coded together. It requires me to append 6 empty bytes --- `8 x 310 - 6 = 2480 - 6 = 2474 bytes` 243 | 244 | Systematic encoder also informs me, I need to consume 98580 bytes of coded data to construct original pieces i.e. original JSON serialised data. 245 | 246 | I simulate some pieces collected, while some are dropped 247 | 248 | ```go 249 | 75| dec := systematic.NewSystematicRLNCDecoder(enc.PieceCount()) 250 | 76| for { 251 | c_piece := enc.CodedPiece() 252 | 253 | // simulating piece drop/ loss 254 | if rand.Intn(2) == 0 { 255 | continue 256 | } 257 | 258 | err := dec.AddPiece(c_piece) 259 | ... 260 | 88| } 261 | ``` 262 | 263 | > Note: As these pieces are coded using systematic encoder, first N-many pieces ( here N = 310 ), are kept uncoded though they're augmented to be coded by providing with coding vector which has only one non-zero element. Next coded pieces i.e. >310th carry randomly generated coding vectors as usual. 264 | 265 | I'm able to recover 2480 bytes of serialised data, but notice, padding is counted. So I strip out last 6 padding bytes, which results into 2474 bytes of serialised data. Computing SHA512 on recovered data must produce same hash as found with original data. 266 | 267 | And it's indeed same hash `0x25c37651f7a567963a884ef04d7dc6df0901ab58ca28aa3eaf31097e5d9155d4` --- asserting reconstructed data is same as original data, when padding bytes stripped out. 
268 | 269 | ![example](./img/systematic_rlnc_example.png) 270 | 271 | For running this example 272 | 273 | ```bash 274 | pushd example/systematic 275 | go run main.go 276 | popd 277 | ``` 278 | 279 | > Note: Your console log is probably going to be different than mine. 280 | 281 | --- 282 | 283 | **More schemes coming soon !** 284 | -------------------------------------------------------------------------------- /bench/full/decode_test.go: -------------------------------------------------------------------------------- 1 | package full_test 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | "time" 7 | 8 | "github.com/itzmeanjan/kodr" 9 | "github.com/itzmeanjan/kodr/full" 10 | ) 11 | 12 | func BenchmarkFullRLNCDecoder(t *testing.B) { 13 | t.Run("256K", func(b *testing.B) { 14 | b.Run("16 Pieces", func(b *testing.B) { decode(b, 1<<4, 1<<18) }) 15 | b.Run("32 Pieces", func(b *testing.B) { decode(b, 1<<5, 1<<18) }) 16 | b.Run("64 Pieces", func(b *testing.B) { decode(b, 1<<6, 1<<18) }) 17 | b.Run("128 Pieces", func(b *testing.B) { decode(b, 1<<7, 1<<18) }) 18 | b.Run("256 Pieces", func(b *testing.B) { decode(b, 1<<8, 1<<18) }) 19 | }) 20 | 21 | t.Run("512K", func(b *testing.B) { 22 | b.Run("16 Pieces", func(b *testing.B) { decode(b, 1<<4, 1<<19) }) 23 | b.Run("32 Pieces", func(b *testing.B) { decode(b, 1<<5, 1<<19) }) 24 | b.Run("64 Pieces", func(b *testing.B) { decode(b, 1<<6, 1<<19) }) 25 | b.Run("128 Pieces", func(b *testing.B) { decode(b, 1<<7, 1<<19) }) 26 | b.Run("256 Pieces", func(b *testing.B) { decode(b, 1<<8, 1<<19) }) 27 | }) 28 | 29 | t.Run("1M", func(b *testing.B) { 30 | b.Run("16 Pieces", func(b *testing.B) { decode(b, 1<<4, 1<<20) }) 31 | b.Run("32 Pieces", func(b *testing.B) { decode(b, 1<<5, 1<<20) }) 32 | b.Run("64 Pieces", func(b *testing.B) { decode(b, 1<<6, 1<<20) }) 33 | b.Run("128 Pieces", func(b *testing.B) { decode(b, 1<<7, 1<<20) }) 34 | b.Run("256 Pieces", func(b *testing.B) { decode(b, 1<<8, 1<<20) }) 35 | }) 36 | 37 | t.Run("2M", func(b 
*testing.B) { 38 | b.Run("16 Pieces", func(b *testing.B) { decode(b, 1<<4, 1<<21) }) 39 | b.Run("32 Pieces", func(b *testing.B) { decode(b, 1<<5, 1<<21) }) 40 | b.Run("64 Pieces", func(b *testing.B) { decode(b, 1<<6, 1<<21) }) 41 | b.Run("128 Pieces", func(b *testing.B) { decode(b, 1<<7, 1<<21) }) 42 | b.Run("256 Pieces", func(b *testing.B) { decode(b, 1<<8, 1<<21) }) 43 | }) 44 | } 45 | 46 | func decode(t *testing.B, pieceCount uint, total uint) { 47 | rand.Seed(time.Now().UnixNano()) 48 | 49 | data := generateData(total) 50 | enc, err := full.NewFullRLNCEncoderWithPieceCount(data, pieceCount) 51 | if err != nil { 52 | t.Fatalf("Error: %s\n", err.Error()) 53 | } 54 | 55 | pieces := make([]*kodr.CodedPiece, 0, 2*pieceCount) 56 | for i := 0; i < int(2*pieceCount); i++ { 57 | pieces = append(pieces, enc.CodedPiece()) 58 | } 59 | 60 | t.ResetTimer() 61 | 62 | totalDuration := 0 * time.Second 63 | for i := 0; i < t.N; i++ { 64 | totalDuration += decode_(t, pieceCount, pieces) 65 | } 66 | 67 | t.ReportMetric(0, "ns/op") 68 | t.ReportMetric(float64(totalDuration.Seconds())/float64(t.N), "second/decode") 69 | } 70 | 71 | func decode_(t *testing.B, pieceCount uint, pieces []*kodr.CodedPiece) time.Duration { 72 | dec := full.NewFullRLNCDecoder(pieceCount) 73 | 74 | // randomly shuffle piece ordering 75 | rand.Shuffle(int(2*pieceCount), func(i, j int) { 76 | pieces[i], pieces[j] = pieces[j], pieces[i] 77 | }) 78 | 79 | totalDuration := 0 * time.Second 80 | for j := 0; j < int(2*pieceCount); j++ { 81 | if j+1 >= int(pieceCount) && dec.IsDecoded() { 82 | break 83 | } 84 | 85 | begin := time.Now() 86 | dec.AddPiece(pieces[j]) 87 | totalDuration += time.Since(begin) 88 | } 89 | 90 | if !dec.IsDecoded() { 91 | t.Fatal("expected pieces to be decoded") 92 | } 93 | 94 | return totalDuration 95 | } 96 | -------------------------------------------------------------------------------- /bench/full/encode_test.go: 
-------------------------------------------------------------------------------- 1 | package full_test 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | "time" 7 | 8 | "github.com/itzmeanjan/kodr/full" 9 | ) 10 | 11 | func BenchmarkFullRLNCEncoder(t *testing.B) { 12 | t.Run("1M", func(b *testing.B) { 13 | b.Run("16 Pieces", func(b *testing.B) { encode(b, 1<<4, 1<<20) }) 14 | b.Run("32 Pieces", func(b *testing.B) { encode(b, 1<<5, 1<<20) }) 15 | b.Run("64 Pieces", func(b *testing.B) { encode(b, 1<<6, 1<<20) }) 16 | b.Run("128 Pieces", func(b *testing.B) { encode(b, 1<<7, 1<<20) }) 17 | b.Run("256 Pieces", func(b *testing.B) { encode(b, 1<<8, 1<<20) }) 18 | }) 19 | 20 | t.Run("16M", func(b *testing.B) { 21 | b.Run("16 Pieces", func(b *testing.B) { encode(b, 1<<4, 1<<24) }) 22 | b.Run("32 Pieces", func(b *testing.B) { encode(b, 1<<5, 1<<24) }) 23 | b.Run("64 Pieces", func(b *testing.B) { encode(b, 1<<6, 1<<24) }) 24 | b.Run("128 Pieces", func(b *testing.B) { encode(b, 1<<7, 1<<24) }) 25 | b.Run("256 Pieces", func(b *testing.B) { encode(b, 1<<8, 1<<24) }) 26 | }) 27 | 28 | t.Run("32M", func(b *testing.B) { 29 | b.Run("16 Pieces", func(b *testing.B) { encode(b, 1<<4, 1<<25) }) 30 | b.Run("32 Pieces", func(b *testing.B) { encode(b, 1<<5, 1<<25) }) 31 | b.Run("64 Pieces", func(b *testing.B) { encode(b, 1<<6, 1<<25) }) 32 | b.Run("128 Pieces", func(b *testing.B) { encode(b, 1<<7, 1<<25) }) 33 | b.Run("256 Pieces", func(b *testing.B) { encode(b, 1<<8, 1<<25) }) 34 | }) 35 | } 36 | 37 | // generate random data of N-bytes 38 | func generateData(n uint) []byte { 39 | data := make([]byte, n) 40 | // can safely ignore error 41 | rand.Read(data) 42 | return data 43 | } 44 | 45 | func encode(t *testing.B, pieceCount uint, total uint) { 46 | // non-reproducible random number sequence 47 | rand.Seed(time.Now().UnixNano()) 48 | 49 | data := generateData(total) 50 | enc, err := full.NewFullRLNCEncoderWithPieceCount(data, pieceCount) 51 | if err != nil { 52 | t.Fatalf("Error: %s\n", 
err.Error()) 53 | } 54 | 55 | t.ReportAllocs() 56 | // because pieceSize = total / pieceCount 57 | // so each coded piece = pieceCount + pieceSize bytes 58 | t.SetBytes(int64(total) + int64(pieceCount+total/pieceCount)) 59 | t.ResetTimer() 60 | 61 | // keep generating encoded pieces on-the-fly 62 | for i := 0; i < t.N; i++ { 63 | enc.CodedPiece() 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /bench/full/recode_test.go: -------------------------------------------------------------------------------- 1 | package full_test 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | "time" 7 | 8 | "github.com/itzmeanjan/kodr" 9 | "github.com/itzmeanjan/kodr/full" 10 | ) 11 | 12 | func BenchmarkFullRLNCRecoder(t *testing.B) { 13 | t.Run("1M", func(b *testing.B) { 14 | b.Run("16 Pieces", func(b *testing.B) { recode(b, 1<<4, 1<<20) }) 15 | b.Run("32 Pieces", func(b *testing.B) { recode(b, 1<<5, 1<<20) }) 16 | b.Run("64 Pieces", func(b *testing.B) { recode(b, 1<<6, 1<<20) }) 17 | b.Run("128 Pieces", func(b *testing.B) { recode(b, 1<<7, 1<<20) }) 18 | b.Run("256 Pieces", func(b *testing.B) { recode(b, 1<<8, 1<<20) }) 19 | }) 20 | 21 | t.Run("16M", func(b *testing.B) { 22 | b.Run("16 Pieces", func(b *testing.B) { recode(b, 1<<4, 1<<24) }) 23 | b.Run("32 Pieces", func(b *testing.B) { recode(b, 1<<5, 1<<24) }) 24 | b.Run("64 Pieces", func(b *testing.B) { recode(b, 1<<6, 1<<24) }) 25 | b.Run("128 Pieces", func(b *testing.B) { recode(b, 1<<7, 1<<24) }) 26 | b.Run("256 Pieces", func(b *testing.B) { recode(b, 1<<8, 1<<24) }) 27 | }) 28 | 29 | t.Run("32M", func(b *testing.B) { 30 | b.Run("16 Pieces", func(b *testing.B) { recode(b, 1<<4, 1<<25) }) 31 | b.Run("32 Pieces", func(b *testing.B) { recode(b, 1<<5, 1<<25) }) 32 | b.Run("64 Pieces", func(b *testing.B) { recode(b, 1<<6, 1<<25) }) 33 | b.Run("128 Pieces", func(b *testing.B) { recode(b, 1<<7, 1<<25) }) 34 | b.Run("256 Pieces", func(b *testing.B) { recode(b, 1<<8, 1<<25) }) 35 | }) 36 
| } 37 | 38 | func recode(t *testing.B, pieceCount uint, total uint) { 39 | // non-reproducible sequence 40 | rand.Seed(time.Now().UnixNano()) 41 | 42 | // -- encode 43 | data := generateData(total) 44 | enc, err := full.NewFullRLNCEncoderWithPieceCount(data, pieceCount) 45 | if err != nil { 46 | t.Fatalf("Error: %s\n", err.Error()) 47 | } 48 | 49 | pieces := make([]*kodr.CodedPiece, 0, pieceCount) 50 | for i := 0; i < int(pieceCount); i++ { 51 | pieces = append(pieces, enc.CodedPiece()) 52 | } 53 | // -- encoding ends 54 | 55 | // -- recode 56 | rec := full.NewFullRLNCRecoder(pieces) 57 | 58 | t.ReportAllocs() 59 | t.SetBytes(int64((pieceCount+total/pieceCount)*pieceCount) + int64(pieceCount+total/pieceCount)) 60 | t.ResetTimer() 61 | 62 | for i := 0; i < t.N; i++ { 63 | if _, err := rec.CodedPiece(); err != nil { 64 | t.Fatalf("Error: %s\n", err.Error()) 65 | } 66 | } 67 | // -- recoding ends 68 | } 69 | -------------------------------------------------------------------------------- /bench/systematic/decoder_test.go: -------------------------------------------------------------------------------- 1 | package systematic_test 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | "time" 7 | 8 | "github.com/itzmeanjan/kodr" 9 | "github.com/itzmeanjan/kodr/full" 10 | "github.com/itzmeanjan/kodr/systematic" 11 | ) 12 | 13 | func BenchmarkFullRLNCDecoder(t *testing.B) { 14 | t.Run("1M", func(b *testing.B) { 15 | b.Run("16Pieces", func(b *testing.B) { decode(b, 1<<4, 1<<20) }) 16 | b.Run("32Pieces", func(b *testing.B) { decode(b, 1<<5, 1<<20) }) 17 | b.Run("64Pieces", func(b *testing.B) { decode(b, 1<<6, 1<<20) }) 18 | b.Run("128Pieces", func(b *testing.B) { decode(b, 1<<7, 1<<20) }) 19 | b.Run("256Pieces", func(b *testing.B) { decode(b, 1<<8, 1<<20) }) 20 | b.Run("512Pieces", func(b *testing.B) { decode(b, 1<<9, 1<<20) }) 21 | }) 22 | 23 | t.Run("2M", func(b *testing.B) { 24 | b.Run("16Pieces", func(b *testing.B) { decode(b, 1<<4, 1<<21) }) 25 | b.Run("32Pieces", func(b 
*testing.B) { decode(b, 1<<5, 1<<21) }) 26 | b.Run("64Pieces", func(b *testing.B) { decode(b, 1<<6, 1<<21) }) 27 | b.Run("128Pieces", func(b *testing.B) { decode(b, 1<<7, 1<<21) }) 28 | b.Run("256Pieces", func(b *testing.B) { decode(b, 1<<8, 1<<21) }) 29 | b.Run("512Pieces", func(b *testing.B) { decode(b, 1<<9, 1<<21) }) 30 | }) 31 | } 32 | 33 | func decode(t *testing.B, pieceCount uint, total uint) { 34 | rand.Seed(time.Now().UnixNano()) 35 | 36 | data := generateData(total) 37 | enc, err := systematic.NewSystematicRLNCEncoderWithPieceCount(data, pieceCount) 38 | if err != nil { 39 | t.Fatalf("Error: %s\n", err.Error()) 40 | } 41 | 42 | pieces := make([]*kodr.CodedPiece, 0, 2*pieceCount) 43 | for i := 0; i < int(2*pieceCount); i++ { 44 | pieces = append(pieces, enc.CodedPiece()) 45 | } 46 | 47 | t.ResetTimer() 48 | 49 | totalDuration := 0 * time.Second 50 | for i := 0; i < t.N; i++ { 51 | totalDuration += decode_(t, pieceCount, pieces) 52 | } 53 | 54 | t.ReportMetric(0, "ns/op") 55 | t.ReportMetric(float64(totalDuration.Seconds())/float64(t.N), "second/decode") 56 | } 57 | 58 | func decode_(t *testing.B, pieceCount uint, pieces []*kodr.CodedPiece) time.Duration { 59 | dec := full.NewFullRLNCDecoder(pieceCount) 60 | 61 | // randomly shuffle piece ordering 62 | rand.Shuffle(int(2*pieceCount), func(i, j int) { 63 | pieces[i], pieces[j] = pieces[j], pieces[i] 64 | }) 65 | 66 | totalDuration := 0 * time.Second 67 | for j := 0; j < int(2*pieceCount); j++ { 68 | if j+1 >= int(pieceCount) && dec.IsDecoded() { 69 | break 70 | } 71 | 72 | begin := time.Now() 73 | dec.AddPiece(pieces[j]) 74 | totalDuration += time.Since(begin) 75 | } 76 | 77 | if !dec.IsDecoded() { 78 | t.Fatal("expected pieces to be decoded") 79 | } 80 | 81 | return totalDuration 82 | } 83 | -------------------------------------------------------------------------------- /bench/systematic/encoder_test.go: -------------------------------------------------------------------------------- 1 | package 
systematic_test 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | "time" 7 | 8 | "github.com/itzmeanjan/kodr/systematic" 9 | ) 10 | 11 | func BenchmarkSystematicRLNCEncoder(b *testing.B) { 12 | b.Run("1M", func(b *testing.B) { 13 | b.Run("16Pieces", func(b *testing.B) { encode(b, 1<<4, 1<<20) }) 14 | b.Run("32Pieces", func(b *testing.B) { encode(b, 1<<5, 1<<20) }) 15 | b.Run("64Pieces", func(b *testing.B) { encode(b, 1<<6, 1<<20) }) 16 | b.Run("128Pieces", func(b *testing.B) { encode(b, 1<<7, 1<<20) }) 17 | b.Run("256Pieces", func(b *testing.B) { encode(b, 1<<8, 1<<20) }) 18 | b.Run("512Pieces", func(b *testing.B) { encode(b, 1<<9, 1<<20) }) 19 | }) 20 | } 21 | 22 | // generate random data of N-bytes 23 | func generateData(n uint) []byte { 24 | data := make([]byte, n) 25 | // can safely ignore error 26 | rand.Read(data) 27 | return data 28 | } 29 | 30 | func encode(t *testing.B, pieceCount uint, total uint) { 31 | // non-reproducible random number sequence 32 | rand.Seed(time.Now().UnixNano()) 33 | 34 | data := generateData(total) 35 | enc, err := systematic.NewSystematicRLNCEncoderWithPieceCount(data, pieceCount) 36 | if err != nil { 37 | t.Fatalf("Error: %s\n", err.Error()) 38 | } 39 | 40 | t.ReportAllocs() 41 | t.SetBytes(int64(total+enc.Padding()) + int64(enc.CodedPieceLen())) 42 | t.ResetTimer() 43 | 44 | // keep generating encoded pieces on-the-fly 45 | for i := 0; i < t.N; i++ { 46 | enc.CodedPiece() 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /data.go: -------------------------------------------------------------------------------- 1 | package kodr 2 | 3 | import ( 4 | "crypto/rand" 5 | "math" 6 | 7 | "github.com/cloud9-tools/go-galoisfield" 8 | ) 9 | 10 | // A piece of data is nothing but a byte array 11 | type Piece []byte 12 | 13 | // Multiple pieces are coded together by performing 14 | // symbol by symbol finite field arithmetic, where 15 | // a single byte is a symbol 16 | // 17 | // `by` is 
coding coefficient 18 | func (p *Piece) Multiply(piece Piece, by byte, field *galoisfield.GF) { 19 | for i := range piece { 20 | (*p)[i] = field.Add((*p)[i], field.Mul(piece[i], by)) 21 | } 22 | } 23 | 24 | // One component of coded piece; holding 25 | // information regarding how original pieces are 26 | // combined together 27 | type CodingVector []byte 28 | 29 | // Coded piece along with randomly generated coding vector 30 | // to be used by recoder/ decoder 31 | type CodedPiece struct { 32 | Vector CodingVector 33 | Piece Piece 34 | } 35 | 36 | // Total length of coded piece --- len(coding_vector) + len(piece) 37 | func (c *CodedPiece) Len() uint { 38 | return uint(len(c.Vector) + len(c.Piece)) 39 | } 40 | 41 | // Flattens coded piece into single byte 42 | // slice ( vector ++ piece ), so that 43 | // decoding steps can be performed -- rref 44 | // on received data matrix 45 | func (c *CodedPiece) Flatten() []byte { 46 | res := make([]byte, c.Len()) 47 | copy(res[:len(c.Vector)], c.Vector) 48 | copy(res[len(c.Vector):], c.Piece) 49 | return res 50 | } 51 | 52 | // Returns true if finds this piece is coded 53 | // systematically i.e. 
piece is actually 54 | // uncoded, just being augmented that it's coded 55 | // which is why coding vector has only one 56 | // non-zero element ( 1 ) 57 | func (c *CodedPiece) IsSystematic() bool { 58 | pos := -1 59 | for i := 0; i < len(c.Vector); i++ { 60 | switch c.Vector[i] { 61 | case 0: 62 | continue 63 | 64 | case 1: 65 | if pos != -1 { 66 | return false 67 | } 68 | pos = i 69 | 70 | default: 71 | return false 72 | 73 | } 74 | } 75 | return pos >= 0 && pos < len(c.Vector) 76 | } 77 | 78 | // Generates random coding vector of specified length 79 | // 80 | // No specific randomization choice is made, default available 81 | // source is used 82 | func GenerateCodingVector(n uint) CodingVector { 83 | vector := make(CodingVector, n) 84 | // ignoring error, because it always succeeds 85 | rand.Read(vector) 86 | return vector 87 | } 88 | 89 | // Given whole chunk of data & desired size of each pieces ( in terms of bytes ), 90 | // it'll split chunk into pieces, which are to be used by encoder for performing RLNC 91 | // 92 | // In case whole data chunk can't be properly divided into pieces of requested size, 93 | // extra zero bytes may be appended at end, considered as padding bytes --- given that 94 | // each piece must be of same size 95 | func OriginalPiecesFromDataAndPieceSize(data []byte, pieceSize uint) ([]Piece, uint, error) { 96 | if pieceSize == 0 { 97 | return nil, 0, ErrZeroPieceSize 98 | } 99 | 100 | if int(pieceSize) >= len(data) { 101 | return nil, 0, ErrBadPieceCount 102 | } 103 | 104 | pieceCount := int(math.Ceil(float64(len(data)) / float64(pieceSize))) 105 | padding := uint(pieceCount*int(pieceSize) - len(data)) 106 | 107 | var data_ []byte 108 | if padding > 0 { 109 | data_ = make([]byte, pieceCount*int(pieceSize)) 110 | if n := copy(data_, data); n != len(data) { 111 | return nil, 0, ErrCopyFailedDuringPieceConstruction 112 | } 113 | } else { 114 | data_ = data 115 | } 116 | 117 | pieces := make([]Piece, pieceCount) 118 | for i := 0; i < 
pieceCount; i++ { 119 | piece := data_[int(pieceSize)*i : int(pieceSize)*(i+1)] 120 | pieces[i] = piece 121 | } 122 | 123 | return pieces, padding, nil 124 | } 125 | 126 | // When you want to split whole data chunk into N-many original pieces, this function 127 | // will do it, while appending extra zero bytes ( read padding bytes ) at end of last piece 128 | // if exact division is not feasible 129 | func OriginalPiecesFromDataAndPieceCount(data []byte, pieceCount uint) ([]Piece, uint, error) { 130 | if pieceCount < 2 { 131 | return nil, 0, ErrBadPieceCount 132 | } 133 | 134 | if int(pieceCount) > len(data) { 135 | return nil, 0, ErrPieceCountMoreThanTotalBytes 136 | } 137 | 138 | pieceSize := uint(math.Ceil(float64(len(data)) / float64(pieceCount))) 139 | padding := pieceCount*pieceSize - uint(len(data)) 140 | 141 | var data_ []byte 142 | if padding > 0 { 143 | data_ = make([]byte, pieceSize*pieceCount) 144 | if n := copy(data_, data); n != len(data) { 145 | return nil, 0, ErrCopyFailedDuringPieceConstruction 146 | } 147 | } else { 148 | data_ = data 149 | } 150 | 151 | // padding field being ignored, because I've already computed it 152 | // in line 134 153 | // 154 | // Here ignored field will always be 0, because it's already extended ( if required ) to be 155 | // properly divisible by `pieceSize`, which is checked in function invoked below 156 | splitted, _, err := OriginalPiecesFromDataAndPieceSize(data_, pieceSize) 157 | return splitted, padding, err 158 | } 159 | 160 | // Before recoding can be performed, coded pieces byte array i.e. []<< coding vector ++ coded piece >> 161 | // where each coded piece is << coding vector ++ coded piece >> ( flattened ) is splitted into 162 | // structured data i.e. 
into components {coding vector, coded piece}, where how many coded pieces are 163 | // present in byte array ( read `data` ) & how many pieces are coded together ( read coding vector length ) 164 | // are provided 165 | func CodedPiecesForRecoding(data []byte, pieceCount uint, piecesCodedTogether uint) ([]*CodedPiece, error) { 166 | codedPieceLength := len(data) / int(pieceCount) 167 | if codedPieceLength*int(pieceCount) != len(data) { 168 | return nil, ErrCodedDataLengthMismatch 169 | } 170 | 171 | if !(piecesCodedTogether < uint(codedPieceLength)) { 172 | return nil, ErrCodingVectorLengthMismatch 173 | } 174 | 175 | codedPieces := make([]*CodedPiece, pieceCount) 176 | for i := 0; i < int(pieceCount); i++ { 177 | codedPiece := data[codedPieceLength*i : codedPieceLength*(i+1)] 178 | codedPieces[i] = &CodedPiece{ 179 | Vector: codedPiece[:piecesCodedTogether], 180 | Piece: codedPiece[piecesCodedTogether:], 181 | } 182 | } 183 | 184 | return codedPieces, nil 185 | } 186 | -------------------------------------------------------------------------------- /data_test.go: -------------------------------------------------------------------------------- 1 | package kodr_test 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "math/rand" 7 | "testing" 8 | "time" 9 | 10 | "github.com/itzmeanjan/kodr" 11 | "github.com/itzmeanjan/kodr/full" 12 | ) 13 | 14 | // Generates `N`-bytes of random data from default 15 | // randomization source 16 | func generateData(n uint) []byte { 17 | data := make([]byte, n) 18 | // can safely ignore error 19 | rand.Read(data) 20 | return data 21 | } 22 | 23 | func TestSplitDataByCount(t *testing.T) { 24 | rand.Seed(time.Now().UnixNano()) 25 | 26 | size := uint(2<<10 + rand.Intn(2<<10)) 27 | count := uint(2<<1 + rand.Intn(int(size))) 28 | data := generateData(size) 29 | 30 | if _, _, err := kodr.OriginalPiecesFromDataAndPieceCount(data, 0); !(err != nil && errors.Is(err, kodr.ErrBadPieceCount)) { 31 | t.Fatalf("expected: %s\n", kodr.ErrBadPieceCount) 32 | 
} 33 | 34 | if _, _, err := kodr.OriginalPiecesFromDataAndPieceCount(data, size+1); !(err != nil && errors.Is(err, kodr.ErrPieceCountMoreThanTotalBytes)) { 35 | t.Fatalf("expected: %s\n", kodr.ErrPieceCountMoreThanTotalBytes) 36 | } 37 | 38 | pieces, _, err := kodr.OriginalPiecesFromDataAndPieceCount(data, count) 39 | if err != nil { 40 | t.Fatalf("didn't expect error: %s\n", err) 41 | } 42 | 43 | if len(pieces) != int(count) { 44 | t.Fatalf("expected %d pieces, found %d\n", count, len(pieces)) 45 | } 46 | } 47 | 48 | func TestSplitDataBySize(t *testing.T) { 49 | rand.Seed(time.Now().UnixNano()) 50 | 51 | size := uint(2<<10 + rand.Intn(2<<10)) 52 | pieceSize := uint(2<<1 + rand.Intn(int(size/2))) 53 | data := generateData(size) 54 | 55 | if _, _, err := kodr.OriginalPiecesFromDataAndPieceSize(data, 0); !(err != nil && errors.Is(err, kodr.ErrZeroPieceSize)) { 56 | t.Fatalf("expected: %s\n", kodr.ErrZeroPieceSize) 57 | } 58 | 59 | if _, _, err := kodr.OriginalPiecesFromDataAndPieceSize(data, size); !(err != nil && errors.Is(err, kodr.ErrBadPieceCount)) { 60 | t.Fatalf("expected: %s\n", kodr.ErrBadPieceCount) 61 | } 62 | 63 | pieces, _, err := kodr.OriginalPiecesFromDataAndPieceSize(data, pieceSize) 64 | if err != nil { 65 | t.Fatalf("didn't expect error: %s\n", err) 66 | } 67 | 68 | for i := 0; i < len(pieces); i++ { 69 | if len(pieces[i]) != int(pieceSize) { 70 | t.Fatalf("expected piece size of %d bytes; found of %d bytes", pieceSize, len(pieces[i])) 71 | } 72 | } 73 | } 74 | 75 | func TestCodedPieceFlattening(t *testing.T) { 76 | piece := &kodr.CodedPiece{Vector: generateData(2 << 5), Piece: generateData(2 << 10)} 77 | flat := piece.Flatten() 78 | if len(flat) != len(piece.Piece)+len(piece.Vector) { 79 | t.Fatal("coded piece flattening failed") 80 | } 81 | 82 | if !bytes.Equal(flat[:len(piece.Vector)], piece.Vector) || !bytes.Equal(flat[len(piece.Vector):], piece.Piece) { 83 | t.Fatal("flattened piece doesn't match << vector ++ piece >>") 84 | } 85 | } 86 | 87 | 
func TestCodedPiecesForRecoding(t *testing.T) { 88 | rand.Seed(time.Now().UnixNano()) 89 | 90 | size := 6 91 | data := generateData(uint(size)) 92 | pieceCount := 3 93 | codedPieceCount := pieceCount + 2 94 | enc, err := full.NewFullRLNCEncoderWithPieceCount(data, uint(pieceCount)) 95 | if err != nil { 96 | t.Fatal(err.Error()) 97 | } 98 | 99 | codedPieces := make([]*kodr.CodedPiece, 0, pieceCount) 100 | for i := 0; i < codedPieceCount; i++ { 101 | codedPieces = append(codedPieces, enc.CodedPiece()) 102 | } 103 | 104 | flattenedCodedPieces := make([]byte, 0) 105 | for i := 0; i < codedPieceCount; i++ { 106 | // this is where << coding vector ++ coded piece >> 107 | // is kept in byte concatenated form 108 | flat := codedPieces[i].Flatten() 109 | flattenedCodedPieces = append(flattenedCodedPieces, flat...) 110 | } 111 | 112 | if _, err := kodr.CodedPiecesForRecoding(flattenedCodedPieces, uint(codedPieceCount)-2, uint(pieceCount)); !(err != nil && errors.Is(err, kodr.ErrCodedDataLengthMismatch)) { 113 | t.Fatalf("expected: %s\n", kodr.ErrCodedDataLengthMismatch) 114 | } 115 | 116 | if _, err := kodr.CodedPiecesForRecoding(flattenedCodedPieces, uint(codedPieceCount), uint(codedPieceCount)); !(err != nil && errors.Is(err, kodr.ErrCodingVectorLengthMismatch)) { 117 | t.Fatalf("expected: %s\n", kodr.ErrCodingVectorLengthMismatch) 118 | } 119 | 120 | codedPieces_, err := kodr.CodedPiecesForRecoding(flattenedCodedPieces, uint(codedPieceCount), uint(pieceCount)) 121 | if err != nil { 122 | t.Fatal(err.Error()) 123 | } 124 | for i := 0; i < len(codedPieces_); i++ { 125 | if !bytes.Equal(codedPieces_[i].Vector, codedPieces[i].Vector) { 126 | t.Fatal("coding vector mismatch !") 127 | } 128 | 129 | if !bytes.Equal(codedPieces_[i].Piece, codedPieces[i].Piece) { 130 | t.Fatal("coded piece mismatch !") 131 | } 132 | } 133 | } 134 | 135 | func TestIsSystematic(t *testing.T) { 136 | piece_1 := kodr.CodedPiece{Vector: []byte{0, 1, 0, 0}, Piece: []byte{1, 2, 3}} 137 | if 
// Sentinel errors shared across the kodr packages; callers compare with
// errors.Is against these values.
var (
	// ErrMatrixDimensionMismatch signals incompatible matrix shapes.
	ErrMatrixDimensionMismatch = errors.New("can't perform matrix multiplication")
	// ErrAllUsefulPiecesReceived tells a caller to stop feeding pieces.
	ErrAllUsefulPiecesReceived = errors.New("no more pieces required for decoding")
	// ErrMoreUsefulPiecesRequired means decoding can't complete yet.
	ErrMoreUsefulPiecesRequired = errors.New("not enough pieces received yet to decode")
	// ErrCopyFailedDuringPieceConstruction reports a short copy while splitting.
	ErrCopyFailedDuringPieceConstruction = errors.New("failed to copy whole data before splitting into pieces")
	// ErrPieceCountMoreThanTotalBytes rejects over-splitting tiny inputs.
	ErrPieceCountMoreThanTotalBytes = errors.New("requested piece count > total bytes of original data")
	// ErrZeroPieceSize rejects a zero-byte piece size.
	ErrZeroPieceSize = errors.New("pieces can't be sized as zero byte")
	// ErrBadPieceCount rejects coding fewer than 2 pieces together.
	ErrBadPieceCount = errors.New("minimum 2 pieces required for RLNC")
	// ErrCodedDataLengthMismatch reports a flat buffer whose length
	// doesn't divide evenly into the claimed piece count.
	ErrCodedDataLengthMismatch = errors.New("coded data length != coded piece count x coded piece length")
	// ErrCodingVectorLengthMismatch reports a coding vector at least as
	// long as the whole coded piece.
	ErrCodingVectorLengthMismatch = errors.New("coding vector length > coded piece length ( in total )")
	// ErrPieceNotDecodedYet means the requested piece isn't revealed yet.
	ErrPieceNotDecodedYet = errors.New("piece not decoded yet, more pieces required")
	// ErrPieceOutOfBound rejects an index beyond the coded piece count.
	ErrPieceOutOfBound = errors.New("requested piece index >= pieceCount ( pieces coded together )")
)
-------------------------------------------------------------------------------- /example/full/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha512" 6 | "encoding/hex" 7 | "errors" 8 | "log" 9 | "math/rand" 10 | "os" 11 | "path" 12 | "time" 13 | 14 | "github.com/itzmeanjan/kodr" 15 | "github.com/itzmeanjan/kodr/full" 16 | ) 17 | 18 | func main() { 19 | // setting up random number generator seed using time as source 20 | // so that RLNC's coding coefficients are generated randomly 21 | // on every run 22 | rand.Seed(time.Now().UnixNano()) 23 | 24 | img := path.Join("..", "..", "img", "logo.png") 25 | log.Printf("Reading from %s\n", img) 26 | data, err := os.ReadFile(img) 27 | if err != nil { 28 | log.Printf("Error: %s\n", err.Error()) 29 | os.Exit(1) 30 | } 31 | 32 | log.Printf("Read %d bytes\n", len(data)) 33 | 34 | hasher := sha512.New512_224() 35 | hasher.Write(data) 36 | sum := hasher.Sum(nil) 37 | log.Printf("SHA512: 0x%s\n\n", hex.EncodeToString(sum)) 38 | 39 | var ( 40 | pieceCount uint = 64 41 | codedPieceCount uint = 2 * pieceCount 42 | droppedPieceCount uint = pieceCount / 2 43 | ) 44 | 45 | log.Printf("Coding %d pieces together\n", pieceCount) 46 | enc, err := full.NewFullRLNCEncoderWithPieceCount(data, pieceCount) 47 | if err != nil { 48 | log.Printf("Error: %s\n", err.Error()) 49 | os.Exit(1) 50 | } 51 | 52 | codedPieces := make([]*kodr.CodedPiece, 0, codedPieceCount) 53 | for i := 0; i < int(codedPieceCount); i++ { 54 | codedPieces = append(codedPieces, enc.CodedPiece()) 55 | } 56 | 57 | log.Printf("Coded into %d pieces\n", codedPieceCount) 58 | 59 | for i := 0; i < int(droppedPieceCount); i++ { 60 | idx := rand.Intn(len(codedPieces)) 61 | codedPieces[idx] = nil 62 | copy(codedPieces[idx:], codedPieces[idx+1:]) 63 | codedPieces[len(codedPieces)-1] = nil 64 | codedPieces = codedPieces[:len(codedPieces)-1] 65 | } 66 | 67 | log.Printf("Dropped %d pieces, 
remaining %d pieces\n\n", droppedPieceCount, len(codedPieces)) 68 | 69 | var ( 70 | recodedPieceCount uint = uint(len(codedPieces)) * 2 71 | ) 72 | 73 | rec := full.NewFullRLNCRecoder(codedPieces) 74 | recodedPieces := make([]*kodr.CodedPiece, 0, recodedPieceCount) 75 | for i := 0; i < int(recodedPieceCount); i++ { 76 | rec_p, err := rec.CodedPiece() 77 | if err != nil { 78 | log.Printf("Error: %s\n", err.Error()) 79 | os.Exit(1) 80 | } 81 | recodedPieces = append(recodedPieces, rec_p) 82 | } 83 | 84 | log.Printf("Recoded %d coded pieces into %d pieces\n", len(codedPieces), recodedPieceCount) 85 | rand.Shuffle(int(recodedPieceCount), func(i, j int) { 86 | recodedPieces[i], recodedPieces[j] = recodedPieces[j], recodedPieces[i] 87 | }) 88 | log.Printf("Shuffled %d coded pieces\n", recodedPieceCount) 89 | 90 | for i := 0; i < int(recodedPieceCount)/2; i++ { 91 | idx := rand.Intn(len(recodedPieces)) 92 | recodedPieces[idx] = nil 93 | copy(recodedPieces[idx:], recodedPieces[idx+1:]) 94 | recodedPieces[len(recodedPieces)-1] = nil 95 | recodedPieces = recodedPieces[:len(recodedPieces)-1] 96 | } 97 | 98 | log.Printf("Dropped %d pieces, remaining %d pieces\n\n", recodedPieceCount/2, len(recodedPieces)) 99 | 100 | log.Printf("Decoding with %d pieces\n", pieceCount) 101 | dec := full.NewFullRLNCDecoder(pieceCount) 102 | for i := 0; i < int(pieceCount); i++ { 103 | if err := dec.AddPiece(recodedPieces[i]); err != nil { 104 | log.Printf("Error: %s\n", err.Error()) 105 | os.Exit(1) 106 | } 107 | } 108 | 109 | if err := dec.AddPiece(codedPieces[pieceCount]); !(err != nil && errors.Is(err, kodr.ErrAllUsefulPiecesReceived)) { 110 | log.Printf("Error `%s` was expected to be thrown\n", kodr.ErrAllUsefulPiecesReceived) 111 | } 112 | 113 | dec_p, err := dec.GetPieces() 114 | if err != nil { 115 | log.Printf("Error: %s\n", err.Error()) 116 | os.Exit(1) 117 | } 118 | 119 | decoded_data := make([]byte, 0) 120 | for i := 0; i < len(dec_p); i++ { 121 | decoded_data = append(decoded_data, 
dec_p[i]...) 122 | } 123 | 124 | log.Printf("Decoded into %d bytes\n", len(decoded_data)) 125 | 126 | if !bytes.Equal(data, decoded_data[:len(data)]) { 127 | log.Println("Decoded data not matching !") 128 | os.Exit(1) 129 | } 130 | 131 | log.Printf("First %d bytes of decoded data matches original %d bytes\n", len(data), len(data)) 132 | log.Printf("3 bytes of padding: %v\n\n", decoded_data[len(data):]) 133 | 134 | hasher.Reset() 135 | hasher.Write(decoded_data[:len(data)]) 136 | sum = hasher.Sum(nil) 137 | log.Printf("SHA512: 0x%s\n", hex.EncodeToString(sum)) 138 | 139 | if err := os.WriteFile("recovered.png", decoded_data[:len(data)], 0o644); err != nil { 140 | log.Printf("Error: %s\n", err.Error()) 141 | os.Exit(1) 142 | } 143 | 144 | log.Printf("Wrote %d bytes into `./recovered.png`", len(data)) 145 | } 146 | -------------------------------------------------------------------------------- /example/systematic/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/sha512" 5 | "encoding/hex" 6 | "encoding/json" 7 | "errors" 8 | "log" 9 | "math/rand" 10 | "os" 11 | "time" 12 | 13 | "github.com/itzmeanjan/kodr" 14 | "github.com/itzmeanjan/kodr/systematic" 15 | ) 16 | 17 | type Data struct { 18 | FieldA uint `json:"fieldA"` 19 | FieldB float64 `json:"fieldB"` 20 | FieldC bool `json:"fieldC"` 21 | FieldD []byte `json:"fieldD"` 22 | } 23 | 24 | // Generates random byte array of size N 25 | func generateData(n uint) []byte { 26 | _container := make([]byte, 0, n) 27 | for i := 0; i < int(n); i++ { 28 | _container = append(_container, byte(rand.Intn(255))) 29 | } 30 | return _container 31 | } 32 | 33 | // Generates random `Data` i.e. 
values associated with 34 | // respective fields are random 35 | func randData() *Data { 36 | d := Data{ 37 | FieldA: uint(rand.Uint64()), 38 | FieldB: rand.Float64(), 39 | FieldC: rand.Intn(2) == 0, 40 | FieldD: generateData(uint(1<<10 + rand.Intn(1<<10))), 41 | } 42 | return &d 43 | } 44 | 45 | func main() { 46 | rand.Seed(time.Now().UnixNano()) 47 | 48 | data := randData() 49 | m_data, err := json.Marshal(&data) 50 | if err != nil { 51 | log.Printf("Error: %s\n", err.Error()) 52 | os.Exit(1) 53 | } 54 | log.Printf("Original serialised data of %d bytes\n", len(m_data)) 55 | 56 | hasher := sha512.New512_256() 57 | hasher.Write(m_data) 58 | sum := hasher.Sum(nil) 59 | log.Printf("SHA512(original): 0x%s\n", hex.EncodeToString(sum)) 60 | 61 | var ( 62 | pieceSize uint = 1 << 3 // in bytes 63 | ) 64 | 65 | enc, err := systematic.NewSystematicRLNCEncoderWithPieceSize(m_data, pieceSize) 66 | if err != nil { 67 | log.Printf("Error: %s\n", err.Error()) 68 | os.Exit(1) 69 | } 70 | 71 | log.Printf("%d pieces being coded together, each of %d bytes\n", enc.PieceCount(), enc.PieceSize()) 72 | log.Printf("%d bytes of padding used\n\n", enc.Padding()) 73 | log.Printf("%d bytes of coded data to be consumed for successful decoding\n", enc.DecodableLen()) 74 | 75 | dec := systematic.NewSystematicRLNCDecoder(enc.PieceCount()) 76 | for { 77 | c_piece := enc.CodedPiece() 78 | 79 | // simulating piece drop/ loss 80 | if rand.Intn(2) == 0 { 81 | continue 82 | } 83 | 84 | err := dec.AddPiece(c_piece) 85 | if err != nil && errors.Is(err, kodr.ErrAllUsefulPiecesReceived) { 86 | break 87 | } 88 | } 89 | 90 | d_pieces, err := dec.GetPieces() 91 | if err != nil { 92 | log.Printf("Error: %s\n", err.Error()) 93 | os.Exit(1) 94 | } 95 | 96 | d_flattened := make([]byte, 0, len(m_data)+int(enc.Padding())) 97 | for i := 0; i < len(d_pieces); i++ { 98 | d_flattened = append(d_flattened, d_pieces[i]...) 
99 | } 100 | 101 | log.Printf("Recovered %d ( = %d + %d ) bytes flattened data\n", len(d_flattened), len(m_data), enc.Padding()) 102 | d_flattened = d_flattened[:len(m_data)] 103 | 104 | hasher.Reset() 105 | hasher.Write(d_flattened) 106 | sum = hasher.Sum(nil) 107 | log.Printf("SHA512(recovered): 0x%s\n", hex.EncodeToString(sum)) 108 | 109 | var rec_data Data 110 | if err := json.Unmarshal(d_flattened, &rec_data); err != nil { 111 | log.Printf("Error: %s\n", err.Error()) 112 | os.Exit(1) 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /full/decoder.go: -------------------------------------------------------------------------------- 1 | package full 2 | 3 | import ( 4 | "github.com/cloud9-tools/go-galoisfield" 5 | "github.com/itzmeanjan/kodr" 6 | "github.com/itzmeanjan/kodr/matrix" 7 | ) 8 | 9 | type FullRLNCDecoder struct { 10 | expected, useful, received uint 11 | state *matrix.DecoderState 12 | } 13 | 14 | // PieceLength - Returns piece length in bytes 15 | // 16 | // If no pieces are yet added to decoder state, then 17 | // returns 0, denoting **unknown** 18 | func (d *FullRLNCDecoder) PieceLength() uint { 19 | if d.received > 0 { 20 | coded := d.state.CodedPieceMatrix() 21 | return coded.Cols() 22 | } 23 | 24 | return 0 25 | } 26 | 27 | // IsDecoded - Use it for checking whether more piece 28 | // collection is required or not 29 | // 30 | // If it returns false, denotes more linearly independent pieces 31 | // need to be collected, only then decoding can be completed 32 | func (d *FullRLNCDecoder) IsDecoded() bool { 33 | return d.useful >= d.expected 34 | } 35 | 36 | // Required - How many more linearly independent pieces 37 | // are required for successfully decoding pieces ? 38 | func (d *FullRLNCDecoder) Required() uint { 39 | return d.expected - d.useful 40 | } 41 | 42 | // AddPiece - Adds a new received coded piece along with 43 | // coding vector. 
After every new coded piece reception 44 | // augmented matrix ( coding vector + coded piece ) 45 | // is rref-ed, to keep it as ready as possible for consuming 46 | // decoded pieces 47 | // 48 | // Note: As soon as all pieces are decoded, no more calls to 49 | // this method does anything useful --- so better check for error & proceed ! 50 | func (d *FullRLNCDecoder) AddPiece(piece *kodr.CodedPiece) error { 51 | // good time to start reading decoded pieces 52 | if d.IsDecoded() { 53 | return kodr.ErrAllUsefulPiecesReceived 54 | } 55 | 56 | d.state.AddPiece(piece) 57 | d.received++ 58 | if !(d.received > 1) { 59 | d.useful++ 60 | return nil 61 | } 62 | 63 | d.state.Rref() 64 | d.useful = d.state.Rank() 65 | return nil 66 | } 67 | 68 | // GetPiece - Get a decoded piece by index, may ( not ) succeed ! 69 | // 70 | // Note: It's not necessary that full decoding needs to happen 71 | // for this method to return something useful 72 | // 73 | // If M-many pieces are received among N-many expected ( read M <= N ) 74 | // then pieces with index in [0..M] ( remember upper bound exclusive ) 75 | // can be attempted to be consumed, given algebric structure has revealed 76 | // requested piece at index `i` 77 | func (d *FullRLNCDecoder) GetPiece(i uint) (kodr.Piece, error) { 78 | return d.state.GetPiece(i) 79 | } 80 | 81 | // GetPieces - Get a list of all decoded pieces, given full 82 | // decoding has happened 83 | func (d *FullRLNCDecoder) GetPieces() ([]kodr.Piece, error) { 84 | if !d.IsDecoded() { 85 | return nil, kodr.ErrMoreUsefulPiecesRequired 86 | } 87 | 88 | pieces := make([]kodr.Piece, 0, d.useful) 89 | for i := 0; i < int(d.useful); i++ { 90 | // error mustn't happen at this point, it should 91 | // have been returned fromvery first `if-block` in function 92 | piece, err := d.GetPiece(uint(i)) 93 | if err != nil { 94 | return nil, err 95 | } 96 | pieces = append(pieces, piece) 97 | } 98 | return pieces, nil 99 | } 100 | 101 | // If minimum #-of linearly independent 
coded pieces required 102 | // for decoding coded pieces --- is provided with, 103 | // it returns a decoder, which keeps applying 104 | // full RLNC decoding step on received coded pieces 105 | // 106 | // As soon as minimum #-of linearly independent pieces are obtained 107 | // which is generally equal to original #-of pieces, decoded pieces 108 | // can be read back 109 | func NewFullRLNCDecoder(pieceCount uint) *FullRLNCDecoder { 110 | gf := galoisfield.DefaultGF256 111 | state := matrix.NewDecoderStateWithPieceCount(gf, pieceCount) 112 | return &FullRLNCDecoder{expected: pieceCount, state: state} 113 | } 114 | -------------------------------------------------------------------------------- /full/decoder_test.go: -------------------------------------------------------------------------------- 1 | package full_test 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "math/rand" 7 | "testing" 8 | "time" 9 | 10 | "github.com/itzmeanjan/kodr" 11 | "github.com/itzmeanjan/kodr/full" 12 | ) 13 | 14 | func TestNewFullRLNCDecoder(t *testing.T) { 15 | rand.Seed(time.Now().UnixNano()) 16 | 17 | pieceCount := 128 18 | pieceLength := 8192 19 | codedPieceCount := pieceCount + 2 20 | pieces := generatePieces(uint(pieceCount), uint(pieceLength)) 21 | enc := full.NewFullRLNCEncoder(pieces) 22 | 23 | coded := make([]*kodr.CodedPiece, 0, codedPieceCount) 24 | for i := 0; i < codedPieceCount; i++ { 25 | coded = append(coded, enc.CodedPiece()) 26 | } 27 | 28 | dec := full.NewFullRLNCDecoder(uint(pieceCount)) 29 | neededPieceCount := uint(pieceCount) 30 | for i := 0; i < codedPieceCount; i++ { 31 | 32 | // test whether required piece count is monotonically decreasing or not 33 | switch i { 34 | case 0: 35 | if req_ := dec.Required(); req_ != neededPieceCount { 36 | t.Fatalf("expected still needed piece count to be %d, found it to be %d\n", neededPieceCount, req_) 37 | } 38 | // skip unnecessary assignment to `needPieceCount` 39 | 40 | default: 41 | if req_ := dec.Required(); !(req_ <= 
neededPieceCount) { 42 | t.Fatal("expected required piece count to monotonically decrease") 43 | } else { 44 | neededPieceCount = req_ 45 | } 46 | } 47 | 48 | if err := dec.AddPiece(coded[i]); errors.Is(err, kodr.ErrAllUsefulPiecesReceived) { 49 | break 50 | } 51 | } 52 | 53 | if !dec.IsDecoded() { 54 | t.Fatal("expected to be fully decoded !") 55 | } 56 | 57 | for i := 0; i < codedPieceCount-pieceCount; i++ { 58 | if err := dec.AddPiece(coded[pieceCount+i]); !(err != nil && errors.Is(err, kodr.ErrAllUsefulPiecesReceived)) { 59 | t.Fatal("expected error indication, received nothing !") 60 | } 61 | } 62 | 63 | d_pieces, err := dec.GetPieces() 64 | if err != nil { 65 | t.Fatal(err.Error()) 66 | } 67 | 68 | if len(pieces) != len(d_pieces) { 69 | t.Fatal("didn't decode all !") 70 | } 71 | 72 | for i := 0; i < pieceCount; i++ { 73 | if !bytes.Equal(pieces[i], d_pieces[i]) { 74 | t.Fatal("decoded data doesn't match !") 75 | } 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /full/encoder.go: -------------------------------------------------------------------------------- 1 | package full 2 | 3 | import ( 4 | "github.com/cloud9-tools/go-galoisfield" 5 | "github.com/itzmeanjan/kodr" 6 | ) 7 | 8 | type FullRLNCEncoder struct { 9 | field *galoisfield.GF 10 | pieces []kodr.Piece 11 | extra uint 12 | } 13 | 14 | // Total #-of pieces being coded together --- denoting 15 | // these many linearly independent pieces are required 16 | // successfully decoding back to original pieces 17 | func (f *FullRLNCEncoder) PieceCount() uint { 18 | return uint(len(f.pieces)) 19 | } 20 | 21 | // Pieces which are coded together are all of same size 22 | // 23 | // Total data being coded = pieceSize * pieceCount ( may include 24 | // some padding bytes ) 25 | func (f *FullRLNCEncoder) PieceSize() uint { 26 | return uint(len(f.pieces[0])) 27 | } 28 | 29 | // How many bytes of data, constructed by concatenating 30 | // coded pieces together, 
required at minimum for decoding 31 | // back to original pieces ? 32 | // 33 | // As I'm coding N-many pieces together, I need at least N-many 34 | // linearly independent pieces, which are concatenated together 35 | // to form a byte slice & can be used for original data reconstruction. 36 | // 37 | // So it computes N * codedPieceLen 38 | func (f *FullRLNCEncoder) DecodableLen() uint { 39 | return f.PieceCount() * f.CodedPieceLen() 40 | } 41 | 42 | // If N-many original pieces are coded together 43 | // what could be length of one such coded piece 44 | // obtained by invoking `CodedPiece` ? 45 | // 46 | // Here N = len(pieces), original pieces which are 47 | // being coded together 48 | func (f *FullRLNCEncoder) CodedPieceLen() uint { 49 | return f.PieceCount() + f.PieceSize() 50 | } 51 | 52 | // How many extra padding bytes added at end of 53 | // original data slice so that splitted pieces are 54 | // all of same size ? 55 | func (f *FullRLNCEncoder) Padding() uint { 56 | return f.extra 57 | } 58 | 59 | // Returns a coded piece, which is constructed on-the-fly 60 | // by randomly drawing elements from finite field i.e. 
61 | // coding coefficients & performing full-RLNC with 62 | // all original pieces 63 | func (f *FullRLNCEncoder) CodedPiece() *kodr.CodedPiece { 64 | vector := kodr.GenerateCodingVector(f.PieceCount()) 65 | piece := make(kodr.Piece, f.PieceSize()) 66 | for i := range f.pieces { 67 | piece.Multiply(f.pieces[i], vector[i], f.field) 68 | } 69 | return &kodr.CodedPiece{ 70 | Vector: vector, 71 | Piece: piece, 72 | } 73 | } 74 | 75 | // Provide with original pieces on which fullRLNC to be performed 76 | // & get encoder, to be used for on-the-fly generation 77 | // to N-many coded pieces 78 | func NewFullRLNCEncoder(pieces []kodr.Piece) *FullRLNCEncoder { 79 | return &FullRLNCEncoder{pieces: pieces, field: galoisfield.DefaultGF256} 80 | } 81 | 82 | // If you know #-of pieces you want to code together, invoking 83 | // this function splits whole data chunk into N-pieces, with padding 84 | // bytes appended at end of last piece, if required & prepares 85 | // full RLNC encoder for obtaining coded pieces 86 | func NewFullRLNCEncoderWithPieceCount(data []byte, pieceCount uint) (*FullRLNCEncoder, error) { 87 | pieces, padding, err := kodr.OriginalPiecesFromDataAndPieceCount(data, pieceCount) 88 | if err != nil { 89 | return nil, err 90 | } 91 | 92 | enc := NewFullRLNCEncoder(pieces) 93 | enc.extra = padding 94 | return enc, nil 95 | } 96 | 97 | // If you want to have N-bytes piece size for each, this 98 | // function generates M-many pieces each of N-bytes size, which are ready 99 | // to be coded together with full RLNC 100 | func NewFullRLNCEncoderWithPieceSize(data []byte, pieceSize uint) (*FullRLNCEncoder, error) { 101 | pieces, padding, err := kodr.OriginalPiecesFromDataAndPieceSize(data, pieceSize) 102 | if err != nil { 103 | return nil, err 104 | } 105 | 106 | enc := NewFullRLNCEncoder(pieces) 107 | enc.extra = padding 108 | return enc, nil 109 | } 110 | -------------------------------------------------------------------------------- /full/encoder_test.go: 
-------------------------------------------------------------------------------- 1 | package full_test 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "math" 7 | "math/rand" 8 | "testing" 9 | "time" 10 | 11 | "github.com/itzmeanjan/kodr" 12 | "github.com/itzmeanjan/kodr/full" 13 | ) 14 | 15 | // Generates `N`-bytes of random data from default 16 | // randomization source 17 | func generateData(n uint) []byte { 18 | data := make([]byte, n) 19 | // can safely ignore error 20 | rand.Read(data) 21 | return data 22 | } 23 | 24 | // Generates N-many pieces each of M-bytes length, to be used 25 | // for testing purposes 26 | func generatePieces(pieceCount uint, pieceLength uint) []kodr.Piece { 27 | pieces := make([]kodr.Piece, 0, pieceCount) 28 | for i := 0; i < int(pieceCount); i++ { 29 | pieces = append(pieces, generateData(pieceLength)) 30 | } 31 | return pieces 32 | } 33 | 34 | func encoderFlow(t *testing.T, enc *full.FullRLNCEncoder, pieceCount, codedPieceCount int, pieces []kodr.Piece) { 35 | coded := make([]*kodr.CodedPiece, 0, codedPieceCount) 36 | for i := 0; i < codedPieceCount; i++ { 37 | coded = append(coded, enc.CodedPiece()) 38 | } 39 | 40 | dec := full.NewFullRLNCDecoder(uint(pieceCount)) 41 | for i := 0; i < codedPieceCount; i++ { 42 | if i < pieceCount { 43 | if _, err := dec.GetPieces(); !(err != nil && errors.Is(err, kodr.ErrMoreUsefulPiecesRequired)) { 44 | t.Fatal("expected error indicating more pieces are required for decoding") 45 | } 46 | } 47 | 48 | if err := dec.AddPiece(coded[i]); errors.Is(err, kodr.ErrAllUsefulPiecesReceived) { 49 | break 50 | } 51 | } 52 | 53 | if !dec.IsDecoded() { 54 | t.Fatal("expected to be fully decoded !") 55 | } 56 | 57 | for i := 0; i < codedPieceCount-pieceCount; i++ { 58 | if err := dec.AddPiece(coded[pieceCount+i]); !(err != nil && errors.Is(err, kodr.ErrAllUsefulPiecesReceived)) { 59 | t.Fatal("expected error indication, received nothing !") 60 | } 61 | } 62 | 63 | d_pieces, err := dec.GetPieces() 64 | if err != nil 
{ 65 | t.Fatal(err.Error()) 66 | } 67 | 68 | if len(pieces) != len(d_pieces) { 69 | t.Fatal("didn't decode all !") 70 | } 71 | 72 | for i := 0; i < pieceCount; i++ { 73 | if !bytes.Equal(pieces[i], d_pieces[i]) { 74 | t.Fatal("decoded data doesn't match !") 75 | } 76 | } 77 | } 78 | 79 | func TestNewFullRLNCEncoder(t *testing.T) { 80 | rand.Seed(time.Now().UnixNano()) 81 | 82 | pieceCount := 128 83 | pieceLength := 8192 84 | codedPieceCount := pieceCount + 2 85 | pieces := generatePieces(uint(pieceCount), uint(pieceLength)) 86 | enc := full.NewFullRLNCEncoder(pieces) 87 | 88 | encoderFlow(t, enc, pieceCount, codedPieceCount, pieces) 89 | } 90 | 91 | func TestNewFullRLNCEncoderWithPieceCount(t *testing.T) { 92 | rand.Seed(time.Now().UnixNano()) 93 | 94 | size := uint(2<<10 + rand.Intn(2<<10)) 95 | pieceCount := uint(2<<1 + rand.Intn(2<<8)) 96 | codedPieceCount := pieceCount + 2 97 | data := generateData(size) 98 | t.Logf("\nTotal Data: %d bytes\nPiece Count: %d\nCoded Piece Count: %d\n", size, pieceCount, codedPieceCount) 99 | 100 | pieces, _, err := kodr.OriginalPiecesFromDataAndPieceCount(data, pieceCount) 101 | if err != nil { 102 | t.Fatal(err.Error()) 103 | } 104 | 105 | enc, err := full.NewFullRLNCEncoderWithPieceCount(data, pieceCount) 106 | if err != nil { 107 | t.Fatal(err.Error()) 108 | } 109 | 110 | encoderFlow(t, enc, int(pieceCount), int(codedPieceCount), pieces) 111 | } 112 | 113 | func TestNewFullRLNCEncoderWithPieceSize(t *testing.T) { 114 | rand.Seed(time.Now().UnixNano()) 115 | 116 | size := uint(2<<10 + rand.Intn(2<<10)) 117 | pieceSize := uint(2<<5 + rand.Intn(2<<5)) 118 | pieceCount := int(math.Ceil(float64(size) / float64(pieceSize))) 119 | codedPieceCount := pieceCount + 2 120 | data := generateData(size) 121 | t.Logf("\nTotal Data: %d bytes\nPiece Size: %d bytes\nPiece Count: %d\nCoded Piece Count: %d\n", size, pieceSize, pieceCount, codedPieceCount) 122 | 123 | pieces, _, err := kodr.OriginalPiecesFromDataAndPieceSize(data, pieceSize) 124 | 
if err != nil { 125 | t.Fatal(err.Error()) 126 | } 127 | 128 | enc, err := full.NewFullRLNCEncoderWithPieceSize(data, pieceSize) 129 | if err != nil { 130 | t.Fatal(err.Error()) 131 | } 132 | 133 | encoderFlow(t, enc, pieceCount, codedPieceCount, pieces) 134 | } 135 | 136 | func TestFullRLNCEncoderPadding(t *testing.T) { 137 | rand.Seed(time.Now().UnixNano()) 138 | 139 | t.Run("WithPieceCount", func(t *testing.T) { 140 | for i := 0; i < 1<<5; i++ { 141 | size := uint(2<<10 + rand.Intn(2<<10)) 142 | pieceCount := uint(2<<1 + rand.Intn(2<<8)) 143 | data := generateData(size) 144 | 145 | enc, err := full.NewFullRLNCEncoderWithPieceCount(data, pieceCount) 146 | if err != nil { 147 | t.Fatalf("Error: %s\n", err.Error()) 148 | } 149 | 150 | extra := enc.Padding() 151 | pieceSize := (size + extra) / pieceCount 152 | c_piece := enc.CodedPiece() 153 | if uint(len(c_piece.Piece)) != pieceSize { 154 | t.Fatalf("expected pieceSize to be %dB, found to be %dB\n", pieceSize, len(c_piece.Piece)) 155 | } 156 | } 157 | }) 158 | 159 | t.Run("WithPieceSize", func(t *testing.T) { 160 | for i := 0; i < 1<<5; i++ { 161 | size := uint(2<<10 + rand.Intn(2<<10)) 162 | pieceSize := uint(2<<5 + rand.Intn(2<<5)) 163 | pieceCount := uint(math.Ceil(float64(size) / float64(pieceSize))) 164 | data := generateData(size) 165 | 166 | enc, err := full.NewFullRLNCEncoderWithPieceSize(data, pieceSize) 167 | if err != nil { 168 | t.Fatalf("Error: %s\n", err.Error()) 169 | } 170 | 171 | extra := enc.Padding() 172 | c_pieceSize := (size + extra) / pieceCount 173 | c_piece := enc.CodedPiece() 174 | if pieceSize != c_pieceSize || uint(len(c_piece.Piece)) != pieceSize { 175 | t.Fatalf("expected pieceSize to be %dB, found to be %dB\n", c_pieceSize, len(c_piece.Piece)) 176 | } 177 | } 178 | }) 179 | } 180 | 181 | func TestFullRLNCEncoder_CodedPieceLen(t *testing.T) { 182 | rand.Seed(time.Now().UnixNano()) 183 | 184 | t.Run("WithPieceCount", func(t *testing.T) { 185 | size := uint(2<<10 + rand.Intn(2<<10)) 186 | 
pieceCount := uint(2<<1 + rand.Intn(2<<8)) 187 | data := generateData(size) 188 | 189 | enc, err := full.NewFullRLNCEncoderWithPieceCount(data, pieceCount) 190 | if err != nil { 191 | t.Fatalf("Error: %s\n", err.Error()) 192 | } 193 | 194 | for i := 0; i <= int(pieceCount); i++ { 195 | c_piece := enc.CodedPiece() 196 | if c_piece.Len() != enc.CodedPieceLen() { 197 | t.Fatalf("expected coded piece to be of %dB, found to be of %dB\n", enc.CodedPieceLen(), c_piece.Len()) 198 | } 199 | } 200 | }) 201 | 202 | t.Run("WithPieceSize", func(t *testing.T) { 203 | size := uint(2<<10 + rand.Intn(2<<10)) 204 | pieceSize := uint(2<<5 + rand.Intn(2<<5)) 205 | pieceCount := uint(math.Ceil(float64(size) / float64(pieceSize))) 206 | data := generateData(size) 207 | 208 | enc, err := full.NewFullRLNCEncoderWithPieceSize(data, pieceSize) 209 | if err != nil { 210 | t.Fatalf("Error: %s\n", err.Error()) 211 | } 212 | 213 | for i := 0; i <= int(pieceCount); i++ { 214 | c_piece := enc.CodedPiece() 215 | if c_piece.Len() != enc.CodedPieceLen() { 216 | t.Fatalf("expected coded piece to be of %dB, found to be of %dB\n", enc.CodedPieceLen(), c_piece.Len()) 217 | } 218 | } 219 | }) 220 | } 221 | 222 | func TestFullRLNCEncoder_DecodableLen(t *testing.T) { 223 | rand.Seed(time.Now().UnixNano()) 224 | 225 | flow := func(enc *full.FullRLNCEncoder, dec *full.FullRLNCDecoder) { 226 | consumed_len := uint(0) 227 | for !dec.IsDecoded() { 228 | c_piece := enc.CodedPiece() 229 | // randomly drop piece 230 | if rand.Intn(2) == 0 { 231 | continue 232 | } 233 | if err := dec.AddPiece(c_piece); errors.Is(err, kodr.ErrAllUsefulPiecesReceived) { 234 | break 235 | } 236 | 237 | // as consumed this piece --- accounting 238 | consumed_len += c_piece.Len() 239 | } 240 | 241 | if consumed_len < enc.DecodableLen() { 242 | t.Fatalf("expected to consume >=%dB for decoding, but actually consumed %dB\n", enc.DecodableLen(), consumed_len) 243 | } 244 | } 245 | 246 | t.Run("WithPieceCount", func(t *testing.T) { 247 | 
size := uint(2<<10 + rand.Intn(2<<10)) 248 | pieceCount := uint(2<<1 + rand.Intn(2<<8)) 249 | data := generateData(size) 250 | 251 | enc, err := full.NewFullRLNCEncoderWithPieceCount(data, pieceCount) 252 | if err != nil { 253 | t.Fatalf("Error: %s\n", err.Error()) 254 | } 255 | 256 | dec := full.NewFullRLNCDecoder(pieceCount) 257 | flow(enc, dec) 258 | }) 259 | 260 | t.Run("WithPieceSize", func(t *testing.T) { 261 | size := uint(2<<10 + rand.Intn(2<<10)) 262 | pieceSize := uint(2<<5 + rand.Intn(2<<5)) 263 | pieceCount := uint(math.Ceil(float64(size) / float64(pieceSize))) 264 | data := generateData(size) 265 | 266 | enc, err := full.NewFullRLNCEncoderWithPieceSize(data, pieceSize) 267 | if err != nil { 268 | t.Fatalf("Error: %s\n", err.Error()) 269 | } 270 | 271 | dec := full.NewFullRLNCDecoder(pieceCount) 272 | flow(enc, dec) 273 | }) 274 | } 275 | -------------------------------------------------------------------------------- /full/recoder.go: -------------------------------------------------------------------------------- 1 | package full 2 | 3 | import ( 4 | "github.com/cloud9-tools/go-galoisfield" 5 | "github.com/itzmeanjan/kodr" 6 | "github.com/itzmeanjan/kodr/matrix" 7 | ) 8 | 9 | type FullRLNCRecoder struct { 10 | field *galoisfield.GF 11 | pieces []*kodr.CodedPiece 12 | codingMatrix matrix.Matrix 13 | } 14 | 15 | func (r *FullRLNCRecoder) fill() { 16 | codingMatrix := make(matrix.Matrix, len(r.pieces)) 17 | for i := 0; i < len(r.pieces); i++ { 18 | codingMatrix[i] = make([]byte, len(r.pieces[i].Vector)) 19 | copy(codingMatrix[i], r.pieces[i].Vector) 20 | } 21 | r.codingMatrix = codingMatrix 22 | } 23 | 24 | // Returns recoded piece, which is constructed on-the-fly 25 | // by randomly drawing some coding coefficients from 26 | // finite field & performing full RLNC with all coded pieces 27 | func (r *FullRLNCRecoder) CodedPiece() (*kodr.CodedPiece, error) { 28 | pieceCount := uint(len(r.pieces)) 29 | vector := kodr.GenerateCodingVector(pieceCount) 30 | 
piece := make(kodr.Piece, len(r.pieces[0].Piece)) 31 | for i := range r.pieces { 32 | piece.Multiply(r.pieces[i].Piece, vector[i], r.field) 33 | } 34 | 35 | vector_ := matrix.Matrix{vector} 36 | mult, err := vector_.Multiply(r.field, r.codingMatrix) 37 | if err != nil { 38 | return nil, err 39 | } 40 | 41 | return &kodr.CodedPiece{ 42 | Vector: mult[0], 43 | Piece: piece, 44 | }, nil 45 | } 46 | 47 | // Provide with all coded pieces, which are to be used 48 | // for performing fullRLNC ( read recoding of coded data ) 49 | // & get back recoder which is used for on-the-fly construction 50 | // of N-many recoded pieces 51 | func NewFullRLNCRecoder(pieces []*kodr.CodedPiece) *FullRLNCRecoder { 52 | rec := &FullRLNCRecoder{field: galoisfield.DefaultGF256, pieces: pieces} 53 | rec.fill() 54 | return rec 55 | } 56 | 57 | // A byte slice which is formed by concatenating coded pieces, 58 | // will be splitted into structured coded pieces ( read having two components 59 | // i.e. coding vector & piece ) & recoder to be returned, which can be used 60 | // for on-the-fly random piece recoding 61 | func NewFullRLNCRecoderWithFlattenData(data []byte, pieceCount uint, piecesCodedTogether uint) (*FullRLNCRecoder, error) { 62 | codedPieces, err := kodr.CodedPiecesForRecoding(data, pieceCount, piecesCodedTogether) 63 | if err != nil { 64 | return nil, err 65 | } 66 | 67 | return NewFullRLNCRecoder(codedPieces), nil 68 | } 69 | -------------------------------------------------------------------------------- /full/recoder_test.go: -------------------------------------------------------------------------------- 1 | package full_test 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "math/rand" 7 | "testing" 8 | "time" 9 | 10 | "github.com/itzmeanjan/kodr" 11 | "github.com/itzmeanjan/kodr/full" 12 | ) 13 | 14 | func recoderFlow(t *testing.T, rec *full.FullRLNCRecoder, pieceCount int, pieces []kodr.Piece) { 15 | dec := full.NewFullRLNCDecoder(uint(pieceCount)) 16 | for { 17 | r_piece, err 
:= rec.CodedPiece() 18 | if err != nil { 19 | t.Fatalf("Error: %s\n", err.Error()) 20 | } 21 | if err := dec.AddPiece(r_piece); errors.Is(err, kodr.ErrAllUsefulPiecesReceived) { 22 | break 23 | } 24 | } 25 | 26 | d_pieces, err := dec.GetPieces() 27 | if err != nil { 28 | t.Fatal(err.Error()) 29 | } 30 | 31 | if len(pieces) != len(d_pieces) { 32 | t.Fatal("didn't decode all !") 33 | } 34 | 35 | for i := 0; i < pieceCount; i++ { 36 | if !bytes.Equal(pieces[i], d_pieces[i]) { 37 | t.Fatal("decoded data doesn't match !") 38 | } 39 | } 40 | } 41 | 42 | func TestNewFullRLNCRecoder(t *testing.T) { 43 | rand.Seed(time.Now().UnixNano()) 44 | 45 | pieceCount := 128 46 | pieceLength := 8192 47 | codedPieceCount := pieceCount + 2 48 | pieces := generatePieces(uint(pieceCount), uint(pieceLength)) 49 | enc := full.NewFullRLNCEncoder(pieces) 50 | 51 | coded := make([]*kodr.CodedPiece, 0, codedPieceCount) 52 | for i := 0; i < codedPieceCount; i++ { 53 | coded = append(coded, enc.CodedPiece()) 54 | } 55 | 56 | rec := full.NewFullRLNCRecoder(coded) 57 | recoderFlow(t, rec, pieceCount, pieces) 58 | } 59 | 60 | func TestNewFullRLNCRecoderWithFlattenData(t *testing.T) { 61 | rand.Seed(time.Now().UnixNano()) 62 | 63 | pieceCount := 128 64 | pieceLength := 8192 65 | codedPieceCount := pieceCount + 2 66 | pieces := generatePieces(uint(pieceCount), uint(pieceLength)) 67 | enc := full.NewFullRLNCEncoder(pieces) 68 | 69 | coded := make([]*kodr.CodedPiece, 0, codedPieceCount) 70 | for i := 0; i < codedPieceCount; i++ { 71 | coded = append(coded, enc.CodedPiece()) 72 | } 73 | 74 | codedFlattened := make([]byte, 0) 75 | for i := 0; i < len(coded); i++ { 76 | codedFlattened = append(codedFlattened, coded[i].Flatten()...) 
77 | } 78 | 79 | rec, err := full.NewFullRLNCRecoderWithFlattenData(codedFlattened, uint(codedPieceCount), uint(pieceCount)) 80 | if err != nil { 81 | t.Fatal(err.Error()) 82 | } 83 | 84 | recoderFlow(t, rec, pieceCount, pieces) 85 | } 86 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/itzmeanjan/kodr 2 | 3 | go 1.16 4 | 5 | require github.com/cloud9-tools/go-galoisfield v0.0.0-20160311182916-a8cf2bffadf0 6 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/cloud9-tools/go-galoisfield v0.0.0-20160311182916-a8cf2bffadf0 h1:te0djrMIsqx2evVAE+U98Kfa09Jszv9OVxGJzjOjOXM= 2 | github.com/cloud9-tools/go-galoisfield v0.0.0-20160311182916-a8cf2bffadf0/go.mod h1:gum5aAgfkohBkDTmIabrl06Qht7QKxscuflLWFG5kXA= 3 | -------------------------------------------------------------------------------- /img/benchmark_full_decoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itzmeanjan/kodr/f4256dfb95acdff3cdd8de7c27b76df1737402de/img/benchmark_full_decoder.png -------------------------------------------------------------------------------- /img/benchmark_full_encoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itzmeanjan/kodr/f4256dfb95acdff3cdd8de7c27b76df1737402de/img/benchmark_full_encoder.png -------------------------------------------------------------------------------- /img/benchmark_full_recoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itzmeanjan/kodr/f4256dfb95acdff3cdd8de7c27b76df1737402de/img/benchmark_full_recoder.png 
-------------------------------------------------------------------------------- /img/benchmark_systematic_decoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itzmeanjan/kodr/f4256dfb95acdff3cdd8de7c27b76df1737402de/img/benchmark_systematic_decoder.png -------------------------------------------------------------------------------- /img/benchmark_systematic_encoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itzmeanjan/kodr/f4256dfb95acdff3cdd8de7c27b76df1737402de/img/benchmark_systematic_encoder.png -------------------------------------------------------------------------------- /img/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itzmeanjan/kodr/f4256dfb95acdff3cdd8de7c27b76df1737402de/img/logo.png -------------------------------------------------------------------------------- /img/systematic_rlnc_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itzmeanjan/kodr/f4256dfb95acdff3cdd8de7c27b76df1737402de/img/systematic_rlnc_example.png -------------------------------------------------------------------------------- /matrix/decoder_state.go: -------------------------------------------------------------------------------- 1 | package matrix 2 | 3 | import ( 4 | "github.com/cloud9-tools/go-galoisfield" 5 | "github.com/itzmeanjan/kodr" 6 | ) 7 | 8 | type DecoderState struct { 9 | field *galoisfield.GF 10 | pieceCount uint 11 | coeffs Matrix 12 | coded Matrix 13 | } 14 | 15 | func min(a, b int) int { 16 | if a <= b { 17 | return a 18 | } 19 | return b 20 | } 21 | 22 | func (d *DecoderState) clean_forward() { 23 | var ( 24 | rows int = int(d.coeffs.Rows()) 25 | cols int = int(d.coeffs.Cols()) 26 | boundary int = min(rows, cols) 27 | ) 28 | 29 | for i := 0; i < 
boundary; i++ { 30 | if d.coeffs[i][i] == 0 { 31 | non_zero_col := false 32 | pivot := i + 1 33 | for ; pivot < rows; pivot++ { 34 | if d.coeffs[pivot][i] != 0 { 35 | non_zero_col = true 36 | break 37 | } 38 | } 39 | 40 | if !non_zero_col { 41 | continue 42 | } 43 | 44 | // row switching in coefficient matrix 45 | { 46 | tmp := d.coeffs[i] 47 | d.coeffs[i] = d.coeffs[pivot] 48 | d.coeffs[pivot] = tmp 49 | } 50 | // row switching in coded piece matrix 51 | { 52 | tmp := d.coded[i] 53 | d.coded[i] = d.coded[pivot] 54 | d.coded[pivot] = tmp 55 | } 56 | } 57 | 58 | for j := i + 1; j < rows; j++ { 59 | if d.coeffs[j][i] == 0 { 60 | continue 61 | } 62 | 63 | quotient := d.field.Div(d.coeffs[j][i], d.coeffs[i][i]) 64 | for k := i; k < cols; k++ { 65 | d.coeffs[j][k] = d.field.Add(d.coeffs[j][k], d.field.Mul(d.coeffs[i][k], quotient)) 66 | } 67 | 68 | for k := 0; k < len(d.coded[0]); k++ { 69 | d.coded[j][k] = d.field.Add(d.coded[j][k], d.field.Mul(d.coded[i][k], quotient)) 70 | } 71 | } 72 | } 73 | } 74 | 75 | func (d *DecoderState) clean_backward() { 76 | var ( 77 | rows int = int(d.coeffs.Rows()) 78 | cols int = int(d.coeffs.Cols()) 79 | boundary int = min(rows, cols) 80 | ) 81 | 82 | for i := boundary - 1; i >= 0; i-- { 83 | if d.coeffs[i][i] == 0 { 84 | continue 85 | } 86 | 87 | for j := 0; j < i; j++ { 88 | if d.coeffs[j][i] == 0 { 89 | continue 90 | } 91 | 92 | quotient := d.field.Div(d.coeffs[j][i], d.coeffs[i][i]) 93 | for k := i; k < cols; k++ { 94 | d.coeffs[j][k] = d.field.Add(d.coeffs[j][k], d.field.Mul(d.coeffs[i][k], quotient)) 95 | } 96 | 97 | for k := 0; k < len(d.coded[0]); k++ { 98 | d.coded[j][k] = d.field.Add(d.coded[j][k], d.field.Mul(d.coded[i][k], quotient)) 99 | } 100 | 101 | } 102 | 103 | if d.coeffs[i][i] == 1 { 104 | continue 105 | } 106 | 107 | inv := d.field.Div(1, d.coeffs[i][i]) 108 | d.coeffs[i][i] = 1 109 | for j := i + 1; j < cols; j++ { 110 | if d.coeffs[i][j] == 0 { 111 | continue 112 | } 113 | 114 | d.coeffs[i][j] = 
d.field.Mul(d.coeffs[i][j], inv) 115 | } 116 | 117 | for j := 0; j < len(d.coded[0]); j++ { 118 | d.coded[i][j] = d.field.Mul(d.coded[i][j], inv) 119 | } 120 | } 121 | } 122 | 123 | func (d *DecoderState) remove_zero_rows() { 124 | var ( 125 | cols = len(d.coeffs[0]) 126 | ) 127 | 128 | for i := 0; i < len(d.coeffs); i++ { 129 | yes := true 130 | for j := 0; j < cols; j++ { 131 | if d.coeffs[i][j] != 0 { 132 | yes = false 133 | break 134 | } 135 | } 136 | if !yes { 137 | continue 138 | } 139 | 140 | // resize `coeffs` matrix 141 | d.coeffs[i] = nil 142 | copy((d.coeffs)[i:], (d.coeffs)[i+1:]) 143 | d.coeffs = (d.coeffs)[:len(d.coeffs)-1] 144 | 145 | // resize `coded` matrix 146 | d.coded[i] = nil 147 | copy((d.coded)[i:], (d.coded)[i+1:]) 148 | d.coded = (d.coded)[:len(d.coded)-1] 149 | 150 | i = i - 1 151 | } 152 | } 153 | 154 | // Calculates Reduced Row Echelon Form of coefficient 155 | // matrix, while also modifying coded piece matrix 156 | // First it forward, backward cleans up matrix 157 | // i.e. 
cells other than pivots are zeroed, 158 | // later it checks if some rows of coefficient matrix 159 | // are linearly dependent or not, if yes it removes those, 160 | // while respective rows of coded piece matrix is also 161 | // removed --- considered to be `not useful piece` 162 | // 163 | // Note: All operations are in-place, no more memory 164 | // allocations are performed 165 | func (d *DecoderState) Rref() { 166 | d.clean_forward() 167 | d.clean_backward() 168 | d.remove_zero_rows() 169 | } 170 | 171 | // Expected to be invoked after RREF-ed, in other words 172 | // it won't rref matrix first to calculate rank, 173 | // rather that needs to first invoked 174 | func (d *DecoderState) Rank() uint { 175 | return d.coeffs.Rows() 176 | } 177 | 178 | // Current state of coding coefficient matrix 179 | func (d *DecoderState) CoefficientMatrix() Matrix { 180 | return d.coeffs 181 | } 182 | 183 | // Current state of coded piece matrix, which is updated 184 | // along side coding coefficient matrix ( during rref ) 185 | func (d *DecoderState) CodedPieceMatrix() Matrix { 186 | return d.coded 187 | } 188 | 189 | // Adds a new coded piece to decoder state, which will hopefully 190 | // help in decoding pieces, if linearly independent with other rows 191 | // i.e. 
read pieces 192 | func (d *DecoderState) AddPiece(codedPiece *kodr.CodedPiece) { 193 | d.coeffs = append(d.coeffs, codedPiece.Vector) 194 | d.coded = append(d.coded, codedPiece.Piece) 195 | } 196 | 197 | // Request decoded piece by index ( 0 based, definitely ) 198 | // 199 | // If piece not yet decoded/ requested index is >= #-of 200 | // pieces coded together, returns error message indicating so 201 | // 202 | // Otherwise piece is returned, without any error 203 | // 204 | // Note: This method will copy decoded piece into newly allocated memory 205 | // when whole decoding hasn't yet happened, to prevent any chance 206 | // that user mistakenly modifies slice returned ( read piece ) 207 | // & that affects next round of decoding ( when new piece is received ) 208 | func (d *DecoderState) GetPiece(idx uint) (kodr.Piece, error) { 209 | if idx >= d.pieceCount { 210 | return nil, kodr.ErrPieceOutOfBound 211 | } 212 | if idx >= d.coeffs.Rows() { 213 | return nil, kodr.ErrPieceNotDecodedYet 214 | } 215 | 216 | if d.Rank() >= d.pieceCount { 217 | return d.coded[idx], nil 218 | } 219 | 220 | cols := int(d.coeffs.Cols()) 221 | decoded := true 222 | 223 | OUT: 224 | for i := 0; i < cols; i++ { 225 | switch i { 226 | case int(idx): 227 | if d.coeffs[idx][i] != 1 { 228 | decoded = false 229 | break OUT 230 | } 231 | 232 | default: 233 | if d.coeffs[idx][i] == 0 { 234 | decoded = false 235 | break OUT 236 | } 237 | 238 | } 239 | } 240 | 241 | if !decoded { 242 | return nil, kodr.ErrPieceNotDecodedYet 243 | } 244 | 245 | buf := make([]byte, d.coded.Cols()) 246 | copy(buf, d.coded[idx]) 247 | return buf, nil 248 | } 249 | 250 | func NewDecoderStateWithPieceCount(gf *galoisfield.GF, pieceCount uint) *DecoderState { 251 | coeffs := make([][]byte, 0, pieceCount) 252 | coded := make([][]byte, 0, pieceCount) 253 | return &DecoderState{field: gf, pieceCount: pieceCount, coeffs: coeffs, coded: coded} 254 | } 255 | 256 | func NewDecoderState(gf *galoisfield.GF, coeffs, coded Matrix) 
*DecoderState { 257 | return &DecoderState{field: gf, pieceCount: uint(len(coeffs)), coeffs: coeffs, coded: coded} 258 | } 259 | -------------------------------------------------------------------------------- /matrix/matrix.go: -------------------------------------------------------------------------------- 1 | package matrix 2 | 3 | import ( 4 | "github.com/cloud9-tools/go-galoisfield" 5 | "github.com/itzmeanjan/kodr" 6 | ) 7 | 8 | type Matrix [][]byte 9 | 10 | // Cell by cell value comparision of two matrices, which 11 | // returns `true` only if all cells are found to be equal 12 | func (m *Matrix) Cmp(m_ Matrix) bool { 13 | if m.Rows() != m_.Rows() || m.Cols() != m_.Cols() { 14 | return false 15 | } 16 | 17 | for i := range *m { 18 | for j := range (*m)[i] { 19 | if (*m)[i][j] != m_[i][j] { 20 | return false 21 | } 22 | } 23 | } 24 | return true 25 | } 26 | 27 | // #-of rows in matrix 28 | // 29 | // This may change in runtime, when some rows are removed 30 | // as they're found to be linearly dependent with some other 31 | // row, after application of RREF 32 | func (m *Matrix) Rows() uint { 33 | return uint(len(*m)) 34 | } 35 | 36 | // #-of columns in matrix 37 | // 38 | // This isn't expected to change after initialised 39 | func (m *Matrix) Cols() uint { 40 | return uint(len((*m)[0])) 41 | } 42 | 43 | // Multiplies two matrices ( which can be multiplied ) 44 | // in order `m x with` 45 | func (m *Matrix) Multiply(field *galoisfield.GF, with Matrix) (Matrix, error) { 46 | if m.Cols() != with.Rows() { 47 | return nil, kodr.ErrMatrixDimensionMismatch 48 | } 49 | 50 | mult := make([][]byte, m.Rows()) 51 | for i := 0; i < len(*m); i++ { 52 | mult[i] = make([]byte, with.Cols()) 53 | } 54 | 55 | for i := 0; i < int(m.Rows()); i++ { 56 | for j := 0; j < int(with.Cols()); j++ { 57 | 58 | for k := 0; k < int(m.Cols()); k++ { 59 | mult[i][j] = field.Add(mult[i][j], field.Mul((*m)[i][k], with[k][j])) 60 | } 61 | 62 | } 63 | } 64 | 65 | return mult, nil 66 | } 67 | 
-------------------------------------------------------------------------------- /matrix/matrix_bench_test.go: -------------------------------------------------------------------------------- 1 | package matrix_test 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | "time" 7 | 8 | "github.com/cloud9-tools/go-galoisfield" 9 | "github.com/itzmeanjan/kodr/matrix" 10 | ) 11 | 12 | // Note: If fill_with_zero is set, it's not really a random matrix 13 | func random_matrix(rows, cols int, fill_with_zero bool) [][]byte { 14 | mat := make([][]byte, 0, rows) 15 | for i := 0; i < rows; i++ { 16 | row := make([]byte, cols) 17 | // already filled with zero 18 | if !fill_with_zero { 19 | rand.Read(row) 20 | } 21 | mat = append(mat, row) 22 | } 23 | return mat 24 | } 25 | 26 | func BenchmarkMatrixRref(b *testing.B) { 27 | rand.Seed(time.Now().UnixNano()) 28 | gf := galoisfield.DefaultGF256 29 | 30 | b.Run("2x2", func(b *testing.B) { rref(b, 1<<1, gf) }) 31 | b.Run("4x4", func(b *testing.B) { rref(b, 1<<2, gf) }) 32 | b.Run("8x8", func(b *testing.B) { rref(b, 1<<3, gf) }) 33 | b.Run("16x16", func(b *testing.B) { rref(b, 1<<4, gf) }) 34 | b.Run("32x32", func(b *testing.B) { rref(b, 1<<5, gf) }) 35 | b.Run("64x64", func(b *testing.B) { rref(b, 1<<6, gf) }) 36 | b.Run("128x128", func(b *testing.B) { rref(b, 1<<7, gf) }) 37 | b.Run("256x256", func(b *testing.B) { rref(b, 1<<8, gf) }) 38 | b.Run("512x512", func(b *testing.B) { rref(b, 1<<9, gf) }) 39 | b.Run("1024x1024", func(b *testing.B) { rref(b, 1<<10, gf) }) 40 | } 41 | 42 | func rref(b *testing.B, dim int, gf *galoisfield.GF) { 43 | b.ResetTimer() 44 | b.SetBytes(int64(dim*dim) << 1) 45 | b.ReportAllocs() 46 | 47 | for i := 0; i < b.N; i++ { 48 | coeffs := random_matrix(dim, dim, false) 49 | coded := random_matrix(dim, dim, true) 50 | d_state := matrix.NewDecoderState(gf, coeffs, coded) 51 | d_state.Rref() 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /matrix/matrix_test.go: 
-------------------------------------------------------------------------------- 1 | package matrix_test 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "testing" 7 | 8 | "github.com/cloud9-tools/go-galoisfield" 9 | "github.com/itzmeanjan/kodr" 10 | "github.com/itzmeanjan/kodr/matrix" 11 | ) 12 | 13 | func TestMatrixRref(t *testing.T) { 14 | field := galoisfield.DefaultGF256 15 | 16 | { 17 | m := matrix.Matrix{{70, 137, 2, 152}, {223, 92, 234, 98}, {217, 141, 33, 44}, {145, 135, 71, 45}} 18 | m_rref := matrix.Matrix{{1, 0, 0, 105}, {0, 1, 0, 181}, {0, 0, 1, 42}} 19 | coded := matrix.Matrix{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}} 20 | 21 | dec := matrix.NewDecoderState(field, m, coded) 22 | dec.Rref() 23 | res := dec.CoefficientMatrix() 24 | if !res.Cmp(m_rref) { 25 | t.Fatal("rref doesn't match !") 26 | } 27 | } 28 | 29 | { 30 | m := matrix.Matrix{{68, 54, 6, 230}, {16, 56, 215, 78}, {159, 186, 146, 163}, {122, 41, 205, 133}} 31 | m_rref := matrix.Matrix{{1, 0, 0, 0}, {0, 1, 0, 0}, {0, 0, 1, 0}, {0, 0, 0, 1}} 32 | coded := matrix.Matrix{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}} 33 | 34 | dec := matrix.NewDecoderState(field, m, coded) 35 | dec.Rref() 36 | res := dec.CoefficientMatrix() 37 | if !res.Cmp(m_rref) { 38 | t.Fatal("rref doesn't match !") 39 | } 40 | } 41 | 42 | { 43 | m := matrix.Matrix{{100, 31, 76, 199, 119}, {207, 34, 207, 208, 18}, {62, 20, 54, 6, 187}, {66, 8, 52, 73, 54}, {122, 138, 247, 211, 165}} 44 | m_rref := matrix.Matrix{{1, 0, 0, 0, 0}, {0, 1, 0, 0, 0}, {0, 0, 1, 0, 0}, {0, 0, 0, 1, 0}, {0, 0, 0, 0, 1}} 45 | coded := matrix.Matrix{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}} 46 | 47 | dec := matrix.NewDecoderState(field, m, coded) 48 | dec.Rref() 49 | res := dec.CoefficientMatrix() 50 | if !res.Cmp(m_rref) { 51 | t.Fatal("rref doesn't match !") 52 | } 53 | } 54 | } 55 | 56 | func TestMatrixRank(t *testing.T) { 57 | field := galoisfield.DefaultGF256 58 | 59 | { 60 | m := matrix.Matrix{{70, 
137, 2, 152}, {223, 92, 234, 98}, {217, 141, 33, 44}, {145, 135, 71, 45}} 61 | coded := matrix.Matrix{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}} 62 | 63 | dec := matrix.NewDecoderState(field, m, coded) 64 | dec.Rref() 65 | if rank := dec.Rank(); rank != 3 { 66 | t.Fatalf("expected rank 3, received %d", rank) 67 | } 68 | } 69 | 70 | { 71 | 72 | m := matrix.Matrix{{68, 54, 6, 230}, {16, 56, 215, 78}, {159, 186, 146, 163}, {122, 41, 205, 133}} 73 | coded := matrix.Matrix{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}} 74 | 75 | dec := matrix.NewDecoderState(field, m, coded) 76 | dec.Rref() 77 | if rank := dec.Rank(); rank != 4 { 78 | t.Fatalf("expected rank 4, received %d", rank) 79 | } 80 | } 81 | 82 | { 83 | m := matrix.Matrix{{100, 31, 76, 199, 119}, {207, 34, 207, 208, 18}, {62, 20, 54, 6, 187}, {66, 8, 52, 73, 54}, {122, 138, 247, 211, 165}} 84 | coded := matrix.Matrix{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}} 85 | 86 | dec := matrix.NewDecoderState(field, m, coded) 87 | dec.Rref() 88 | if rank := dec.Rank(); rank != 5 { 89 | t.Fatalf("expected rank 5, received %d", rank) 90 | } 91 | } 92 | } 93 | 94 | func TestMatrixMultiplication(t *testing.T) { 95 | field := galoisfield.DefaultGF256 96 | 97 | m_1 := matrix.Matrix{{102, 82, 165, 0}} 98 | m_2 := matrix.Matrix{{157, 233, 247}, {160, 28, 233}, {149, 234, 117}, {200, 181, 55}} 99 | m_3 := matrix.Matrix{{1, 2, 3}} 100 | expected := matrix.Matrix{{186, 23, 11}} 101 | 102 | if _, err := m_3.Multiply(field, m_2); !(err != nil && errors.Is(err, kodr.ErrMatrixDimensionMismatch)) { 103 | t.Fatal("expected failed matrix multiplication error indication") 104 | } 105 | 106 | mult, err := m_1.Multiply(field, m_2) 107 | if err != nil { 108 | t.Fatal(err.Error()) 109 | } 110 | 111 | for i := 0; i < int(expected.Rows()); i++ { 112 | if !bytes.Equal(expected[i], mult[i]) { 113 | t.Fatal("row mismatch !") 114 | } 115 | } 116 | } 117 | 
-------------------------------------------------------------------------------- /systematic/decoder.go: -------------------------------------------------------------------------------- 1 | package systematic 2 | 3 | import ( 4 | "github.com/cloud9-tools/go-galoisfield" 5 | "github.com/itzmeanjan/kodr" 6 | "github.com/itzmeanjan/kodr/matrix" 7 | ) 8 | 9 | type SystematicRLNCDecoder struct { 10 | expected, useful, received uint 11 | state *matrix.DecoderState 12 | } 13 | 14 | // Each piece of N-many bytes 15 | // 16 | // Note: If no pieces are yet added to decoder state, then 17 | // returns 0, denoting **unknown** 18 | func (s *SystematicRLNCDecoder) PieceLength() uint { 19 | if s.received > 0 { 20 | coded := s.state.CodedPieceMatrix() 21 | return coded.Cols() 22 | } 23 | 24 | return 0 25 | } 26 | 27 | // Already decoded back to original pieces, with collected pieces ? 28 | // 29 | // If yes, no more pieces need to be collected 30 | func (s *SystematicRLNCDecoder) IsDecoded() bool { 31 | return s.useful >= s.expected 32 | } 33 | 34 | // How many more pieces are required to be collected so that 35 | // whole data can be decoded successfully ? 36 | // 37 | // After collecting these many pieces, original data can be decoded 38 | func (s *SystematicRLNCDecoder) Required() uint { 39 | return s.expected - s.useful 40 | } 41 | 42 | // Add one more collected coded piece, which will be used for decoding 43 | // back to original pieces 44 | // 45 | // If all required pieces are already collected i.e. 
successful decoding 46 | // has happened --- new pieces to be discarded, with an error denoting same 47 | func (s *SystematicRLNCDecoder) AddPiece(piece *kodr.CodedPiece) error { 48 | if s.IsDecoded() { 49 | return kodr.ErrAllUsefulPiecesReceived 50 | } 51 | 52 | s.state.AddPiece(piece) 53 | s.received++ 54 | if !(s.received > 1) { 55 | s.useful++ 56 | return nil 57 | } 58 | 59 | s.state.Rref() 60 | s.useful = s.state.Rank() 61 | return nil 62 | } 63 | 64 | // GetPiece - Get a decoded piece by index, may ( not ) succeed ! 65 | // 66 | // Note: It's not necessary that full decoding needs to happen 67 | // for this method to return something useful 68 | // 69 | // If M-many pieces are received among N-many expected ( read M <= N ) 70 | // then pieces with index in [0..M] ( remember upper bound exclusive ) 71 | // can be attempted to be consumed, given algebric structure has revealed 72 | // requested piece at index `i` 73 | func (s *SystematicRLNCDecoder) GetPiece(i uint) (kodr.Piece, error) { 74 | return s.state.GetPiece(i) 75 | } 76 | 77 | // All original pieces in order --- only when full decoding has happened 78 | func (s *SystematicRLNCDecoder) GetPieces() ([]kodr.Piece, error) { 79 | if !s.IsDecoded() { 80 | return nil, kodr.ErrMoreUsefulPiecesRequired 81 | } 82 | 83 | pieces := make([]kodr.Piece, 0, s.useful) 84 | for i := 0; i < int(s.useful); i++ { 85 | piece, err := s.GetPiece(uint(i)) 86 | if err != nil { 87 | return nil, err 88 | } 89 | pieces = append(pieces, piece) 90 | } 91 | return pieces, nil 92 | } 93 | 94 | // Pieces coded by systematic mean, along with randomly coded pieces, 95 | // are decoded with this decoder 96 | // 97 | // @note Actually FullRLNCDecoder could have been used for same purpose 98 | // making this one redundant 99 | // 100 | // I'll consider improving decoding by exploiting 101 | // systematic coded pieces ( vectors )/ removing this 102 | // in some future date 103 | func NewSystematicRLNCDecoder(pieceCount uint) 
*SystematicRLNCDecoder { 104 | gf := galoisfield.DefaultGF256 105 | state := matrix.NewDecoderStateWithPieceCount(gf, pieceCount) 106 | return &SystematicRLNCDecoder{expected: pieceCount, state: state} 107 | } 108 | -------------------------------------------------------------------------------- /systematic/decoder_test.go: -------------------------------------------------------------------------------- 1 | package systematic_test 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "math/rand" 7 | "testing" 8 | "time" 9 | 10 | "github.com/itzmeanjan/kodr" 11 | "github.com/itzmeanjan/kodr/systematic" 12 | ) 13 | 14 | func TestNewSystematicRLNCDecoder(t *testing.T) { 15 | rand.Seed(time.Now().UnixNano()) 16 | 17 | var ( 18 | pieceCount uint = 128 19 | pieceLength uint = 8192 20 | pieces []kodr.Piece = generatePieces(pieceCount, pieceLength) 21 | enc *systematic.SystematicRLNCEncoder = systematic.NewSystematicRLNCEncoder(pieces) 22 | dec *systematic.SystematicRLNCDecoder = systematic.NewSystematicRLNCDecoder(pieceCount) 23 | ) 24 | 25 | for { 26 | c_piece := enc.CodedPiece() 27 | 28 | // simulate random coded_piece drop/ loss 29 | if rand.Intn(2) == 0 { 30 | continue 31 | } 32 | 33 | err := dec.AddPiece(c_piece) 34 | if errors.Is(err, kodr.ErrAllUsefulPiecesReceived) { 35 | if v := dec.Required(); v != 0 { 36 | t.Fatalf("required piece count should be: %d\n", v) 37 | } 38 | break 39 | } 40 | } 41 | 42 | d_pieces, err := dec.GetPieces() 43 | if err != nil { 44 | t.Fatalf("Error: %s\n", err.Error()) 45 | } 46 | 47 | if len(d_pieces) != len(pieces) { 48 | t.Fatal("didn't decode all !") 49 | } 50 | 51 | for i := 0; i < int(pieceCount); i++ { 52 | if !bytes.Equal(pieces[i], d_pieces[i]) { 53 | t.Fatal("decoded data doesn't match !") 54 | } 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /systematic/encoder.go: -------------------------------------------------------------------------------- 1 | package systematic 2 | 3 | import 
( 4 | "github.com/cloud9-tools/go-galoisfield" 5 | "github.com/itzmeanjan/kodr" 6 | ) 7 | 8 | type SystematicRLNCEncoder struct { 9 | currentPieceId uint 10 | field *galoisfield.GF 11 | pieces []kodr.Piece 12 | extra uint 13 | } 14 | 15 | // Total #-of pieces being coded together --- denoting 16 | // these many linearly independent pieces are required 17 | // successfully decoding back to original pieces 18 | func (s *SystematicRLNCEncoder) PieceCount() uint { 19 | return uint(len(s.pieces)) 20 | } 21 | 22 | // Pieces which are coded together are all of same size 23 | // 24 | // Total data being coded = pieceSize * pieceCount ( may include 25 | // some padding bytes ) 26 | func (s *SystematicRLNCEncoder) PieceSize() uint { 27 | return uint(len(s.pieces[0])) 28 | } 29 | 30 | // How many bytes of data, constructed by concatenating 31 | // coded pieces together, required at minimum for decoding 32 | // back to original pieces ? 33 | // 34 | // As I'm coding N-many pieces together, I need at least N-many 35 | // linearly independent pieces, which are concatenated together 36 | // to form a byte slice & can be used for original data reconstruction. 37 | // 38 | // So it computes N * codedPieceLen 39 | func (s *SystematicRLNCEncoder) DecodableLen() uint { 40 | return s.PieceCount() * s.CodedPieceLen() 41 | } 42 | 43 | // If N-many original pieces are coded together 44 | // what could be length of one such coded piece 45 | // obtained by invoking `CodedPiece` ? 
46 | // 47 | // Here N = len(pieces), original pieces which are 48 | // being coded together 49 | func (s *SystematicRLNCEncoder) CodedPieceLen() uint { 50 | return s.PieceCount() + s.PieceSize() 51 | } 52 | 53 | // If any extra padding bytes added at end of original 54 | // data slice for making all pieces of same size, 55 | // returned value will be >0 56 | func (s *SystematicRLNCEncoder) Padding() uint { 57 | return s.extra 58 | } 59 | 60 | // Generates a systematic coded piece's coding vector, which has 61 | // only one non-zero element ( 1 ) 62 | func (s *SystematicRLNCEncoder) systematicCodingVector(idx uint) kodr.CodingVector { 63 | if !(idx < s.PieceCount()) { 64 | return nil 65 | } 66 | 67 | vector := make(kodr.CodingVector, s.PieceCount()) 68 | vector[idx] = 1 69 | return vector 70 | } 71 | 72 | // For systematic coding, first N-piece are returned in uncoded form 73 | // i.e. coding vectors are having only single non-zero element ( 1 ) 74 | // in respective index of piece 75 | // 76 | // Piece index `i` ( returned from this method ), where i < N 77 | // is going to have coding vector = [N]byte, where only i'th index 78 | // of this vector will have 1, all other fields will have 0. 
79 | // 80 | // Here N = #-of pieces being coded together 81 | // 82 | // Later pieces are coded as they're done in Full RLNC scheme 83 | // `i` keeps incrementing by +1, until it reaches N 84 | func (s *SystematicRLNCEncoder) CodedPiece() *kodr.CodedPiece { 85 | if s.currentPieceId < s.PieceCount() { 86 | // `nil` coding vector can be returned, which is 87 | // not being checked at all, as in that case we'll 88 | // never get into `if` branch 89 | vector := s.systematicCodingVector(s.currentPieceId) 90 | piece := make(kodr.Piece, s.PieceSize()) 91 | copy(piece, s.pieces[s.currentPieceId]) 92 | 93 | s.currentPieceId++ 94 | return &kodr.CodedPiece{ 95 | Vector: vector, 96 | Piece: piece, 97 | } 98 | } 99 | 100 | vector := kodr.GenerateCodingVector(s.PieceCount()) 101 | piece := make(kodr.Piece, s.PieceSize()) 102 | for i := range s.pieces { 103 | piece.Multiply(s.pieces[i], vector[i], s.field) 104 | } 105 | return &kodr.CodedPiece{ 106 | Vector: vector, 107 | Piece: piece, 108 | } 109 | } 110 | 111 | // When you've already splitted original data chunk into pieces 112 | // of same length ( in terms of bytes ), this function can be used 113 | // for creating one systematic RLNC encoder, which delivers coded pieces 114 | // on-the-fly 115 | func NewSystematicRLNCEncoder(pieces []kodr.Piece) *SystematicRLNCEncoder { 116 | return &SystematicRLNCEncoder{currentPieceId: 0, pieces: pieces, field: galoisfield.DefaultGF256} 117 | } 118 | 119 | // If you know #-of pieces you want to code together, invoking 120 | // this function splits whole data chunk into N-pieces, with padding 121 | // bytes appended at end of last piece, if required & prepares 122 | // full RLNC encoder for obtaining coded pieces 123 | func NewSystematicRLNCEncoderWithPieceCount(data []byte, pieceCount uint) (*SystematicRLNCEncoder, error) { 124 | pieces, padding, err := kodr.OriginalPiecesFromDataAndPieceCount(data, pieceCount) 125 | if err != nil { 126 | return nil, err 127 | } 128 | 129 | enc := 
NewSystematicRLNCEncoder(pieces) 130 | enc.extra = padding 131 | return enc, nil 132 | } 133 | 134 | // If you want to have N-bytes piece size for each, this 135 | // function generates M-many pieces each of N-bytes size, which are ready 136 | // to be coded together with full RLNC 137 | func NewSystematicRLNCEncoderWithPieceSize(data []byte, pieceSize uint) (*SystematicRLNCEncoder, error) { 138 | pieces, padding, err := kodr.OriginalPiecesFromDataAndPieceSize(data, pieceSize) 139 | if err != nil { 140 | return nil, err 141 | } 142 | 143 | enc := NewSystematicRLNCEncoder(pieces) 144 | enc.extra = padding 145 | return enc, nil 146 | } 147 | -------------------------------------------------------------------------------- /systematic/encoder_test.go: -------------------------------------------------------------------------------- 1 | package systematic_test 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "math" 7 | "math/rand" 8 | "testing" 9 | "time" 10 | 11 | "github.com/itzmeanjan/kodr" 12 | "github.com/itzmeanjan/kodr/systematic" 13 | ) 14 | 15 | // Generates `N`-bytes of random data from default 16 | // randomization source 17 | func generateData(n uint) []byte { 18 | data := make([]byte, n) 19 | // can safely ignore error 20 | rand.Read(data) 21 | return data 22 | } 23 | 24 | // Generates N-many pieces each of M-bytes length, to be used 25 | // for testing purposes 26 | func generatePieces(pieceCount uint, pieceLength uint) []kodr.Piece { 27 | pieces := make([]kodr.Piece, 0, pieceCount) 28 | for i := 0; i < int(pieceCount); i++ { 29 | pieces = append(pieces, generateData(pieceLength)) 30 | } 31 | return pieces 32 | } 33 | 34 | func TestSystematicRLNCCoding(t *testing.T) { 35 | rand.Seed(time.Now().UnixNano()) 36 | 37 | var ( 38 | pieceCount uint = uint(2<<1 + rand.Intn(2<<8)) 39 | pieceLength uint = 8192 40 | codedPieceCount uint = pieceCount * 2 41 | pieces []kodr.Piece = generatePieces(pieceCount, pieceLength) 42 | enc *systematic.SystematicRLNCEncoder = 
systematic.NewSystematicRLNCEncoder(pieces) 43 | ) 44 | 45 | for i := 0; i < int(codedPieceCount); i++ { 46 | c_piece := enc.CodedPiece() 47 | if i < int(pieceCount) { 48 | if !c_piece.IsSystematic() { 49 | t.Fatal("expected piece to be systematic coded") 50 | } 51 | } else { 52 | if c_piece.IsSystematic() { 53 | t.Fatal("expected piece to be random coded") 54 | } 55 | } 56 | } 57 | } 58 | 59 | func TestNewSystematicRLNC(t *testing.T) { 60 | rand.Seed(time.Now().UnixNano()) 61 | 62 | t.Run("Encoder", func(t *testing.T) { 63 | var ( 64 | pieceCount uint = 1 << 8 65 | pieceLength uint = 8192 66 | ) 67 | 68 | pieces := generatePieces(pieceCount, pieceLength) 69 | enc := systematic.NewSystematicRLNCEncoder(pieces) 70 | dec := systematic.NewSystematicRLNCDecoder(pieceCount) 71 | 72 | encoderFlow(t, enc, dec, pieceCount, pieces) 73 | }) 74 | 75 | t.Run("EncoderWithPieceCount", func(t *testing.T) { 76 | size := uint(2<<10 + rand.Intn(2<<10)) 77 | pieceCount := uint(2<<1 + rand.Intn(2<<8)) 78 | data := generateData(size) 79 | 80 | enc, err := systematic.NewSystematicRLNCEncoderWithPieceCount(data, pieceCount) 81 | if err != nil { 82 | t.Fatalf("Error: %s\n", err.Error()) 83 | } 84 | 85 | pieces, _, err := kodr.OriginalPiecesFromDataAndPieceCount(data, pieceCount) 86 | if err != nil { 87 | t.Fatal(err.Error()) 88 | } 89 | 90 | dec := systematic.NewSystematicRLNCDecoder(pieceCount) 91 | encoderFlow(t, enc, dec, pieceCount, pieces) 92 | }) 93 | 94 | t.Run("EncoderWithPieceSize", func(t *testing.T) { 95 | size := uint(2<<10 + rand.Intn(2<<10)) 96 | pieceSize := uint(2<<5 + rand.Intn(2<<5)) 97 | pieceCount := uint(math.Ceil(float64(size) / float64(pieceSize))) 98 | data := generateData(size) 99 | 100 | enc, err := systematic.NewSystematicRLNCEncoderWithPieceSize(data, pieceSize) 101 | if err != nil { 102 | t.Fatalf("Error: %s\n", err.Error()) 103 | } 104 | 105 | pieces, _, err := kodr.OriginalPiecesFromDataAndPieceSize(data, pieceSize) 106 | if err != nil { 107 | 
t.Fatal(err.Error()) 108 | } 109 | 110 | dec := systematic.NewSystematicRLNCDecoder(pieceCount) 111 | encoderFlow(t, enc, dec, pieceCount, pieces) 112 | }) 113 | } 114 | 115 | func encoderFlow(t *testing.T, enc *systematic.SystematicRLNCEncoder, dec *systematic.SystematicRLNCDecoder, pieceCount uint, pieces []kodr.Piece) { 116 | for { 117 | c_piece := enc.CodedPiece() 118 | 119 | if rand.Intn(2) == 0 { 120 | continue 121 | } 122 | 123 | if err := dec.AddPiece(c_piece); err != nil && errors.Is(err, kodr.ErrAllUsefulPiecesReceived) { 124 | break 125 | } 126 | } 127 | 128 | d_pieces, err := dec.GetPieces() 129 | if err != nil { 130 | t.Fatal(err.Error()) 131 | } 132 | 133 | if len(pieces) != len(d_pieces) { 134 | t.Fatal("didn't decode all !") 135 | } 136 | 137 | for i := 0; i < int(pieceCount); i++ { 138 | if !bytes.Equal(pieces[i], d_pieces[i]) { 139 | t.Fatal("decoded data doesn't match !") 140 | } 141 | } 142 | } 143 | 144 | func TestSystematicRLNCEncoder_Padding(t *testing.T) { 145 | rand.Seed(time.Now().UnixNano()) 146 | 147 | t.Run("WithPieceCount", func(t *testing.T) { 148 | for i := 0; i < 1<<5; i++ { 149 | size := uint(2<<10 + rand.Intn(2<<10)) 150 | pieceCount := uint(2<<1 + rand.Intn(2<<8)) 151 | data := generateData(size) 152 | 153 | enc, err := systematic.NewSystematicRLNCEncoderWithPieceCount(data, pieceCount) 154 | if err != nil { 155 | t.Fatalf("Error: %s\n", err.Error()) 156 | } 157 | 158 | extra := enc.Padding() 159 | pieceSize := (size + extra) / pieceCount 160 | c_piece := enc.CodedPiece() 161 | if uint(len(c_piece.Piece)) != pieceSize { 162 | t.Fatalf("expected pieceSize to be %dB, found to be %dB\n", pieceSize, len(c_piece.Piece)) 163 | } 164 | } 165 | }) 166 | 167 | t.Run("WithPieceSize", func(t *testing.T) { 168 | for i := 0; i < 1<<5; i++ { 169 | size := uint(2<<10 + rand.Intn(2<<10)) 170 | pieceSize := uint(2<<5 + rand.Intn(2<<5)) 171 | pieceCount := uint(math.Ceil(float64(size) / float64(pieceSize))) 172 | data := generateData(size) 173 | 
174 | enc, err := systematic.NewSystematicRLNCEncoderWithPieceSize(data, pieceSize) 175 | if err != nil { 176 | t.Fatalf("Error: %s\n", err.Error()) 177 | } 178 | 179 | extra := enc.Padding() 180 | c_pieceSize := (size + extra) / pieceCount 181 | c_piece := enc.CodedPiece() 182 | if pieceSize != c_pieceSize || uint(len(c_piece.Piece)) != pieceSize { 183 | t.Fatalf("expected pieceSize to be %dB, found to be %dB\n", c_pieceSize, len(c_piece.Piece)) 184 | } 185 | } 186 | }) 187 | } 188 | 189 | func TestSystematicRLNCEncoder_CodedPieceLen(t *testing.T) { 190 | rand.Seed(time.Now().UnixNano()) 191 | 192 | t.Run("WithPieceCount", func(t *testing.T) { 193 | size := uint(2<<10 + rand.Intn(2<<10)) 194 | pieceCount := uint(2<<1 + rand.Intn(2<<8)) 195 | data := generateData(size) 196 | 197 | enc, err := systematic.NewSystematicRLNCEncoderWithPieceCount(data, pieceCount) 198 | if err != nil { 199 | t.Fatalf("Error: %s\n", err.Error()) 200 | } 201 | 202 | for i := 0; i <= int(pieceCount); i++ { 203 | c_piece := enc.CodedPiece() 204 | if c_piece.Len() != enc.CodedPieceLen() { 205 | t.Fatalf("expected coded piece to be of %dB, found to be of %dB\n", enc.CodedPieceLen(), c_piece.Len()) 206 | } 207 | } 208 | }) 209 | 210 | t.Run("WithPieceSize", func(t *testing.T) { 211 | size := uint(2<<10 + rand.Intn(2<<10)) 212 | pieceSize := uint(2<<5 + rand.Intn(2<<5)) 213 | pieceCount := uint(math.Ceil(float64(size) / float64(pieceSize))) 214 | data := generateData(size) 215 | 216 | enc, err := systematic.NewSystematicRLNCEncoderWithPieceSize(data, pieceSize) 217 | if err != nil { 218 | t.Fatalf("Error: %s\n", err.Error()) 219 | } 220 | 221 | for i := 0; i <= int(pieceCount); i++ { 222 | c_piece := enc.CodedPiece() 223 | if c_piece.Len() != enc.CodedPieceLen() { 224 | t.Fatalf("expected coded piece to be of %dB, found to be of %dB\n", enc.CodedPieceLen(), c_piece.Len()) 225 | } 226 | } 227 | }) 228 | } 229 | 230 | func TestSystematicRLNCEncoder_DecodableLen(t *testing.T) { 231 | 
rand.Seed(time.Now().UnixNano()) 232 | 233 | flow := func(enc *systematic.SystematicRLNCEncoder, dec *systematic.SystematicRLNCDecoder) { 234 | consumed_len := uint(0) 235 | for !dec.IsDecoded() { 236 | c_piece := enc.CodedPiece() 237 | // randomly drop piece 238 | if rand.Intn(2) == 0 { 239 | continue 240 | } 241 | if err := dec.AddPiece(c_piece); errors.Is(err, kodr.ErrAllUsefulPiecesReceived) { 242 | break 243 | } 244 | 245 | // as consumed this piece --- accounting 246 | consumed_len += c_piece.Len() 247 | } 248 | 249 | if consumed_len < enc.DecodableLen() { 250 | t.Fatalf("expected to consume >=%dB for decoding, but actually consumed %dB\n", enc.DecodableLen(), consumed_len) 251 | } 252 | } 253 | 254 | t.Run("WithPieceCount", func(t *testing.T) { 255 | size := uint(2<<10 + rand.Intn(2<<10)) 256 | pieceCount := uint(2<<1 + rand.Intn(2<<8)) 257 | data := generateData(size) 258 | 259 | enc, err := systematic.NewSystematicRLNCEncoderWithPieceCount(data, pieceCount) 260 | if err != nil { 261 | t.Fatalf("Error: %s\n", err.Error()) 262 | } 263 | 264 | dec := systematic.NewSystematicRLNCDecoder(pieceCount) 265 | flow(enc, dec) 266 | }) 267 | 268 | t.Run("WithPieceSize", func(t *testing.T) { 269 | size := uint(2<<10 + rand.Intn(2<<10)) 270 | pieceSize := uint(2<<5 + rand.Intn(2<<5)) 271 | pieceCount := uint(math.Ceil(float64(size) / float64(pieceSize))) 272 | data := generateData(size) 273 | 274 | enc, err := systematic.NewSystematicRLNCEncoderWithPieceSize(data, pieceSize) 275 | if err != nil { 276 | t.Fatalf("Error: %s\n", err.Error()) 277 | } 278 | 279 | dec := systematic.NewSystematicRLNCDecoder(pieceCount) 280 | flow(enc, dec) 281 | }) 282 | } 283 | --------------------------------------------------------------------------------