├── LICENCE ├── README.md ├── cmd ├── mndecode │ ├── hexout.go │ └── mndecode.go └── mnencode │ ├── hexin.go │ └── mnencode.go ├── fuzz.go ├── go.mod ├── issue002_test.go ├── mnemonicode.go ├── mnemonicode_test.go ├── scan_words.go └── word_list.go /LICENCE: -------------------------------------------------------------------------------- 1 | // From GitHub version/fork maintained by Stephen Paul Weber available at: 2 | // https://github.com/singpolyma/mnemonicode 3 | // 4 | // Originally from: 5 | // http://web.archive.org/web/20101031205747/http://www.tothink.com/mnemonic/ 6 | 7 | /* 8 | Copyright (c) 2000 Oren Tirosh 9 | 10 | Permission is hereby granted, free of charge, to any person obtaining a copy 11 | of this software and associated documentation files (the "Software"), to deal 12 | in the Software without restriction, including without limitation the rights 13 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 14 | copies of the Software, and to permit persons to whom the Software is 15 | furnished to do so, subject to the following conditions: 16 | 17 | The above copyright notice and this permission notice shall be included in 18 | all copies or substantial portions of the Software. 19 | 20 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 23 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 24 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 25 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 26 | THE SOFTWARE. 
27 | */ 28 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Mnemonicode 2 | =========== 3 | 4 | Mnemonicode is a method for encoding binary data into a sequence 5 | of words which can be spoken over the phone, for example, and converted 6 | back to data on the other side. 7 | 8 | [![GoDoc](https://godoc.org/bitbucket.org/dchapes/mnemonicode?status.png)](https://godoc.org/bitbucket.org/dchapes/mnemonicode) 9 | 10 | Online package documentation is available via 11 | [https://godoc.org/bitbucket.org/dchapes/mnemonicode](https://godoc.org/bitbucket.org/dchapes/mnemonicode). 12 | 13 | To install the package: 14 | 15 | go get bitbucket.org/dchapes/mnemonicode 16 | 17 | or the command line programs: 18 | 19 | go get bitbucket.org/dchapes/mnemonicode/cmd/... 20 | 21 | or `go build` any Go code that imports it: 22 | 23 | import "bitbucket.org/dchapes/mnemonicode" 24 | 25 | For more information see 26 | 27 | or 28 | 29 | 30 | From the README there: 31 | 32 | There are some other somewhat similar systems that seem less satisfactory: 33 | 34 | - OTP was designed for easy typing, and for minimizing length, but as 35 | a consequence the word list contains words that are similar ("AD" 36 | and "ADD") that are poor for dictating over the phone 37 | 38 | - PGPfone has optimized "maximum phonetic distance" between words, 39 | which resolves the above problem but has some other drawbacks: 40 | 41 | - Low efficiency, as it encodes a little less than 1 bit per 42 | character; 43 | 44 | - Word quality issues, as some words are somewhat obscure to 45 | non-native speakers of English, or are awkward to use or type. 46 | 47 | Mnemonic tries to do better by being more selective about its word 48 | list. Its criteria are thus: 49 | 50 | Mandatory Criteria: 51 | 52 | - The wordlist contains 1626 words. 53 | 54 | - All words are between 4 and 7 letters long. 
55 | 56 | - No word in the list is a prefix of another word (e.g. visit, 57 | visitor). 58 | 59 | - Five letter prefixes of words are sufficient to be unique. 60 | 61 | Less Strict Criteria: 62 | 63 | - The words should be usable by people all over the world. The list 64 | is far from perfect in that respect. It is heavily biased towards 65 | western culture and English in particular. The international 66 | vocabulary is simply not big enough. One can argue that even words 67 | like "hotel" or "radio" are not truly international. You will find 68 | many English words in the list but I have tried to limit them to 69 | words that are part of a beginner's vocabulary or words that have 70 | close relatives in other european languages. In some cases a word 71 | has a different meaning in another language or is pronounced very 72 | differently but for the purpose of the encoding it is still ok - I 73 | assume that when the encoding is used for spoken communication 74 | both sides speak the same language. 75 | 76 | - The words should have more than one syllable. This makes them 77 | easier to recognize when spoken, especially over a phone 78 | line. Again, you will find many exceptions. For one syllable words 79 | I have tried to use words with 3 or more consonants or words with 80 | diphthongs, making for a longer and more distinct 81 | pronunciation. As a result of this requirement the average word 82 | length has increased. I do not consider this to be a problem since 83 | my goal in limiting the word length was not to reduce the average 84 | length of encoded data but to limit the maximum length to fit in 85 | fixed-size fields or a terminal line width. 86 | 87 | - No two words on the list should sound too much alike. Soundalikes 88 | such as "sweet" and "suite" are ruled out. One of the two is 89 | chosen and the other should be accepted by the decoder's 90 | soundalike matching code or using explicit aliases for some words. 91 | 92 | - No offensive words. 
The rule was to avoid words that I would not 93 | like to be printed on my business card. I have extended this to 94 | words that by themselves are not offensive but are too likely to 95 | create combinations that someone may find embarrassing or 96 | offensive. This includes words dealing with religion such as 97 | "church" or "jewish" and some words with negative meanings like 98 | "problem" or "fiasco". I am sure that a creative mind (or a random 99 | number generator) can find plenty of embarrassing or offensive word 100 | combinations using only words in the list but I have tried to 101 | avoid the more obvious ones. One of my tools for this was simply a 102 | generator of random word combinations - the problematic ones stick 103 | out like a sore thumb. 104 | 105 | - Avoid words with tricky spelling or pronunciation. Even if the 106 | receiver of the message can probably spell the word close enough 107 | for the soundalike matcher to recognize it correctly I prefer 108 | avoiding such words. I believe this will help users feel more 109 | comfortable using the system, increase the level of confidence and 110 | decrease the overall error rate. Most words in the list can be 111 | spelled more or less correctly from hearing, even without knowing 112 | the word. 113 | 114 | - The word should feel right for the job. I know, this one is very 115 | subjective but some words would meet all the criteria and still 116 | not feel right for the purpose of mnemonic encoding. The word 117 | should feel like one of the words in the radio phonetic alphabets 118 | (alpha, bravo, charlie, delta etc). 
119 | -------------------------------------------------------------------------------- /cmd/mndecode/hexout.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/hex" 5 | "io" 6 | ) 7 | 8 | const bufsize = 256 9 | 10 | type hexdump struct { 11 | w io.Writer 12 | buf [bufsize]byte 13 | } 14 | 15 | func hexoutput(w io.Writer) io.WriteCloser { 16 | return &hexdump{w: w} 17 | } 18 | 19 | func (h *hexdump) Write(data []byte) (n int, err error) { 20 | for n < len(data) { 21 | amt := len(data) - n 22 | if hex.EncodedLen(amt) > bufsize { 23 | amt = hex.DecodedLen(bufsize) 24 | } 25 | nn := hex.Encode(h.buf[:], data[n:n+amt]) 26 | _, err := h.w.Write(h.buf[:nn]) 27 | n += amt 28 | if err != nil { 29 | return n, err 30 | } 31 | } 32 | return n, nil 33 | } 34 | 35 | func (h *hexdump) Close() error { 36 | _, err := h.w.Write([]byte{'\n'}) 37 | return err 38 | } 39 | -------------------------------------------------------------------------------- /cmd/mndecode/mndecode.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "io" 6 | "log" 7 | "os" 8 | "path" 9 | 10 | "github.com/schollz/mnemonicode" 11 | ) 12 | 13 | func main() { 14 | log.SetFlags(0) 15 | log.SetPrefix(path.Base(os.Args[0]) + ": ") 16 | hexFlag := flag.Bool("x", false, "hex output") 17 | verboseFlag := flag.Bool("v", false, "verbose") 18 | flag.Parse() 19 | if flag.NArg() > 0 { 20 | flag.Usage() 21 | os.Exit(2) 22 | } 23 | 24 | output := io.WriteCloser(os.Stdout) 25 | if *hexFlag { 26 | output = hexoutput(output) 27 | } 28 | 29 | var n int64 30 | var err error 31 | if true { 32 | dec := mnemonicode.NewDecoder(os.Stdin) 33 | n, err = io.Copy(output, dec) 34 | } else { 35 | w := mnemonicode.NewDecodeWriter(output) 36 | n, err = io.Copy(w, os.Stdin) 37 | if err != nil { 38 | log.Fatal(err) 39 | } 40 | err = w.Close() 41 | } 42 | if err != nil { 43 | 
log.Fatal(err) 44 | } 45 | if *verboseFlag { 46 | log.Println("bytes decoded:", n) 47 | } 48 | if err = output.Close(); err != nil { 49 | log.Fatal(err) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /cmd/mnencode/hexin.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/hex" 5 | "unicode" 6 | "unicode/utf8" 7 | 8 | "golang.org/x/text/transform" 9 | ) 10 | 11 | type hexinput bool 12 | 13 | func (h *hexinput) Reset() { 14 | *h = false 15 | } 16 | 17 | func (h *hexinput) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { 18 | for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] { 19 | if r = rune(src[0]); r < utf8.RuneSelf { 20 | sz = 1 21 | } else { 22 | r, sz = utf8.DecodeRune(src) 23 | if sz == 1 { 24 | // Invalid rune. 25 | if !atEOF && !utf8.FullRune(src) { 26 | err = transform.ErrShortSrc 27 | break 28 | } 29 | // Just ignore it 30 | nSrc++ 31 | continue 32 | } 33 | } 34 | if unicode.IsSpace(r) { 35 | nSrc += sz 36 | continue 37 | } 38 | if sz > 1 { 39 | err = hex.InvalidByteError(src[0]) // XXX 40 | break 41 | } 42 | if len(src) < 2 { 43 | err = transform.ErrShortSrc 44 | break 45 | } 46 | if nDst+1 > len(dst) { 47 | err = transform.ErrShortDst 48 | break 49 | } 50 | 51 | sz = 2 52 | nSrc += 2 53 | if !*h { 54 | *h = true 55 | if r == '0' && (src[1] == 'x' || src[1] == 'X') { 56 | continue 57 | } 58 | } 59 | 60 | if _, err = hex.Decode(dst[nDst:], src[:2]); err != nil { 61 | break 62 | } 63 | nDst++ 64 | } 65 | return 66 | } 67 | -------------------------------------------------------------------------------- /cmd/mnencode/mnencode.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "io" 7 | "io/ioutil" 8 | "log" 9 | "os" 10 | "path" 11 | "strconv" 12 | 13 | "github.com/schollz/mnemonicode" 14 | 
"golang.org/x/text/transform" 15 | ) 16 | 17 | type quoted string 18 | 19 | func (q quoted) Get() interface{} { return string(q) } 20 | func (q quoted) String() string { return strconv.Quote(string(q)) } 21 | func (q *quoted) Set(s string) (err error) { 22 | if s, err = strconv.Unquote(`"` + s + `"`); err == nil { 23 | *q = quoted(s) 24 | } 25 | return 26 | } 27 | 28 | type quotedRune rune 29 | 30 | func (qr quotedRune) Get() interface{} { return rune(qr) } 31 | func (qr quotedRune) String() string { return strconv.QuoteRune(rune(qr)) } 32 | func (qr *quotedRune) Set(s string) error { 33 | r, _, x, err := strconv.UnquoteChar(s, 0) 34 | if err != nil { 35 | return err 36 | } 37 | if x != "" { 38 | return fmt.Errorf("more than a single rune") 39 | } 40 | *qr = quotedRune(r) 41 | return nil 42 | } 43 | 44 | func main() { 45 | log.SetFlags(0) 46 | log.SetPrefix(path.Base(os.Args[0]) + ": ") 47 | vlog := log.New(os.Stderr, log.Prefix(), log.Flags()) 48 | 49 | config := mnemonicode.NewDefaultConfig() 50 | prefix := quoted(config.LinePrefix) 51 | suffix := quoted(config.LineSuffix) 52 | wordsep := quoted(config.WordSeparator) 53 | groupsep := quoted(config.GroupSeparator) 54 | pad := quotedRune(config.WordPadding) 55 | 56 | flag.Var(&prefix, "prefix", "prefix each line with `string`") 57 | flag.Var(&suffix, "suffix", "suffix each line with `string`") 58 | flag.Var(&wordsep, "word", "separate each word with `wsep`") 59 | flag.Var(&groupsep, "group", "separate each word group with `gsep`") 60 | words := flag.Uint("words", config.WordsPerGroup, "words per group") 61 | groups := flag.Uint("groups", config.GroupsPerLine, "groups per line") 62 | nopad := flag.Bool("nopad", false, "do not pad words") 63 | flag.Var(&pad, "pad", "pad shorter words with `rune`") 64 | hexin := flag.Bool("x", false, "hex input") 65 | verbose := flag.Bool("v", false, "verbose") 66 | 67 | flag.Parse() 68 | if flag.NArg() > 0 { 69 | flag.Usage() 70 | os.Exit(2) 71 | } 72 | 73 | if !*verbose { 74 | 
vlog.SetOutput(ioutil.Discard) 75 | } 76 | 77 | config.LinePrefix = prefix.Get().(string) 78 | config.LineSuffix = suffix.Get().(string) 79 | config.GroupSeparator = groupsep.Get().(string) 80 | config.WordSeparator = wordsep.Get().(string) 81 | config.WordPadding = pad.Get().(rune) 82 | if *words > 0 { 83 | config.WordsPerGroup = *words 84 | } 85 | if *groups > 0 { 86 | config.GroupsPerLine = *groups 87 | } 88 | if *nopad { 89 | config.WordPadding = 0 90 | } 91 | 92 | vlog.Println("Wordlist ver", mnemonicode.WordListVersion) 93 | 94 | input := io.Reader(os.Stdin) 95 | if *hexin { 96 | input = transform.NewReader(input, new(hexinput)) 97 | } 98 | 99 | var n int64 100 | var err error 101 | if true { 102 | enc := mnemonicode.NewEncoder(os.Stdout, config) 103 | n, err = io.Copy(enc, input) 104 | if err != nil { 105 | log.Fatal(err) 106 | } 107 | err = enc.Close() 108 | } else { 109 | r := mnemonicode.NewEncodeReader(input, config) 110 | n, err = io.Copy(os.Stdout, r) 111 | } 112 | if err != nil { 113 | log.Fatal(err) 114 | } 115 | fmt.Println() 116 | vlog.Println("bytes encoded:", n) 117 | } 118 | -------------------------------------------------------------------------------- /fuzz.go: -------------------------------------------------------------------------------- 1 | // For use with go-fuzz, "github.com/dvyukov/go-fuzz" 2 | // 3 | // +build gofuzz 4 | 5 | package mnemonicode 6 | 7 | import ( 8 | "bytes" 9 | "fmt" 10 | 11 | "golang.org/x/text/transform" 12 | ) 13 | 14 | var ( 15 | tenc = NewEncodeTransformer(nil) 16 | tdec = NewDecodeTransformer() 17 | tencdec = transform.Chain(tenc, tdec) 18 | ) 19 | 20 | //go:generate go-fuzz-build bitbucket.org/dchapes/mnemonicode 21 | // Then: 22 | // go-fuzz -bin=mnemonicode-fuzz.zip -workdir=fuzz 23 | 24 | // Fuzz is for use with go-fuzz, "github.com/dvyukov/go-fuzz" 25 | func Fuzz(data []byte) int { 26 | words := EncodeWordList(nil, data) 27 | if len(words) != WordsRequired(len(data)) { 28 | panic("bad WordsRequired result") 
29 | } 30 | data2, err := DecodeWordList(nil, words) 31 | if err != nil { 32 | fmt.Println("words:", words) 33 | panic(err) 34 | } 35 | if !bytes.Equal(data, data2) { 36 | fmt.Println("words:", words) 37 | panic("data != data2") 38 | } 39 | 40 | data3, _, err := transform.Bytes(tencdec, data) 41 | if err != nil { 42 | panic(err) 43 | } 44 | if !bytes.Equal(data, data3) { 45 | fmt.Println("words:", words) 46 | panic("data != data3") 47 | } 48 | 49 | if len(data) == 0 { 50 | return 0 51 | } 52 | return 1 53 | } 54 | 55 | //go:generate go-fuzz-build -func Fuzz2 -o mnemonicode-fuzz2.zip bitbucket.org/dchapes/mnemonicode 56 | // Then: 57 | // go-fuzz -bin=mnemonicode-fuzz2.zip -workdir=fuzz2 58 | 59 | // Fuzz2 is another fuzz tester, this time with words as input rather than binary data. 60 | func Fuzz2(data []byte) int { 61 | _, _, err := transform.Bytes(tdec, data) 62 | if err != nil { 63 | if _, ok := err.(WordError); !ok { 64 | return 0 65 | } 66 | fmt.Println("Unexpected error") 67 | panic(err) 68 | } 69 | return 1 70 | } 71 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/schollz/mnemonicode 2 | 3 | require golang.org/x/text v0.3.0 4 | -------------------------------------------------------------------------------- /issue002_test.go: -------------------------------------------------------------------------------- 1 | package mnemonicode_test 2 | 3 | import ( 4 | "bytes" 5 | "io" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/schollz/mnemonicode" 10 | ) 11 | 12 | func TestIssue002(t *testing.T) { 13 | buf := &bytes.Buffer{} 14 | // Code from: 15 | const issue = `https://bitbucket.org/dchapes/mnemonicode/issues/2` 16 | 17 | config := mnemonicode.NewDefaultConfig() 18 | config.GroupsPerLine = 1 19 | config.LineSuffix = "\n" 20 | config.GroupSeparator = "\n" 21 | config.WordPadding = 0 22 | config.WordsPerGroup = 1 23 | 
config.WordSeparator = "\n" 24 | src := strings.NewReader("abcdefgh") 25 | r := mnemonicode.NewEncodeReader(src, config) 26 | //io.Copy(os.Stdout, r) 27 | io.Copy(buf, r) 28 | 29 | // Note, in the issue the expected trailing newline is missing. 30 | const expected = `bogart 31 | atlas 32 | safari 33 | airport 34 | cabaret 35 | shock` 36 | if s := buf.String(); s != expected { 37 | t.Errorf("%v\n\tgave %q\n\twant%q", issue, s, expected) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /mnemonicode.go: -------------------------------------------------------------------------------- 1 | // Package mnemonicode … 2 | package mnemonicode 3 | 4 | import ( 5 | "fmt" 6 | "io" 7 | "strings" 8 | "unicode/utf8" 9 | 10 | "golang.org/x/text/transform" 11 | ) 12 | 13 | // WordsRequired returns the number of words required to encode input 14 | // data of length bytes using mnomonic encoding. 15 | // 16 | // Every four bytes of input is encoded into three words. If there 17 | // is an extra one or two bytes they get an extra one or two words 18 | // respectively. If there is an extra three bytes, they will be encoded 19 | // into three words with the last word being one of a small set of very 20 | // short words (only needed to encode the last 3 bits). 21 | func WordsRequired(length int) int { 22 | return ((length + 1) * 3) / 4 23 | } 24 | 25 | // A Config structure contains options for mneomonic encoding. 
26 | // 27 | // {PREFIX}word{wsep}word{gsep}word{wsep}word{SUFFIX} 28 | type Config struct { 29 | LinePrefix string 30 | LineSuffix string 31 | WordSeparator string 32 | GroupSeparator string 33 | WordsPerGroup uint 34 | GroupsPerLine uint 35 | WordPadding rune 36 | } 37 | 38 | var defaultConfig = Config{ 39 | LinePrefix: "", 40 | LineSuffix: "\n", 41 | WordSeparator: " ", 42 | GroupSeparator: " - ", 43 | WordsPerGroup: 3, 44 | GroupsPerLine: 3, 45 | WordPadding: ' ', 46 | } 47 | 48 | // NewDefaultConfig returns a newly allocated Config initialised with default values. 49 | func NewDefaultConfig() *Config { 50 | r := new(Config) 51 | *r = defaultConfig 52 | return r 53 | } 54 | 55 | // NewEncodeReader returns a new io.Reader that will return a 56 | // formatted list of mnemonic words representing the bytes in r. 57 | // 58 | // The configuration of the word formatting is controlled 59 | // by c, which can be nil for default formatting. 60 | func NewEncodeReader(r io.Reader, c *Config) io.Reader { 61 | t := NewEncodeTransformer(c) 62 | return transform.NewReader(r, t) 63 | } 64 | 65 | // NewEncoder returns a new io.WriteCloser that will write a formatted 66 | // list of mnemonic words representing the bytes written to w. The user 67 | // needs to call Close to flush unwritten bytes that may be buffered. 68 | // 69 | // The configuration of the word formatting is controlled 70 | // by c, which can be nil for default formatting. 71 | func NewEncoder(w io.Writer, c *Config) io.WriteCloser { 72 | t := NewEncodeTransformer(c) 73 | return transform.NewWriter(w, t) 74 | } 75 | 76 | // NewEncodeTransformer returns a new transformer 77 | // that encodes bytes into mnemonic words. 78 | // 79 | // The configuration of the word formatting is controlled 80 | // by c, which can be nil for default formatting. 
81 | func NewEncodeTransformer(c *Config) transform.Transformer { 82 | if c == nil { 83 | c = &defaultConfig 84 | } 85 | return &enctrans{ 86 | c: *c, 87 | state: needPrefix, 88 | } 89 | } 90 | 91 | type enctrans struct { 92 | c Config 93 | state encTransState 94 | wordCnt uint 95 | groupCnt uint 96 | wordidx [3]int 97 | wordidxcnt int // remaining indexes in wordidx; wordidx[3-wordidxcnt:] 98 | } 99 | 100 | func (t *enctrans) Reset() { 101 | t.state = needPrefix 102 | t.wordCnt = 0 103 | t.groupCnt = 0 104 | t.wordidxcnt = 0 105 | } 106 | 107 | type encTransState uint8 108 | 109 | const ( 110 | needNothing = iota 111 | needPrefix 112 | needWordSep 113 | needGroupSep 114 | needSuffix 115 | ) 116 | 117 | func (t *enctrans) strState() (str string, nextState encTransState) { 118 | switch t.state { 119 | case needPrefix: 120 | str = t.c.LinePrefix 121 | case needWordSep: 122 | str = t.c.WordSeparator 123 | case needGroupSep: 124 | str = t.c.GroupSeparator 125 | case needSuffix: 126 | str = t.c.LineSuffix 127 | nextState = needPrefix 128 | } 129 | return 130 | } 131 | 132 | func (t *enctrans) advState() { 133 | t.wordCnt++ 134 | if t.wordCnt < t.c.WordsPerGroup { 135 | t.state = needWordSep 136 | } else { 137 | t.wordCnt = 0 138 | t.groupCnt++ 139 | if t.groupCnt < t.c.GroupsPerLine { 140 | t.state = needGroupSep 141 | } else { 142 | t.groupCnt = 0 143 | t.state = needSuffix 144 | } 145 | } 146 | } 147 | 148 | // transformWords consumes words from wordidx copying the words with 149 | // formatting into dst. 150 | // On return, if err==nil, all words were consumed (wordidxcnt==0). 
151 | func (t *enctrans) transformWords(dst []byte) (nDst int, err error) { 152 | //log.Println("transformWords: len(dst)=",len(dst),"wordidxcnt=",t.wordidxcnt) 153 | for t.wordidxcnt > 0 { 154 | for t.state != needNothing { 155 | str, nextState := t.strState() 156 | if len(dst) < len(str) { 157 | return nDst, transform.ErrShortDst 158 | } 159 | n := copy(dst, str) 160 | dst = dst[n:] 161 | nDst += n 162 | t.state = nextState 163 | } 164 | word := WordList[t.wordidx[3-t.wordidxcnt]] 165 | n := len(word) 166 | if n < longestWord { 167 | if rlen := utf8.RuneLen(t.c.WordPadding); rlen > 0 { 168 | n += (longestWord - n) * rlen 169 | } 170 | } 171 | if len(dst) < n { 172 | return nDst, transform.ErrShortDst 173 | } 174 | n = copy(dst, word) 175 | t.wordidxcnt-- 176 | dst = dst[n:] 177 | nDst += n 178 | if t.c.WordPadding != 0 { 179 | for i := n; i < longestWord; i++ { 180 | n = utf8.EncodeRune(dst, t.c.WordPadding) 181 | dst = dst[n:] 182 | nDst += n 183 | } 184 | } 185 | t.advState() 186 | } 187 | return nDst, nil 188 | } 189 | 190 | // Transform implements the transform.Transformer interface. 
191 | func (t *enctrans) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { 192 | //log.Printf("Transform(%d,%d,%t)\n", len(dst), len(src), atEOF) 193 | var n int 194 | for { 195 | if t.wordidxcnt > 0 { 196 | n, err = t.transformWords(dst) 197 | dst = dst[n:] 198 | nDst += n 199 | if err != nil { 200 | //log.Printf("\t\t\tRet1: (%d) %d, %d, %v\n", t.wordidxcnt, nDst, nSrc, err) 201 | return 202 | } 203 | } 204 | var x uint32 205 | switch { 206 | case len(src) >= 4: 207 | x = uint32(src[0]) 208 | x |= uint32(src[1]) << 8 209 | x |= uint32(src[2]) << 16 210 | x |= uint32(src[3]) << 24 211 | src = src[4:] 212 | nSrc += 4 213 | 214 | t.wordidx[0] = int(x % base) 215 | t.wordidx[1] = int(x/base) % base 216 | t.wordidx[2] = int(x/base/base) % base 217 | t.wordidxcnt = 3 218 | //log.Printf("\t\tConsumed 4 bytes (%d, %d)", nDst, nSrc) 219 | //continue 220 | case len(src) == 0: 221 | //log.Printf("\t\t\tRet2: (%d) %d, %d, %v\n", t.wordidxcnt, nDst, nSrc, err) 222 | return 223 | case !atEOF: 224 | //log.Printf("\t\t!atEOF (%d, %d)", nDst, nSrc) 225 | err = transform.ErrShortSrc 226 | return 227 | default: 228 | x = 0 229 | n = len(src) 230 | for i := n - 1; i >= 0; i-- { 231 | x <<= 8 232 | x |= uint32(src[i]) 233 | } 234 | t.wordidx[3-n] = int(x % base) 235 | if n >= 2 { 236 | t.wordidx[4-n] = int(x/base) % base 237 | } 238 | if n == 3 { 239 | t.wordidx[2] = base + int(x/base/base)%7 240 | } 241 | src = src[n:] 242 | nSrc += n 243 | t.wordidxcnt = n 244 | //log.Printf("\t\tatEOF (%d) (%d, %d)", t.wordidxcnt, nDst, nSrc) 245 | //continue 246 | } 247 | } 248 | } 249 | 250 | // 251 | 252 | // NewDecoder returns a new io.Reader that will return the 253 | // decoded bytes from mnemonic words in r. Unrecognized 254 | // words in r will cause reads to return an error. 
255 | func NewDecoder(r io.Reader) io.Reader { 256 | t := NewDecodeTransformer() 257 | return transform.NewReader(r, t) 258 | } 259 | 260 | // NewDecodeWriter returns a new io.WriteCloser that will 261 | // write decoded bytes from mnemonic words written to it. 262 | // Unrecognized words will cause a write error. The user needs 263 | // to call Close to flush unwritten bytes that may be buffered. 264 | func NewDecodeWriter(w io.Writer) io.WriteCloser { 265 | t := NewDecodeTransformer() 266 | return transform.NewWriter(w, t) 267 | } 268 | 269 | // NewDecodeTransformer returns a new transform 270 | // that decodes mnemonic words into the represented 271 | // bytes. Unrecognized words will trigger an error. 272 | func NewDecodeTransformer() transform.Transformer { 273 | return &dectrans{wordidx: make([]int, 0, 3)} 274 | } 275 | 276 | type dectrans struct { 277 | wordidx []int 278 | short bool // last word in wordidx is/was short 279 | } 280 | 281 | func (t *dectrans) Reset() { 282 | t.wordidx = nil 283 | t.short = false 284 | } 285 | 286 | func (t *dectrans) transformWords(dst []byte) (int, error) { 287 | //log.Println("transformWords: len(dst)=",len(dst),"len(t.wordidx)=", len(t.wordidx)) 288 | n := len(t.wordidx) 289 | if n == 3 && !t.short { 290 | n = 4 291 | } 292 | if len(dst) < n { 293 | return 0, transform.ErrShortDst 294 | } 295 | for len(t.wordidx) < 3 { 296 | t.wordidx = append(t.wordidx, 0) 297 | } 298 | x := uint32(t.wordidx[2]) 299 | x *= base 300 | x += uint32(t.wordidx[1]) 301 | x *= base 302 | x += uint32(t.wordidx[0]) 303 | for i := 0; i < n; i++ { 304 | dst[i] = byte(x) 305 | x >>= 8 306 | } 307 | t.wordidx = t.wordidx[:0] 308 | return n, nil 309 | } 310 | 311 | type WordError interface { 312 | error 313 | Word() string 314 | } 315 | 316 | type UnexpectedWordError string 317 | type UnexpectedEndWordError string 318 | type UnknownWordError string 319 | 320 | func (e UnexpectedWordError) Word() string { return string(e) } 321 | func (e 
UnexpectedEndWordError) Word() string { return string(e) } 322 | func (e UnknownWordError) Word() string { return string(e) } 323 | func (e UnexpectedWordError) Error() string { 324 | return fmt.Sprintf("mnemonicode: unexpected word after short word: %q", string(e)) 325 | } 326 | func (e UnexpectedEndWordError) Error() string { 327 | return fmt.Sprintf("mnemonicode: unexpected end word: %q", string(e)) 328 | } 329 | func (e UnknownWordError) Error() string { 330 | return fmt.Sprintf("mnemonicode: unknown word: %q", string(e)) 331 | } 332 | 333 | // Transform implements the transform.Transformer interface. 334 | func (t *dectrans) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { 335 | //log.Printf("Transform(%d,%d,%t)\n", len(dst), len(src), atEOF) 336 | var n int 337 | for len(t.wordidx) > 0 || len(src) > 0 { 338 | for len(t.wordidx) < 3 { 339 | var word []byte 340 | var idx int 341 | //n, word, err = bufio.ScanWords(src, atEOF) 342 | n, word, err = scanWords(src, atEOF) 343 | src = src[n:] 344 | nSrc += n 345 | if err != nil { 346 | //log.Print("ScanWords error:", err) 347 | return 348 | } 349 | if word == nil { 350 | if atEOF { 351 | //log.Printf("atEOF (%d, %d) %d, %d", nDst, nSrc, n, len(src)) 352 | n = len(src) 353 | src = src[n:] 354 | nSrc += n 355 | break 356 | } 357 | //log.Printf("\t\t!atEOF (%d, %d)", nDst, nSrc) 358 | err = transform.ErrShortSrc 359 | return 360 | } 361 | if t.short { 362 | err = UnexpectedWordError(word) 363 | //log.Print("short error:", err) 364 | return 365 | } 366 | idx, _, t.short, err = closestWordIdx(string(word), len(t.wordidx) == 2) 367 | if err != nil { 368 | //log.Print("closestWordIdx error:", err) 369 | return 370 | } 371 | t.wordidx = append(t.wordidx, idx) 372 | } 373 | if len(t.wordidx) > 0 { 374 | n, err = t.transformWords(dst) 375 | dst = dst[n:] 376 | nDst += n 377 | if n != 4 { 378 | //log.Println("transformWords returned:", n, err) 379 | //log.Println("len(t.wordidx):", len(t.wordidx), len(src)) 
380 | } 381 | if err != nil { 382 | //log.Printf("\t\t\tRet1: (%d) %d, %d, %v\n", len(t.wordidx), nDst, nSrc, err) 383 | return 384 | } 385 | } 386 | } 387 | return 388 | } 389 | 390 | // 391 | 392 | const base = 1626 393 | 394 | // EncodeWordList encodes src into mnemomic words which are appended to dst. 395 | // The final wordlist is returned. 396 | // There will be WordsRequired(len(src)) words appeneded. 397 | func EncodeWordList(dst []string, src []byte) (result []string) { 398 | if n := len(dst) + WordsRequired(len(src)); cap(dst) < n { 399 | result = make([]string, len(dst), n) 400 | copy(result, dst) 401 | } else { 402 | result = dst 403 | } 404 | 405 | var x uint32 406 | for len(src) >= 4 { 407 | x = uint32(src[0]) 408 | x |= uint32(src[1]) << 8 409 | x |= uint32(src[2]) << 16 410 | x |= uint32(src[3]) << 24 411 | src = src[4:] 412 | 413 | i0 := int(x % base) 414 | i1 := int(x/base) % base 415 | i2 := int(x/base/base) % base 416 | result = append(result, WordList[i0], WordList[i1], WordList[i2]) 417 | } 418 | if len(src) > 0 { 419 | x = 0 420 | for i := len(src) - 1; i >= 0; i-- { 421 | x <<= 8 422 | x |= uint32(src[i]) 423 | } 424 | i := int(x % base) 425 | result = append(result, WordList[i]) 426 | if len(src) >= 2 { 427 | i = int(x/base) % base 428 | result = append(result, WordList[i]) 429 | } 430 | if len(src) == 3 { 431 | i = base + int(x/base/base)%7 432 | result = append(result, WordList[i]) 433 | } 434 | } 435 | 436 | return result 437 | } 438 | 439 | func closestWordIdx(word string, shortok bool) (idx int, exact, short bool, err error) { 440 | word = strings.ToLower(word) 441 | if idx, exact = wordMap[word]; !exact { 442 | // TODO(dchapes): normalize unicode, remove accents, etc 443 | // TODO(dchapes): phonetic algorithm or other closest match 444 | err = UnknownWordError(word) 445 | return 446 | } 447 | if short = (idx >= base); short { 448 | idx -= base 449 | if !shortok { 450 | err = UnexpectedEndWordError(word) 451 | } 452 | } 453 | return 
454 | } 455 | 456 | // DecodeWordList decodes the mnemonic words in src into bytes which are 457 | // appended to dst. 458 | func DecodeWordList(dst []byte, src []string) (result []byte, err error) { 459 | if n := (len(src)+2)/3*4 + len(dst); cap(dst) < n { 460 | result = make([]byte, len(dst), n) 461 | copy(result, dst) 462 | } else { 463 | result = dst 464 | } 465 | 466 | var idx [3]int 467 | for len(src) > 3 { 468 | if idx[0], _, _, err = closestWordIdx(src[0], false); err != nil { 469 | return nil, err 470 | } 471 | if idx[1], _, _, err = closestWordIdx(src[1], false); err != nil { 472 | return nil, err 473 | } 474 | if idx[2], _, _, err = closestWordIdx(src[2], false); err != nil { 475 | return nil, err 476 | } 477 | src = src[3:] 478 | x := uint32(idx[2]) 479 | x *= base 480 | x += uint32(idx[1]) 481 | x *= base 482 | x += uint32(idx[0]) 483 | result = append(result, byte(x), byte(x>>8), byte(x>>16), byte(x>>24)) 484 | } 485 | 486 | if len(src) > 0 { 487 | var short bool 488 | idx[1] = 0 489 | idx[2] = 0 490 | n := len(src) 491 | for i := 0; i < n; i++ { 492 | idx[i], _, short, err = closestWordIdx(src[i], i == 2) 493 | if err != nil { 494 | return nil, err 495 | } 496 | } 497 | x := uint32(idx[2]) 498 | x *= base 499 | x += uint32(idx[1]) 500 | x *= base 501 | x += uint32(idx[0]) 502 | result = append(result, byte(x)) 503 | if n > 1 { 504 | result = append(result, byte(x>>8)) 505 | } 506 | if n > 2 { 507 | result = append(result, byte(x>>16)) 508 | if !short { 509 | result = append(result, byte(x>>24)) 510 | } 511 | } 512 | } 513 | 514 | /* 515 | for len(src) > 0 { 516 | short := false 517 | n := len(src) 518 | if n > 3 { 519 | n = 3 520 | } 521 | for i := 0; i < n; i++ { 522 | idx[i], _, err = closestWordIdx(src[i]) 523 | if err != nil { 524 | return nil, err 525 | } 526 | if idx[i] >= base { 527 | if i != 2 || len(src) != 3 { 528 | return nil, UnexpectedEndWord(src[i]) 529 | } 530 | short = true 531 | idx[i] -= base 532 | } 533 | } 534 | for i := n; i < 3; 
i++ { 535 | idx[i] = 0 536 | } 537 | src = src[n:] 538 | x := uint32(idx[2]) 539 | x *= base 540 | x += uint32(idx[1]) 541 | x *= base 542 | x += uint32(idx[0]) 543 | result = append(result, byte(x)) 544 | if n > 1 { 545 | result = append(result, byte(x>>8)) 546 | } 547 | if n > 2 { 548 | result = append(result, byte(x>>16)) 549 | if !short { 550 | result = append(result, byte(x>>24)) 551 | } 552 | } 553 | } 554 | */ 555 | 556 | return result, nil 557 | } 558 | -------------------------------------------------------------------------------- /mnemonicode_test.go: -------------------------------------------------------------------------------- 1 | package mnemonicode 2 | 3 | import ( 4 | "bytes" 5 | "encoding/hex" 6 | "fmt" 7 | "strings" 8 | "testing" 9 | 10 | "golang.org/x/text/transform" 11 | ) 12 | 13 | func TestWordsReq(t *testing.T) { 14 | for i, n := range []int{0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10} { 15 | r := WordsRequired(i) 16 | if r != n { 17 | t.Errorf("WordsRequired(%d) returned %d, expected %d", i, r, n) 18 | } 19 | } 20 | } 21 | 22 | var testData = []struct { 23 | hex string 24 | words []string 25 | }{ 26 | {"01", []string{"acrobat"}}, 27 | {"0102", []string{"opera", "academy"}}, 28 | {"010203", []string{"kayak", "cement", "ego"}}, 29 | {"01020304", []string{"papa", "twist", "alpine"}}, 30 | {"0102030405", []string{"papa", "twist", "alpine", "admiral"}}, 31 | {"010203040506", []string{"papa", "twist", "alpine", "shine", "academy"}}, 32 | {"01020304050607", []string{"papa", "twist", "alpine", "chess", "flute", "ego"}}, 33 | {"0102030405060708", []string{"papa", "twist", "alpine", "content", "sailor", "athena"}}, 34 | {"00", []string{"academy"}}, 35 | {"5A06", []string{"academy", "acrobat"}}, 36 | {"FE5D28", []string{"academy", "acrobat", "fax"}}, 37 | {"A2B55000", []string{"academy", "acrobat", "active"}}, 38 | {"A2B5500003", []string{"academy", "acrobat", "active", "actor"}}, 39 | {"A2B550006B19", []string{"academy", "acrobat", "active", "actor", 
"adam"}}, 40 | {"A2B550000F7128", []string{"academy", "acrobat", "active", "actor", "adam", "fax"}}, 41 | {"A2B550009FCFC900", []string{"academy", "acrobat", "active", "actor", "adam", "admiral"}}, 42 | {"FF", []string{"exact"}}, 43 | {"FFFF", []string{"nevada", "archive"}}, 44 | {"FFFFFF", []string{"claudia", "photo", "yes"}}, 45 | {"FFFFFFFF", []string{"natural", "analyze", "verbal"}}, 46 | {"123456789ABCDEF123456789ABCDEF012345", []string{ 47 | "plastic", "roger", "vincent", "pilgrim", "flame", "secure", "apropos", "polka", "earth", "radio", "modern", "aladdin", "marion", "airline"}}, 48 | } 49 | 50 | func compareWordList(tb testing.TB, expected, got []string, args ...interface{}) { 51 | fail := false 52 | if len(expected) != len(got) { 53 | fail = true 54 | } 55 | for i := 0; !fail && i < len(expected); i++ { 56 | fail = expected[i] != got[i] 57 | } 58 | if fail { 59 | prefix := "" 60 | if len(args) > 0 { 61 | prefix += fmt.Sprintln(args...) 62 | prefix = prefix[:len(prefix)-1] + ": " 63 | } 64 | tb.Errorf("%vexpected %v, got %v", prefix, expected, got) 65 | } 66 | } 67 | 68 | func TestEncodeWordList(t *testing.T) { 69 | var result []string 70 | for i, d := range testData { 71 | raw, err := hex.DecodeString(d.hex) 72 | if err != nil { 73 | t.Fatal("bad test data:", i, err) 74 | } 75 | result = EncodeWordList(result, raw) 76 | compareWordList(t, d.words, result, i, d.hex) 77 | result = result[:0] 78 | } 79 | } 80 | 81 | func TestDecodeWordList(t *testing.T) { 82 | var result []byte 83 | var err error 84 | for i, d := range testData { 85 | raw, _ := hex.DecodeString(d.hex) 86 | result, err = DecodeWordList(result, d.words) 87 | if err != nil { 88 | t.Errorf("%2d %v failed: %v", i, d.words, err) 89 | continue 90 | } 91 | if !bytes.Equal(raw, result) { 92 | t.Errorf("%2d %v expected %v got %v", i, d.words, raw, result) 93 | } 94 | result = result[:0] 95 | } 96 | } 97 | 98 | func TestEncodeTransformer(t *testing.T) { 99 | cfg := NewDefaultConfig() 100 | 
cfg.GroupSeparator = " " 101 | enc := NewEncodeTransformer(cfg) 102 | for i, d := range testData { 103 | raw, err := hex.DecodeString(d.hex) 104 | if err != nil { 105 | t.Fatal("bad test data:", i, err) 106 | } 107 | result, _, err := transform.Bytes(enc, raw) 108 | if err != nil { 109 | t.Errorf("%2d %v failed: %v", i, d.words, err) 110 | continue 111 | } 112 | //t.Logf("%q", result) 113 | words := strings.Fields(string(result)) 114 | compareWordList(t, d.words, words, i, d.hex) 115 | } 116 | 117 | } 118 | 119 | func TestDecodeTransformer(t *testing.T) { 120 | dec := NewDecodeTransformer() 121 | for i, d := range testData { 122 | raw, _ := hex.DecodeString(d.hex) 123 | words := strings.Join(d.words, " ") 124 | result, _, err := transform.Bytes(dec, []byte(words)) 125 | if err != nil { 126 | t.Errorf("%2d %v failed: %v", i, d.words, err) 127 | continue 128 | } 129 | if !bytes.Equal(raw, result) { 130 | t.Errorf("%2d %v expected %v got %v", i, d.words, raw, result) 131 | } 132 | } 133 | } 134 | 135 | func TestEncodeFormatting(t *testing.T) { 136 | raw, _ := hex.DecodeString(testData[20].hex) 137 | input := string(raw) 138 | //words := testData[20].words 139 | tests := []struct { 140 | cfg *Config 141 | formatted string 142 | }{ 143 | {nil, "plastic roger vincent - pilgrim flame secure - apropos polka earth \nradio modern aladdin - marion airline"}, 144 | {&Config{ 145 | LinePrefix: "{P}", 146 | LineSuffix: "{S}\n", 147 | WordSeparator: "{w}", 148 | GroupSeparator: "{g}", 149 | WordsPerGroup: 2, 150 | GroupsPerLine: 2, 151 | WordPadding: '·', 152 | }, 153 | `{P}plastic{w}roger··{g}vincent{w}pilgrim{S} 154 | {P}flame··{w}secure·{g}apropos{w}polka··{S} 155 | {P}earth··{w}radio··{g}modern·{w}aladdin{S} 156 | {P}marion·{w}airline`}, 157 | } 158 | for i, d := range tests { 159 | enc := NewEncodeTransformer(d.cfg) 160 | result, _, err := transform.String(enc, input) 161 | if err != nil { 162 | t.Errorf("%2d transform failed: %v", i, err) 163 | continue 164 | } 165 | if 
result != d.formatted { 166 | t.Errorf("%2d expected:\n%q\ngot:\n%q", i, d.formatted, result) 167 | } 168 | } 169 | } 170 | 171 | func BenchmarkEncodeWordList(b *testing.B) { 172 | // the list of all known words (except the short end words) 173 | data, err := DecodeWordList(nil, WordList[:base]) 174 | if err != nil { 175 | b.Fatal("DecodeWordList failed:", err) 176 | } 177 | b.SetBytes(int64(len(data))) 178 | b.ReportAllocs() 179 | b.ResetTimer() 180 | var words []string 181 | for i := 0; i < b.N; i++ { 182 | words = EncodeWordList(words[:0], data) 183 | } 184 | } 185 | 186 | func BenchmarkDencodeWordList(b *testing.B) { 187 | b.ReportAllocs() 188 | var buf []byte 189 | var err error 190 | // decode the list of all known words (except the short end words) 191 | for i := 0; i < b.N; i++ { 192 | buf, err = DecodeWordList(buf[:0], WordList[:base]) 193 | if err != nil { 194 | b.Fatal("DecodeWordList failed:", err) 195 | } 196 | } 197 | b.SetBytes(int64(len(buf))) 198 | } 199 | 200 | func BenchmarkEncodeTransformer(b *testing.B) { 201 | // the list of all known words (except the short end words) 202 | data, err := DecodeWordList(nil, WordList[:base]) 203 | if err != nil { 204 | b.Fatal("DecodeWordList failed:", err) 205 | } 206 | enc := NewEncodeTransformer(nil) 207 | b.SetBytes(int64(len(data))) 208 | b.ReportAllocs() 209 | b.ResetTimer() 210 | for i := 0; i < b.N; i++ { 211 | _, _, err := transform.Bytes(enc, data) 212 | if err != nil { 213 | b.Fatal("encode transformer error:", err) 214 | } 215 | } 216 | } 217 | 218 | func BenchmarkDecodeTransformer(b *testing.B) { 219 | data, err := DecodeWordList(nil, WordList[:base]) 220 | if err != nil { 221 | b.Fatal("DecodeWordList failed:", err) 222 | } 223 | enc := NewEncodeTransformer(nil) 224 | words, _, err := transform.Bytes(enc, data) 225 | if err != nil { 226 | b.Fatal("encode transformer error:", err) 227 | } 228 | b.SetBytes(int64(len(data))) 229 | dec := NewDecodeTransformer() 230 | b.ReportAllocs() 231 | 
// modified version of bufio.ScanWords from bufio/scan.go

// scanWords is a split function for a Scanner that returns
// each non-letter separated word of text, with surrounding
// non-letters deleted. It will never return an empty string.
// The definition of letter is set by unicode.IsLetter.
func scanWords(data []byte, atEOF bool) (advance int, token []byte, err error) {
	// Skip leading non-letters.
	start := 0
	for width := 0; start < len(data); start += width {
		var r rune
		r, width = utf8.DecodeRune(data[start:])
		if unicode.IsLetter(r) {
			break
		}
	}
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	// Scan until non-letter, marking end of word.
	for width, i := 0, start; i < len(data); i += width {
		var r rune
		r, width = utf8.DecodeRune(data[i:])
		if !unicode.IsLetter(r) {
			return i + width, data[start:i], nil
		}
	}
	// If we're at EOF, we have a final, non-empty, non-terminated word. Return it.
	if atEOF && len(data) > start {
		return len(data), data[start:], nil
	}
	// Request more data, consuming the leading non-letters already
	// skipped so they are not rescanned on the next call. (This matches
	// bufio.ScanWords, which returns start here; returning 0 made the
	// Scanner buffer and rescan long runs of non-letters.)
	return start, nil, nil
}
const WordListVersion = "0.7"

// wordMap maps each word in WordList to its index; it is the reverse
// lookup table used when decoding words back to numeric values.
var wordMap = make(map[string]int, len(WordList))

func init() {
	// Build the word -> index reverse lookup from the compiled-in list.
	for i, w := range WordList {
		wordMap[w] = i
	}
}

// longestWord is the length in bytes of the longest word in WordList.
const longestWord = 7

// WordList is the compiled-in list of mnemonic words used for encoding
// and decoding.
// NOTE(review): the entries at and beyond index `base` appear to be the
// short "end" words used only for a trailing three-byte group (the tests
// benchmark with WordList[:base]) — confirm against base in mnemonicode.go.
var WordList = []string{
	"academy", "acrobat", "active", "actor", "adam", "admiral",
	"adrian", "africa", "agenda", "agent", "airline", "airport",
	"aladdin", "alarm", "alaska", "albert", "albino", "album",
	"alcohol", "alex", "algebra", "alibi", "alice", "alien",
	"alpha", "alpine", "amadeus", "amanda", "amazon", "amber",
	"america", "amigo", "analog", "anatomy", "angel", "animal",
	"antenna", "antonio", "apollo", "april", "archive", "arctic",
	"arizona", "arnold", "aroma", "arthur", "artist", "asia",
	"aspect", "aspirin", "athena", "athlete", "atlas", "audio",
	"august", "austria", "axiom", "aztec", "balance", "ballad",
	"banana", "bandit", "banjo", "barcode", "baron", "basic",
	"battery", "belgium", "berlin", "bermuda", "bernard", "bikini",
	"binary", "bingo", "biology", "block", "blonde", "bonus",
	"boris", "boston", "boxer", "brandy", "bravo", "brazil",
	"bronze", "brown", "bruce", "bruno", "burger", "burma",
	"cabinet", "cactus", "cafe", "cairo", "cake", "calypso",
	"camel", "camera", "campus", "canada", "canal", "cannon",
	"canoe", "cantina", "canvas", "canyon", "capital", "caramel",
	"caravan", "carbon", "cargo", "carlo", "carol", "carpet",
	"cartel", "casino", "castle", "castro", "catalog", "caviar",
	"cecilia", "cement", "center", "century", "ceramic", "chamber",
	"chance", "change", "chaos", "charlie", "charm", "charter",
	"chef", "chemist", "cherry", "chess", "chicago", "chicken",
	"chief", "china", "cigar", "cinema", "circus", "citizen",
	"city", "clara", "classic", "claudia", "clean", "client",
	"climax", "clinic", "clock", "club", "cobra", "coconut",
	"cola", "collect", "colombo", "colony", "color", "combat",
	"comedy", "comet", "command", "compact", "company", "complex",
	"concept", "concert", "connect", "consul", "contact", "context",
	"contour", "control", "convert", "copy", "corner", "corona",
	"correct", "cosmos", "couple", "courage", "cowboy", "craft",
	"crash", "credit", "cricket", "critic", "crown", "crystal",
	"cuba", "culture", "dallas", "dance", "daniel", "david",
	"decade", "decimal", "deliver", "delta", "deluxe", "demand",
	"demo", "denmark", "derby", "design", "detect", "develop",
	"diagram", "dialog", "diamond", "diana", "diego", "diesel",
	"diet", "digital", "dilemma", "diploma", "direct", "disco",
	"disney", "distant", "doctor", "dollar", "dominic", "domino",
	"donald", "dragon", "drama", "dublin", "duet", "dynamic",
	"east", "ecology", "economy", "edgar", "egypt", "elastic",
	"elegant", "element", "elite", "elvis", "email", "energy",
	"engine", "english", "episode", "equator", "escort", "ethnic",
	"europe", "everest", "evident", "exact", "example", "exit",
	"exotic", "export", "express", "extra", "fabric", "factor",
	"falcon", "family", "fantasy", "fashion", "fiber", "fiction",
	"fidel", "fiesta", "figure", "film", "filter", "final",
	"finance", "finish", "finland", "flash", "florida", "flower",
	"fluid", "flute", "focus", "ford", "forest", "formal",
	"format", "formula", "fortune", "forum", "fragile", "france",
	"frank", "friend", "frozen", "future", "gabriel", "galaxy",
	"gallery", "gamma", "garage", "garden", "garlic", "gemini",
	"general", "genetic", "genius", "germany", "global", "gloria",
	"golf", "gondola", "gong", "good", "gordon", "gorilla",
	"grand", "granite", "graph", "green", "group", "guide",
	"guitar", "guru", "hand", "happy", "harbor", "harmony",
	"harvard", "havana", "hawaii", "helena", "hello", "henry",
	"hilton", "history", "horizon", "hotel", "human", "humor",
	"icon", "idea", "igloo", "igor", "image", "impact",
	"import", "index", "india", "indigo", "input", "insect",
	"instant", "iris", "italian", "jacket", "jacob", "jaguar",
	"janet", "japan", "jargon", "jazz", "jeep", "john",
	"joker", "jordan", "jumbo", "june", "jungle", "junior",
	"jupiter", "karate", "karma", "kayak", "kermit", "kilo",
	"king", "koala", "korea", "labor", "lady", "lagoon",
	"laptop", "laser", "latin", "lava", "lecture", "left",
	"legal", "lemon", "level", "lexicon", "liberal", "libra",
	"limbo", "limit", "linda", "linear", "lion", "liquid",
	"liter", "little", "llama", "lobby", "lobster", "local",
	"logic", "logo", "lola", "london", "lotus", "lucas",
	"lunar", "machine", "macro", "madam", "madonna", "madrid",
	"maestro", "magic", "magnet", "magnum", "major", "mama",
	"mambo", "manager", "mango", "manila", "marco", "marina",
	"market", "mars", "martin", "marvin", "master", "matrix",
	"maximum", "media", "medical", "mega", "melody", "melon",
	"memo", "mental", "mentor", "menu", "mercury", "message",
	"metal", "meteor", "meter", "method", "metro", "mexico",
	"miami", "micro", "million", "mineral", "minimum", "minus",
	"minute", "miracle", "mirage", "miranda", "mister", "mixer",
	"mobile", "model", "modem", "modern", "modular", "moment",
	"monaco", "monica", "monitor", "mono", "monster", "montana",
	"morgan", "motel", "motif", "motor", "mozart", "multi",
	"museum", "music", "mustang", "natural", "neon", "nepal",
	"neptune", "nerve", "neutral", "nevada", "news", "ninja",
	"nirvana", "normal", "nova", "novel", "nuclear", "numeric",
	"nylon", "oasis", "object", "observe", "ocean", "octopus",
	"olivia", "olympic", "omega", "opera", "optic", "optimal",
	"orange", "orbit", "organic", "orient", "origin", "orlando",
	"oscar", "oxford", "oxygen", "ozone", "pablo", "pacific",
	"pagoda", "palace", "pamela", "panama", "panda", "panel",
	"panic", "paradox", "pardon", "paris", "parker", "parking",
	"parody", "partner", "passage", "passive", "pasta", "pastel",
	"patent", "patriot", "patrol", "patron", "pegasus", "pelican",
	"penguin", "pepper", "percent", "perfect", "perfume", "period",
	"permit", "person", "peru", "phone", "photo", "piano",
	"picasso", "picnic", "picture", "pigment", "pilgrim", "pilot",
	"pirate", "pixel", "pizza", "planet", "plasma", "plaster",
	"plastic", "plaza", "pocket", "poem", "poetic", "poker",
	"polaris", "police", "politic", "polo", "polygon", "pony",
	"popcorn", "popular", "postage", "postal", "precise", "prefix",
	"premium", "present", "price", "prince", "printer", "prism",
	"private", "product", "profile", "program", "project", "protect",
	"proton", "public", "pulse", "puma", "pyramid", "queen",
	"radar", "radio", "random", "rapid", "rebel", "record",
	"recycle", "reflex", "reform", "regard", "regular", "relax",
	"report", "reptile", "reverse", "ricardo", "ringo", "ritual",
	"robert", "robot", "rocket", "rodeo", "romeo", "royal",
	"russian", "safari", "salad", "salami", "salmon", "salon",
	"salute", "samba", "sandra", "santana", "sardine", "school",
	"screen", "script", "second", "secret", "section", "segment",
	"select", "seminar", "senator", "senior", "sensor", "serial",
	"service", "sheriff", "shock", "sierra", "signal", "silicon",
	"silver", "similar", "simon", "single", "siren", "slogan",
	"social", "soda", "solar", "solid", "solo", "sonic",
	"soviet", "special", "speed", "spiral", "spirit", "sport",
	"static", "station", "status", "stereo", "stone", "stop",
	"street", "strong", "student", "studio", "style", "subject",
	"sultan", "super", "susan", "sushi", "suzuki", "switch",
	"symbol", "system", "tactic", "tahiti", "talent", "tango",
	"tarzan", "taxi", "telex", "tempo", "tennis", "texas",
	"textile", "theory", "thermos", "tiger", "titanic", "tokyo",
	"tomato", "topic", "tornado", "toronto", "torpedo", "total",
	"totem", "tourist", "tractor", "traffic", "transit", "trapeze",
	"travel", "tribal", "trick", "trident", "trilogy", "tripod",
	"tropic", "trumpet", "tulip", "tuna", "turbo", "twist",
	"ultra", "uniform", "union", "uranium", "vacuum", "valid",
	"vampire", "vanilla", "vatican", "velvet", "ventura", "venus",
	"vertigo", "veteran", "victor", "video", "vienna", "viking",
	"village", "vincent", "violet", "violin", "virtual", "virus",
	"visa", "vision", "visitor", "visual", "vitamin", "viva",
	"vocal", "vodka", "volcano", "voltage", "volume", "voyage",
	"water", "weekend", "welcome", "western", "window", "winter",
	"wizard", "wolf", "world", "xray", "yankee", "yoga",
	"yogurt", "yoyo", "zebra", "zero", "zigzag", "zipper",
	"zodiac", "zoom", "abraham", "action", "address", "alabama",
	"alfred", "almond", "ammonia", "analyze", "annual", "answer",
	"apple", "arena", "armada", "arsenal", "atlanta", "atomic",
	"avenue", "average", "bagel", "baker", "ballet", "bambino",
	"bamboo", "barbara", "basket", "bazaar", "benefit", "bicycle",
	"bishop", "blitz", "bonjour", "bottle", "bridge", "british",
	"brother", "brush", "budget", "cabaret", "cadet", "candle",
	"capitan", "capsule", "career", "cartoon", "channel", "chapter",
	"cheese", "circle", "cobalt", "cockpit", "college", "compass",
	"comrade", "condor", "crimson", "cyclone", "darwin", "declare",
	"degree", "delete", "delphi", "denver", "desert", "divide",
	"dolby", "domain", "domingo", "double", "drink", "driver",
	"eagle", "earth", "echo", "eclipse", "editor", "educate",
	"edward", "effect", "electra", "emerald", "emotion", "empire",
	"empty", "escape", "eternal", "evening", "exhibit", "expand",
	"explore", "extreme", "ferrari", "first", "flag", "folio",
	"forget", "forward", "freedom", "fresh", "friday", "fuji",
	"galileo", "garcia", "genesis", "gold", "gravity", "habitat",
	"hamlet", "harlem", "helium", "holiday", "house", "hunter",
	"ibiza", "iceberg", "imagine", "infant", "isotope", "jackson",
	"jamaica", "jasmine", "java", "jessica", "judo", "kitchen",
	"lazarus", "letter", "license", "lithium", "loyal", "lucky",
	"magenta", "mailbox", "manual", "marble", "mary", "maxwell",
	"mayor", "milk", "monarch", "monday", "money", "morning",
	"mother", "mystery", "native", "nectar", "nelson", "network",
	"next", "nikita", "nobel", "nobody", "nominal", "norway",
	"nothing", "number", "october", "office", "oliver", "opinion",
	"option", "order", "outside", "package", "pancake", "pandora",
	"panther", "papa", "patient", "pattern", "pedro", "pencil",
	"people", "phantom", "philips", "pioneer", "pluto", "podium",
	"portal", "potato", "prize", "process", "protein", "proxy",
	"pump", "pupil", "python", "quality", "quarter", "quiet",
	"rabbit", "radical", "radius", "rainbow", "ralph", "ramirez",
	"ravioli", "raymond", "respect", "respond", "result", "resume",
	"retro", "richard", "right", "risk", "river", "roger",
	"roman", "rondo", "sabrina", "salary", "salsa", "sample",
	"samuel", "saturn", "savage", "scarlet", "scoop", "scorpio",
	"scratch", "scroll", "sector", "serpent", "shadow", "shampoo",
	"sharon", "sharp", "short", "shrink", "silence", "silk",
	"simple", "slang", "smart", "smoke", "snake", "society",
	"sonar", "sonata", "soprano", "source", "sparta", "sphere",
	"spider", "sponsor", "spring", "acid", "adios", "agatha",
	"alamo", "alert", "almanac", "aloha", "andrea", "anita",
	"arcade", "aurora", "avalon", "baby", "baggage", "balloon",
	"bank", "basil", "begin", "biscuit", "blue", "bombay",
	"brain", "brenda", "brigade", "cable", "carmen", "cello",
	"celtic", "chariot", "chrome", "citrus", "civil", "cloud",
	"common", "compare", "cool", "copper", "coral", "crater",
	"cubic", "cupid", "cycle", "depend", "door", "dream",
	"dynasty", "edison", "edition", "enigma", "equal", "eric",
	"event", "evita", "exodus", "extend", "famous", "farmer",
	"food", "fossil", "frog", "fruit", "geneva", "gentle",
	"george", "giant", "gilbert", "gossip", "gram", "greek",
	"grille", "hammer", "harvest", "hazard", "heaven", "herbert",
	"heroic", "hexagon", "husband", "immune", "inca", "inch",
	"initial", "isabel", "ivory", "jason", "jerome", "joel",
	"joshua", "journal", "judge", "juliet", "jump", "justice",
	"kimono", "kinetic", "leonid", "lima", "maze", "medusa",
	"member", "memphis", "michael", "miguel", "milan", "mile",
	"miller", "mimic", "mimosa", "mission", "monkey", "moral",
	"moses", "mouse", "nancy", "natasha", "nebula", "nickel",
	"nina", "noise", "orchid", "oregano", "origami", "orinoco",
	"orion", "othello", "paper", "paprika", "prelude", "prepare",
	"pretend", "profit", "promise", "provide", "puzzle", "remote",
	"repair", "reply", "rival", "riviera", "robin", "rose",
	"rover", "rudolf", "saga", "sahara", "scholar", "shelter",
	"ship", "shoe", "sigma", "sister", "sleep", "smile",
	"spain", "spark", "split", "spray", "square", "stadium",
	"star", "storm", "story", "strange", "stretch", "stuart",
	"subway", "sugar", "sulfur", "summer", "survive", "sweet",
	"swim", "table", "taboo", "target", "teacher", "telecom",
	"temple", "tibet", "ticket", "tina", "today", "toga",
	"tommy", "tower", "trivial", "tunnel", "turtle", "twin",
	"uncle", "unicorn", "unique", "update", "valery", "vega",
	"version", "voodoo", "warning", "william", "wonder", "year",
	"yellow", "young", "absent", "absorb", "accent", "alfonso",
	"alias", "ambient", "andy", "anvil", "appear", "apropos",
	"archer", "ariel", "armor", "arrow", "austin", "avatar",
	"axis", "baboon", "bahama", "bali", "balsa", "bazooka",
	"beach", "beast", "beatles", "beauty", "before", "benny",
	"betty", "between", "beyond", "billy", "bison", "blast",
	"bless", "bogart", "bonanza", "book", "border", "brave",
	"bread", "break", "broken", "bucket", "buenos", "buffalo",
	"bundle", "button", "buzzer", "byte", "caesar", "camilla",
	"canary", "candid", "carrot", "cave", "chant", "child",
	"choice", "chris", "cipher", "clarion", "clark", "clever",
	"cliff", "clone", "conan", "conduct", "congo", "content",
	"costume", "cotton", "cover", "crack", "current", "danube",
	"data", "decide", "desire", "detail", "dexter", "dinner",
	"dispute", "donor", "druid", "drum", "easy", "eddie",
	"enjoy", "enrico", "epoxy", "erosion", "except", "exile",
	"explain", "fame", "fast", "father", "felix", "field",
	"fiona", "fire", "fish", "flame", "flex", "flipper",
	"float", "flood", "floor", "forbid", "forever", "fractal",
	"frame", "freddie", "front", "fuel", "gallop", "game",
	"garbo", "gate", "gibson", "ginger", "giraffe", "gizmo",
	"glass", "goblin", "gopher", "grace", "gray", "gregory",
	"grid", "griffin", "ground", "guest", "gustav", "gyro",
	"hair", "halt", "harris", "heart", "heavy", "herman",
	"hippie", "hobby", "honey", "hope", "horse", "hostel",
	"hydro", "imitate", "info", "ingrid", "inside", "invent",
	"invest", "invite", "iron", "ivan", "james", "jester",
	"jimmy", "join", "joseph", "juice", "julius", "july",
	"justin", "kansas", "karl", "kevin", "kiwi", "ladder",
	"lake", "laura", "learn", "legacy", "legend", "lesson",
	"life", "light", "list", "locate", "lopez", "lorenzo",
	"love", "lunch", "malta", "mammal", "margo", "marion",
	"mask", "match", "mayday", "meaning", "mercy", "middle",
	"mike", "mirror", "modest", "morph", "morris", "nadia",
	"nato", "navy", "needle", "neuron", "never", "newton",
	"nice", "night", "nissan", "nitro", "nixon", "north",
	"oberon", "octavia", "ohio", "olga", "open", "opus",
	"orca", "oval", "owner", "page", "paint", "palma",
	"parade", "parent", "parole", "paul", "peace", "pearl",
	"perform", "phoenix", "phrase", "pierre", "pinball", "place",
	"plate", "plato", "plume", "pogo", "point", "polite",
	"polka", "poncho", "powder", "prague", "press", "presto",
	"pretty", "prime", "promo", "quasi", "quest", "quick",
	"quiz", "quota", "race", "rachel", "raja", "ranger",
	"region", "remark", "rent", "reward", "rhino", "ribbon",
	"rider", "road", "rodent", "round", "rubber", "ruby",
	"rufus", "sabine", "saddle", "sailor", "saint", "salt",
	"satire", "scale", "scuba", "season", "secure", "shake",
	"shallow", "shannon", "shave", "shelf", "sherman", "shine",
	"shirt", "side", "sinatra", "sincere", "size", "slalom",
	"slow", "small", "snow", "sofia", "song", "sound",
	"south", "speech", "spell", "spend", "spoon", "stage",
	"stamp", "stand", "state", "stella", "stick", "sting",
	"stock", "store", "sunday", "sunset", "support", "sweden",
	"swing", "tape", "think", "thomas", "tictac", "time",
	"toast", "tobacco", "tonight", "torch", "torso", "touch",
	"toyota", "trade", "tribune", "trinity", "triton", "truck",
	"trust", "type", "under", "unit", "urban", "urgent",
	"user", "value", "vendor", "venice", "verona", "vibrate",
	"virgo", "visible", "vista", "vital", "voice", "vortex",
	"waiter", "watch", "wave", "weather", "wedding", "wheel",
	"whiskey", "wisdom", "deal", "null", "nurse", "quebec",
	"reserve", "reunion", "roof", "singer", "verbal", "amen",
	"ego", "fax", "jet", "job", "rio", "ski",
	"yes",
}