├── .dockerignore ├── archive ├── .gitignore ├── test.zip ├── zipdoc_test.pdf ├── zipdoc_test.go ├── reader_test.go ├── writer_test.go ├── doc.go ├── blob.go ├── file.go ├── writer.go ├── zipdoc.go └── reader.go ├── version └── version.go ├── cloud ├── test.zip ├── doc.go ├── client_test.go ├── client.go └── plumbing.go ├── docs ├── console.gif ├── mput-console.png ├── print-dialog.png ├── create-print-plugin.png ├── run-shell-script-1.png ├── run-shell-script-2.png ├── create-release.md └── tutorial-print-macosx.md ├── encoding └── rm │ ├── test_v3.rm │ ├── test_v5.rm │ ├── marshal.go │ ├── unmarshal_test.go │ ├── unmarshal.go │ └── rm.go ├── annotations ├── testfiles │ ├── a3.pdf │ ├── a3.zip │ ├── a4.pdf │ ├── a4.zip │ ├── a5.pdf │ ├── a5.zip │ ├── rm.pdf │ ├── rm.zip │ ├── tmpl.zip │ ├── letter.pdf │ ├── letter.zip │ └── strange.zip ├── test.sh ├── license.go ├── pdf_test.go └── pdf.go ├── util ├── path.go ├── path_test.go └── util.go ├── Dockerfile ├── shell ├── pwd.go ├── account.go ├── version.go ├── arguments_test.go ├── refresh.go ├── nuke.go ├── rm.go ├── ls.go ├── stat.go ├── cd.go ├── get.go ├── arguments.go ├── mkdir.go ├── put.go ├── custom_completer.go ├── find.go ├── mv.go ├── geta.go ├── shell.go ├── rmfs_completer.go ├── fs_completer.go ├── mget.go └── mput.go ├── model ├── auth.go ├── node.go └── document.go ├── .gitignore ├── api ├── sync15 │ ├── remotestorage.go │ ├── entry.go │ ├── fieldreader.go │ ├── blobstorage.go │ ├── common.go │ ├── tree_test.go │ ├── blobdoc.go │ └── tree.go ├── auth_test.go ├── api.go ├── auth.go └── sync10 │ └── api.go ├── .github └── workflows │ ├── go.yml │ └── release.yml ├── filetree ├── treeutil.go ├── filetree.go └── filetree_test.go ├── log └── log.go ├── config ├── url.go ├── config.go └── config_test.go ├── script └── prepare-release.sh ├── go.mod ├── main.go ├── auth ├── transport.go ├── store.go └── auth.go ├── CHANGELOG.md ├── README.md └── go.sum /.dockerignore: -------------------------------------------------------------------------------- 1 | Dockerfile 2 | -------------------------------------------------------------------------------- /archive/.gitignore: -------------------------------------------------------------------------------- 1 | write.zip 2 | -------------------------------------------------------------------------------- /version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | var Version = "dev" 4 | -------------------------------------------------------------------------------- /cloud/test.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/cloud/test.zip -------------------------------------------------------------------------------- /archive/test.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/archive/test.zip -------------------------------------------------------------------------------- /docs/console.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/docs/console.gif -------------------------------------------------------------------------------- /archive/zipdoc_test.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/archive/zipdoc_test.pdf 
-------------------------------------------------------------------------------- /docs/mput-console.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/docs/mput-console.png -------------------------------------------------------------------------------- /docs/print-dialog.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/docs/print-dialog.png -------------------------------------------------------------------------------- /encoding/rm/test_v3.rm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/encoding/rm/test_v3.rm -------------------------------------------------------------------------------- /encoding/rm/test_v5.rm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/encoding/rm/test_v5.rm -------------------------------------------------------------------------------- /annotations/testfiles/a3.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/annotations/testfiles/a3.pdf -------------------------------------------------------------------------------- /annotations/testfiles/a3.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/annotations/testfiles/a3.zip -------------------------------------------------------------------------------- /annotations/testfiles/a4.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/annotations/testfiles/a4.pdf -------------------------------------------------------------------------------- /annotations/testfiles/a4.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/annotations/testfiles/a4.zip -------------------------------------------------------------------------------- /annotations/testfiles/a5.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/annotations/testfiles/a5.pdf -------------------------------------------------------------------------------- /annotations/testfiles/a5.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/annotations/testfiles/a5.zip -------------------------------------------------------------------------------- /annotations/testfiles/rm.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/annotations/testfiles/rm.pdf -------------------------------------------------------------------------------- /annotations/testfiles/rm.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/annotations/testfiles/rm.zip -------------------------------------------------------------------------------- /docs/create-print-plugin.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/docs/create-print-plugin.png -------------------------------------------------------------------------------- /docs/run-shell-script-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/docs/run-shell-script-1.png -------------------------------------------------------------------------------- /docs/run-shell-script-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/docs/run-shell-script-2.png -------------------------------------------------------------------------------- /annotations/testfiles/tmpl.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/annotations/testfiles/tmpl.zip -------------------------------------------------------------------------------- /annotations/testfiles/letter.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/annotations/testfiles/letter.pdf -------------------------------------------------------------------------------- /annotations/testfiles/letter.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/annotations/testfiles/letter.zip -------------------------------------------------------------------------------- /annotations/testfiles/strange.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelusionalLogic/rmapi/HEAD/annotations/testfiles/strange.zip -------------------------------------------------------------------------------- /util/path.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import "strings" 4 | 5 | func SplitPath(path string) []string { 6 | return strings.Split(path, "/") 7 | } 8 | -------------------------------------------------------------------------------- /encoding/rm/marshal.go: -------------------------------------------------------------------------------- 1 | package rm 2 | 3 | // MarshalBinary implements encoding.MarshalBinary for 4 | // transforming a Rm page into bytes 5 | // TODO 6 | func (rm *Rm) MarshalBinary() (data []byte, err error) { 7 | return nil, nil 8 | } 9 | -------------------------------------------------------------------------------- /archive/zipdoc_test.go: -------------------------------------------------------------------------------- 1 | package archive 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func TestZipFile(t *testing.T) { 9 | d, err := CreateZipDocument("1234", "zipdoc_test.pdf") 10 | fmt.Println(d) 11 | if err != nil { 12 | t.Error(err) 13 | } 14 | 15 | } 16 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine AS builder 2 | RUN apk add --no-cache git 3 | 4 | WORKDIR /src 5 | COPY . . 
6 | RUN CGO_ENABLED=0 go build 7 | 8 | FROM alpine:latest 9 | 10 | RUN adduser -D app 11 | USER app 12 | 13 | COPY --from=builder /src/rmapi /usr/local/bin/rmapi 14 | ENTRYPOINT ["rmapi"] 15 | -------------------------------------------------------------------------------- /shell/pwd.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import "github.com/abiosoft/ishell" 4 | 5 | func pwdCmd(ctx *ShellCtxt) *ishell.Cmd { 6 | return &ishell.Cmd{ 7 | Name: "pwd", 8 | Help: "print current directory", 9 | Func: func(c *ishell.Context) { 10 | c.Println(ctx.path) 11 | }, 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /annotations/test.sh: -------------------------------------------------------------------------------- 1 | set -e 2 | path=$(dirname $0) 3 | go clean -testcache 4 | go test -v github.com/juruen/rmapi/annotations 5 | xdg-open /tmp/strange.pdf 6 | xdg-open /tmp/tmpl.pdf 7 | xdg-open /tmp/a3.pdf 8 | xdg-open /tmp/a4.pdf 9 | xdg-open /tmp/a5.pdf 10 | xdg-open /tmp/rm.pdf 11 | xdg-open /tmp/letter.pdf 12 | -------------------------------------------------------------------------------- /model/auth.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | type AuthTokens struct { 4 | DeviceToken string `yaml:"devicetoken"` 5 | UserToken string `yaml:"usertoken"` 6 | } 7 | 8 | type DeviceTokenRequest struct { 9 | Code string `json:"code"` 10 | DeviceDesc string `json:"deviceDesc"` 11 | DeviceId string `json:"deviceID"` 12 | } 13 | -------------------------------------------------------------------------------- /shell/account.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "github.com/abiosoft/ishell" 5 | ) 6 | 7 | func accountCmd(ctx *ShellCtxt) *ishell.Cmd { 8 | return &ishell.Cmd{ 9 | Name: "account", 10 | Help: "account info", 11 | Func: func(c *ishell.Context) { 12 | c.Printf("User: %s, SyncVersion: %d\n", ctx.UserInfo.User, ctx.UserInfo.SyncVersion) 13 | }, 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /shell/version.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "github.com/abiosoft/ishell" 5 | "github.com/juruen/rmapi/version" 6 | ) 7 | 8 | func versionCmd(ctx *ShellCtxt) *ishell.Cmd { 9 | return &ishell.Cmd{ 10 | Name: "version", 11 | Help: "show rmapi version", 12 | Func: func(c *ishell.Context) { 13 | c.Println("rmapi version:", version.Version) 14 | }, 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.dll 4 | *.so 5 | *.dylib 6 | rmapi 7 | 8 | # Test binary, build with `go test -c` 9 | *.test 10 | 11 | # Test artifacts 12 | test_writer.zip 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 18 | .glide/ 19 | 20 | -------------------------------------------------------------------------------- /api/sync15/remotestorage.go: -------------------------------------------------------------------------------- 1 | package sync15 2 | 3 | import "io" 4 | 5 | type RemoteStorage interface { 6 | 
GetRootIndex() (hash string, generation int64, err error) 7 | GetReader(hash string) (io.ReadCloser, error) 8 | } 9 | 10 | type RemoteStorageWriter interface { 11 | UpdateRootIndex(hash string, generation int64) (gen int64, err error) 12 | GetWriter(hash string, writer io.WriteCloser) error 13 | } 14 | -------------------------------------------------------------------------------- /archive/reader_test.go: -------------------------------------------------------------------------------- 1 | package archive 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | ) 7 | 8 | func TestRead(t *testing.T) { 9 | zip := NewZip() 10 | 11 | // open test file 12 | file, err := os.Open("test.zip") 13 | if err != nil { 14 | t.Error(err) 15 | } 16 | defer file.Close() 17 | 18 | fi, err := file.Stat() 19 | if err != nil { 20 | t.Error(err) 21 | } 22 | 23 | // read file into note 24 | err = zip.Read(file, fi.Size()) 25 | if err != nil { 26 | t.Error(err) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /annotations/license.go: -------------------------------------------------------------------------------- 1 | package annotations 2 | 3 | import ( 4 | "time" 5 | _ "unsafe" 6 | 7 | "github.com/unidoc/unipdf/v3/common/license" 8 | ) 9 | 10 | //go:linkname licenseKey github.com/unidoc/unipdf/v3/common/license.licenseKey 11 | var licenseKey *license.LicenseKey 12 | 13 | func init() { 14 | lk := license.LicenseKey{} 15 | lk.CustomerName = "community" 16 | lk.Tier = license.LicenseTierCommunity 17 | lk.CreatedAt = time.Now().UTC() 18 | lk.CreatedAtInt = lk.CreatedAt.Unix() 19 | licenseKey = &lk 20 | } 21 | -------------------------------------------------------------------------------- /shell/arguments_test.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestParseArguments(t *testing.T) { 10 | assert.Equal(t, []string{"foo", "bar", "baz"}, parseArguments("foo bar baz")) 11 | assert.Equal(t, []string{"foo", "bar", "baz"}, parseArguments(" foo bar baz ")) 12 | assert.Equal(t, []string{"foo", "bar\\ baz"}, parseArguments(" foo bar\\ baz ")) 13 | assert.Equal(t, []string{"foo", "bar\\ baz", "bax"}, parseArguments(" foo bar\\ baz bax")) 14 | } 15 | -------------------------------------------------------------------------------- /archive/writer_test.go: -------------------------------------------------------------------------------- 1 | package archive 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | ) 7 | 8 | func TestWrite(t *testing.T) { 9 | zip := NewZip() 10 | zip.Content.FileType = "pdf" 11 | zip.Content.PageCount = 1 12 | zip.Pages = append(zip.Pages, Page{Pagedata: "Blank"}) 13 | zip.Payload = []byte{'p', 'd', 'f'} 14 | 15 | // create test file 16 | file, err := os.Create("write.zip") 17 | if err != nil { 18 | t.Error(err) 19 | } 20 | defer file.Close() 21 | 22 | // read file into note 23 | err = zip.Write(file) 24 | if err != nil { 25 | t.Error(err) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /api/sync15/entry.go: -------------------------------------------------------------------------------- 1 | package sync15 2 | 3 | import ( 4 | "strconv" 5 | "strings" 6 | ) 7 | 8 | type Entry struct { 9 | Hash string 10 | Type string 11 | DocumentID string 12 | Subfiles int 13 | Size int64 14 | } 15 | 16 | func (d *Entry) Line() string { 17 | var sb strings.Builder 18 | 
sb.WriteString(d.Hash) 19 | sb.WriteRune(Delimiter) 20 | sb.WriteString(FileType) 21 | sb.WriteRune(Delimiter) 22 | sb.WriteString(d.DocumentID) 23 | sb.WriteRune(Delimiter) 24 | sb.WriteString("0") 25 | sb.WriteRune(Delimiter) 26 | sb.WriteString(strconv.FormatInt(d.Size, 10)) 27 | return sb.String() 28 | } 29 | -------------------------------------------------------------------------------- /api/sync15/fieldreader.go: -------------------------------------------------------------------------------- 1 | package sync15 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | type FieldReader struct { 9 | index int 10 | fields []string 11 | } 12 | 13 | func (fr *FieldReader) HasNext() bool { 14 | return fr.index < len(fr.fields) 15 | } 16 | 17 | func (fr *FieldReader) Next() (string, error) { 18 | if fr.index >= len(fr.fields) { 19 | return "", fmt.Errorf("out of bounds") 20 | } 21 | res := fr.fields[fr.index] 22 | fr.index++ 23 | return res, nil 24 | } 25 | 26 | func NewFieldReader(line string) FieldReader { 27 | fld := strings.FieldsFunc(line, func(r rune) bool { return r == Delimiter }) 28 | 29 | fr := FieldReader{ 30 | index: 0, 31 | fields: fld, 32 | } 33 | return fr 34 | } 35 | -------------------------------------------------------------------------------- /shell/refresh.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/abiosoft/ishell" 7 | ) 8 | 9 | func refreshCmd(ctx *ShellCtxt) *ishell.Cmd { 10 | return &ishell.Cmd{ 11 | Name: "refresh", 12 | Help: "refreshes the tree with remote changes", 13 | Func: func(c *ishell.Context) { 14 | err := ctx.api.Refresh() 15 | if err != nil { 16 | c.Err(err) 17 | return 18 | } 19 | n, err := ctx.api.Filetree().NodeByPath(ctx.path, nil) 20 | if err != nil { 21 | c.Err(errors.New("current path is invalid")) 22 | 23 | ctx.node = ctx.api.Filetree().Root() 24 | ctx.path = ctx.node.Name() 25 | c.SetPrompt(ctx.prompt()) 26 | return 27 | } 28 | ctx.node = n 29 | }, 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /shell/nuke.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/abiosoft/ishell" 7 | ) 8 | 9 | func nukeCmd(ctx *ShellCtxt) *ishell.Cmd { 10 | return &ishell.Cmd{ 11 | Name: "nuke", 12 | Help: "deletes everything", 13 | Completer: createEntryCompleter(ctx), 14 | Func: func(c *ishell.Context) { 15 | fmt.Print("Are you sure, this will DELETE EVERYTHING! 
type [YES]:") 16 | var response string 17 | _, err := fmt.Scanln(&response) 18 | if err != nil { 19 | return 20 | } 21 | if response != "YES" { 22 | return 23 | } 24 | fmt.Println("Nuking") 25 | err = ctx.api.Nuke() 26 | 27 | if err != nil { 28 | c.Err(fmt.Errorf("failed to delete entry: %v", err)) 29 | return 30 | } 31 | ctx.api.Filetree().Clear() 32 | }, 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /shell/rm.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | 7 | "github.com/abiosoft/ishell" 8 | ) 9 | 10 | func rmCmd(ctx *ShellCtxt) *ishell.Cmd { 11 | return &ishell.Cmd{ 12 | Name: "rm", 13 | Help: "delete entry", 14 | Completer: createEntryCompleter(ctx), 15 | Func: func(c *ishell.Context) { 16 | for _, target := range c.Args { 17 | node, err := ctx.api.Filetree().NodeByPath(target, ctx.node) 18 | 19 | if err != nil { 20 | c.Err(errors.New("entry doesn't exist")) 21 | return 22 | } 23 | 24 | err = ctx.api.DeleteEntry(node) 25 | 26 | if err != nil { 27 | c.Err(errors.New(fmt.Sprint("failed to delete entry", err))) 28 | return 29 | } 30 | 31 | ctx.api.Filetree().DeleteNode(node) 32 | } 33 | 34 | c.Println("entry(s) deleted") 35 | }, 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /util/path_test.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestSplitPath(t *testing.T) { 10 | dirs := SplitPath("/") 11 | assert.Equal(t, 2, len(dirs)) 12 | assert.Equal(t, "", dirs[0]) 13 | assert.Equal(t, "", dirs[1]) 14 | 15 | dirs = SplitPath("/foo") 16 | assert.Equal(t, 2, len(dirs)) 17 | assert.Equal(t, "", dirs[0]) 18 | assert.Equal(t, "foo", dirs[1]) 19 | 20 | dirs = SplitPath("/foo/bar") 21 | assert.Equal(t, 3, len(dirs)) 22 | assert.Equal(t, "", dirs[0]) 23 | assert.Equal(t, "foo", dirs[1]) 24 | assert.Equal(t, "bar", dirs[2]) 25 | 26 | dirs = SplitPath("/foo/bar/") 27 | assert.Equal(t, 4, len(dirs)) 28 | assert.Equal(t, "", dirs[0]) 29 | assert.Equal(t, "foo", dirs[1]) 30 | assert.Equal(t, "bar", dirs[2]) 31 | assert.Equal(t, "", dirs[3]) 32 | } 33 | -------------------------------------------------------------------------------- /shell/ls.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/abiosoft/ishell" 7 | ) 8 | 9 | func lsCmd(ctx *ShellCtxt) *ishell.Cmd { 10 | return &ishell.Cmd{ 11 | Name: "ls", 12 | Help: "list directory", 13 | Completer: createEntryCompleter(ctx), 14 | Func: func(c *ishell.Context) { 15 | node := ctx.node 16 | 17 | if len(c.Args) == 1 { 18 | target := c.Args[0] 19 | 20 | argNode, err := ctx.api.Filetree().NodeByPath(target, ctx.node) 21 | 22 | if err != nil || node.IsFile() { 23 | c.Err(errors.New("directory doesn't exist")) 24 | return 25 | } 26 | 27 | node = argNode 28 | } 29 | 30 | for _, e := range node.Children { 31 | eType := "d" 32 | if e.IsFile() { 33 | eType = "f" 34 | } 35 | c.Printf("[%s]\t%s\n", eType, e.Name()) 36 | } 37 | }, 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /shell/stat.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | 7 | 
"github.com/abiosoft/ishell" 8 | ) 9 | 10 | func statCmd(ctx *ShellCtxt) *ishell.Cmd { 11 | return &ishell.Cmd{ 12 | Name: "stat", 13 | Help: "fetch entry metadata", 14 | Completer: createEntryCompleter(ctx), 15 | Func: func(c *ishell.Context) { 16 | if len(c.Args) == 0 { 17 | c.Err(errors.New("missing source file")) 18 | return 19 | } 20 | 21 | srcName := c.Args[0] 22 | 23 | node, err := ctx.api.Filetree().NodeByPath(srcName, ctx.node) 24 | 25 | if err != nil { 26 | c.Err(errors.New("file doesn't exist")) 27 | return 28 | } 29 | 30 | jsn, err := json.MarshalIndent(node.Document, "", " ") 31 | 32 | if err != nil { 33 | c.Err(errors.New("can't serialize to json")) 34 | return 35 | } 36 | 37 | c.Println(string(jsn)) 38 | }, 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /shell/cd.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/abiosoft/ishell" 7 | ) 8 | 9 | func cdCmd(ctx *ShellCtxt) *ishell.Cmd { 10 | return &ishell.Cmd{ 11 | Name: "cd", 12 | Help: "change directory", 13 | Completer: createDirCompleter(ctx), 14 | Func: func(c *ishell.Context) { 15 | if len(c.Args) == 0 { 16 | return 17 | } 18 | 19 | target := c.Args[0] 20 | 21 | node, err := ctx.api.Filetree().NodeByPath(target, ctx.node) 22 | 23 | if err != nil || node.IsFile() { 24 | c.Err(errors.New("directory doesn't exist")) 25 | return 26 | } 27 | 28 | path, err := ctx.api.Filetree().NodeToPath(node) 29 | 30 | if err != nil || node.IsFile() { 31 | c.Err(errors.New("directory doesn't exist")) 32 | return 33 | } 34 | 35 | ctx.path = path 36 | ctx.node = node 37 | 38 | c.Println() 39 | c.SetPrompt(ctx.prompt()) 40 | }, 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /encoding/rm/unmarshal_test.go: -------------------------------------------------------------------------------- 1 | package rm 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "testing" 7 | ) 8 | 9 | func testUnmarshalBinary(t *testing.T, fn string, ver Version) *Rm { 10 | b, err := ioutil.ReadFile(fn) 11 | if err != nil { 12 | t.Errorf("can't open %s file", fn) 13 | } 14 | 15 | rm := New() 16 | err = rm.UnmarshalBinary(b) 17 | if err != nil { 18 | t.Error(err) 19 | } 20 | 21 | if rm.Version != ver { 22 | t.Error("wrong version parsed") 23 | } 24 | 25 | t.Log(rm) 26 | 27 | fmt.Println("unmarshaling complete") 28 | 29 | return rm 30 | } 31 | 32 | func TestUnmarshalBinaryV5(t *testing.T) { 33 | rm := testUnmarshalBinary(t, "test_v5.rm", V5) 34 | for _, layer := range rm.Layers { 35 | for _, line := range layer.Lines { 36 | if line.BrushSize != 2.0 { 37 | t.Error("Incorrectly parsing BrushSize") 38 | } 39 | } 40 | } 41 | } 42 | 43 | func TestUnmarshalBinaryV3(t *testing.T) { 44 | testUnmarshalBinary(t, "test_v3.rm", V3) 45 | } 46 | -------------------------------------------------------------------------------- /annotations/pdf_test.go: -------------------------------------------------------------------------------- 1 | package annotations 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func test(name string, t *testing.T) { 9 | zip := fmt.Sprintf("testfiles/%s.zip", name) 10 | outfile := fmt.Sprintf("/tmp/%s.pdf", name) 11 | options := PdfGeneratorOptions{AddPageNumbers: true, AllPages: false, AnnotationsOnly: false} 12 | generator := CreatePdfGenerator(zip, outfile, options) 13 | err := generator.Generate() 14 | 15 | if err != nil { 16 | t.Error(err) 17 | } 18 | } 
19 | func TestGenerateA3(t *testing.T) { 20 | test("a3", t) 21 | } 22 | func TestGenerateA4(t *testing.T) { 23 | test("a4", t) 24 | } 25 | 26 | func TestGenerateA5(t *testing.T) { 27 | test("a5", t) 28 | } 29 | func TestGenerateLetter(t *testing.T) { 30 | test("letter", t) 31 | } 32 | func TestGenerateRM(t *testing.T) { 33 | test("rm", t) 34 | } 35 | func TestGenerateTmpl(t *testing.T) { 36 | test("tmpl", t) 37 | } 38 | func TestGenerateStrangeBug(t *testing.T) { 39 | test("strange", t) 40 | } 41 | -------------------------------------------------------------------------------- /shell/get.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | 7 | "github.com/abiosoft/ishell" 8 | ) 9 | 10 | func getCmd(ctx *ShellCtxt) *ishell.Cmd { 11 | return &ishell.Cmd{ 12 | Name: "get", 13 | Help: "copy remote file to local", 14 | Completer: createEntryCompleter(ctx), 15 | Func: func(c *ishell.Context) { 16 | if len(c.Args) == 0 { 17 | c.Err(errors.New("missing source file")) 18 | return 19 | } 20 | 21 | srcName := c.Args[0] 22 | 23 | node, err := ctx.api.Filetree().NodeByPath(srcName, ctx.node) 24 | 25 | if err != nil || node.IsDirectory() { 26 | c.Err(errors.New("file doesn't exist")) 27 | return 28 | } 29 | 30 | c.Println(fmt.Sprintf("downloading: [%s]...", srcName)) 31 | 32 | err = ctx.api.FetchDocument(node.Document.ID, fmt.Sprintf("%s.zip", node.Name())) 33 | 34 | if err == nil { 35 | c.Println("OK") 36 | return 37 | } 38 | 39 | c.Err(errors.New(fmt.Sprintf("Failed to download file %s with %s", srcName, err.Error()))) 40 | }, 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | on: 3 | pull_request: 4 | branches: 5 | - master 6 | push: 7 | branches: 8 | - master 9 | 10 | jobs: 11 | 12 | build: 13 | name: Build 14 | runs-on: ubuntu-latest 15 | steps: 16 | 17 | - name: Set up Go 1.19 18 | uses: actions/setup-go@v3 19 | with: 20 | go-version: 1.19 21 | id: go 22 | 23 | - name: Check out code into the Go module directory 24 | uses: actions/checkout@v1 25 | 26 | - name: Get dependencies 27 | run: | 28 | go get -v -t -d ./... 29 | if [ -f Gopkg.toml ]; then 30 | curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh 31 | dep ensure 32 | fi 33 | 34 | - name: Format 35 | run: | 36 | out="$(gofmt -s -l . )" 37 | if [ -n "$out" ]; then 38 | echo "gofmt needs to be run on the following files:" >&2 39 | echo "$out" >&2 40 | exit 1 41 | fi 42 | 43 | - name: Build 44 | run: go build -v . 45 | 46 | - name: Tests 47 | run: go test ./... 
48 | -------------------------------------------------------------------------------- /filetree/treeutil.go: -------------------------------------------------------------------------------- 1 | package filetree 2 | 3 | import ( 4 | "path" 5 | 6 | "github.com/juruen/rmapi/model" 7 | ) 8 | 9 | const ( 10 | StopVisiting = true 11 | ContinueVisiting = false 12 | ) 13 | 14 | func WalkTree(node *model.Node, visitor FileTreeVistor) { 15 | doWalkTree(node, make([]string, 0), visitor) 16 | } 17 | 18 | func doWalkTree(node *model.Node, path []string, visitor FileTreeVistor) bool { 19 | if visitor.Visit(node, path) { 20 | return StopVisiting 21 | } 22 | 23 | newPath := appendEntryPath(path, node.Name()) 24 | 25 | for _, c := range node.Children { 26 | if doWalkTree(c, newPath, visitor) { 27 | return StopVisiting 28 | } 29 | } 30 | 31 | return ContinueVisiting 32 | } 33 | 34 | func appendEntryPath(currentPath []string, entry string) []string { 35 | newPath := make([]string, len(currentPath)) 36 | copy(newPath, currentPath) 37 | newPath = append(newPath, entry) 38 | 39 | return newPath 40 | } 41 | 42 | func BuildPath(_path []string, entry string) string { 43 | if len(_path) == 0 { 44 | return entry 45 | } 46 | 47 | return path.Join(path.Join(_path...), entry) 48 | } 49 | -------------------------------------------------------------------------------- /docs/create-release.md: -------------------------------------------------------------------------------- 1 | ## Create a new release 2 | 3 | This projects leverages GitHub Actions to automate the release process. 4 | 5 | It uses [release.yml](https://github.com/juruen/rmapi/blob/master/.github/workflows/release.yml) 6 | to automatically build a new release and upload its assets when a new tag starting with `v` 7 | is pushed to the repository. 8 | 9 | The assets are: 10 | 11 | - Source code 12 | - Linux binary 13 | - MacOS binary 14 | - Windows binary 15 | 16 | For now, the process to create a release is to execute: 17 | 18 | ```sh 19 | script/prepare-release.sh 0.0.3 20 | ``` 21 | 22 | where `0.0.3` is an example of a new release version that needs to be replaced with the actual version. 23 | 24 | This script changes a few files to update the new version. It also creates the corresponding tag and outputs 25 | the two `git` commands that need to be executed. 26 | 27 | This scripts needs `gnu-sed` if you are running it from OSX. 28 | 29 | Once those two commands are run, you should see a new workflow triggered in the `Actions` tab, and eventually, 30 | the new release should show up in the `releases` sections with its corresponding assests. 
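For reference, with `0.0.3` as the version, the two `git` commands printed by the script's `show_git_push` step look like this (taken from `script/prepare-release.sh`):

```sh
git commit version/version.go CHANGELOG.md docs/tutorial-print-macosx.md -m 'Release 0.0.3'
git push origin HEAD:master HEAD:refs/tags/v0.0.3
```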
31 | -------------------------------------------------------------------------------- /log/log.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "io" 5 | "io/ioutil" 6 | "log" 7 | "os" 8 | ) 9 | 10 | var ( 11 | Trace *log.Logger 12 | Info *log.Logger 13 | Warning *log.Logger 14 | Error *log.Logger 15 | TracingEnabled bool 16 | TraceLevel int 17 | ) 18 | 19 | func Init( 20 | traceHandle io.Writer, 21 | infoHandle io.Writer, 22 | warningHandle io.Writer, 23 | errorHandle io.Writer) { 24 | 25 | Trace = log.New(traceHandle, 26 | "Trace: ", 27 | log.Ldate|log.Ltime|log.Lshortfile) 28 | 29 | Info = log.New(infoHandle, 30 | "INFO: ", 31 | log.Ldate|log.Ltime|log.Lshortfile) 32 | 33 | Warning = log.New(warningHandle, 34 | "WARNING: ", 35 | log.Ldate|log.Ltime|log.Lshortfile) 36 | 37 | Error = log.New(errorHandle, 38 | "ERROR: ", 39 | log.Ldate|log.Ltime|log.Lshortfile) 40 | } 41 | func init() { 42 | InitLog() 43 | } 44 | 45 | func InitLog() { 46 | info := ioutil.Discard 47 | trace := ioutil.Discard 48 | traceRmapi := os.Getenv("RMAPI_TRACE") 49 | switch traceRmapi { 50 | case "1": 51 | TracingEnabled = true 52 | trace = os.Stdout 53 | fallthrough 54 | case "2": 55 | info = os.Stdout 56 | } 57 | 58 | Init(trace, info, os.Stdout, os.Stderr) 59 | } 60 | -------------------------------------------------------------------------------- /shell/arguments.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import "strings" 4 | 5 | func parseArguments(line string) []string { 6 | words := [][]rune{} 7 | 8 | words = append(words, make([]rune, 0)) 9 | slashes := 0 10 | args := 0 11 | for _, v := range line { 12 | if v == ' ' { 13 | // Consume blank spaces between args 14 | if len(words[args]) == 0 { 15 | continue 16 | } 17 | 18 | if slashes > 0 && (slashes%2) == 1 { 19 | // Found escaped space within argument 20 | words[args] = append(words[args], v) 21 | } else { 22 | // End of argument 23 | words = append(words, make([]rune, 0)) 24 | args = args + 1 25 | } 26 | 27 | slashes = 0 28 | continue 29 | } 30 | 31 | if v == '\\' { 32 | slashes = slashes + 1 33 | } else { 34 | slashes = 0 35 | } 36 | 37 | words[args] = append(words[args], v) 38 | } 39 | 40 | result := make([]string, 0) 41 | for _, w := range words { 42 | if len(w) == 0 { 43 | continue 44 | } 45 | 46 | result = append(result, string(w)) 47 | } 48 | 49 | return result 50 | } 51 | 52 | func escapeSpaces(s string) string { 53 | return strings.Replace(s, " ", "\\ ", -1) 54 | } 55 | 56 | func unescapeSpaces(s string) string { 57 | return strings.Replace(s, "\\ ", " ", -1) 58 | } 59 | -------------------------------------------------------------------------------- /model/node.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | "errors" 5 | "time" 6 | ) 7 | 8 | type Node struct { 9 | Document *Document 10 | Children map[string]*Node 11 | Parent *Node 12 | } 13 | 14 | func CreateNode(document Document) Node { 15 | return Node{&document, make(map[string]*Node, 0), nil} 16 | } 17 | 18 | func (node *Node) Name() string { 19 | return node.Document.VissibleName 20 | } 21 | 22 | func (node *Node) Id() string { 23 | return node.Document.ID 24 | } 25 | func (node *Node) Version() int { 26 | return node.Document.Version 27 | } 28 | 29 | func (node *Node) IsRoot() bool { 30 | return node.Id() == "" 31 | } 32 | 33 | func (node *Node) IsDirectory() bool { 34 | return 
node.Document.Type == "CollectionType" 35 | } 36 | 37 | func (node *Node) IsFile() bool { 38 | return !node.IsDirectory() 39 | } 40 | 41 | func (node *Node) EntyExists(id string) bool { 42 | _, ok := node.Children[id] 43 | return ok 44 | } 45 | 46 | func (node *Node) LastModified() (time.Time, error) { 47 | return time.Parse(time.RFC3339Nano, node.Document.ModifiedClient) 48 | } 49 | 50 | func (node *Node) FindByName(name string) (*Node, error) { 51 | for _, n := range node.Children { 52 | if n.Name() == name { 53 | return n, nil 54 | } 55 | } 56 | return nil, errors.New("entry doesn't exist") 57 | } 58 | -------------------------------------------------------------------------------- /cloud/doc.go: -------------------------------------------------------------------------------- 1 | // Package cloud is the second version of the client implemented 2 | // for interacting with the Remarkable Cloud API. It has been initiated because 3 | // the first version was not decoupled enough and could not be easily used 4 | // by external packages. The first version of the package is still available for 5 | // backward compatility purposes. 6 | // 7 | // The aim of this package is to provide simple bindings to the Remarkable Cloud API. 8 | // The design has been mostly discussed here: https://github.com/juruen/rmapi/issues/54. 9 | // It has to be high level in order to let a user easily upload, download or interact 10 | // with the storage of a Remarkable device. 11 | // 12 | // The SplitBrain reference has helped a lot to explore the Cloud API has there is no 13 | // official API from Remarkable. See: https://github.com/splitbrain/ReMarkableAPI/wiki. 14 | // 15 | // For interacting with the API, we decoupled the process of authentication 16 | // from the actual storage operations. The authentication is not handled in this package. 17 | // See the auth package from this project. 18 | // 19 | // We tend to follow the good practices from this article to write a modular http client: 20 | // https://medium.com/@marcus.olsson/writing-a-go-client-for-your-restful-api-c193a2f4998c. 21 | package cloud 22 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - v* 7 | 8 | jobs: 9 | 10 | build: 11 | name: Build 12 | runs-on: ubuntu-latest 13 | steps: 14 | 15 | - name: Set up Go 1.18 16 | uses: actions/setup-go@v3 17 | with: 18 | go-version: 1.18 19 | id: go 20 | 21 | - name: Check out code into the Go module directory 22 | uses: actions/checkout@v3 23 | 24 | - run: echo "VERSION=`echo $(git describe --tags)`" >> $GITHUB_ENV 25 | 26 | - name: Build Linux binary 27 | run: go build -v -ldflags "-X 'github.com/juruen/rmapi/version.Version=${{ env.VERSION }}'" -o rmapi . 28 | 29 | 30 | - name: Create tar 31 | run: tar czvf rmapi-linuxx86-64.tar.gz rmapi 32 | 33 | - name: Build MacOS binary 34 | run: GOOS=darwin go build -v -o rmapi . 35 | 36 | - name: Create zip 37 | run: zip rmapi-macosx.zip rmapi 38 | 39 | - name: Build Windows binary 40 | run: GOOS=windows go build -v -o rmapi.exe . 
41 | 42 | - name: Create zip 43 | run: zip rmapi-win64.zip rmapi.exe 44 | 45 | - name: Release 46 | uses: docker://softprops/action-gh-release 47 | if: startsWith(github.ref, 'refs/tags/') 48 | with: 49 | files: rmapi-* 50 | env: 51 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 52 | -------------------------------------------------------------------------------- /config/url.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import "os" 4 | 5 | var NewTokenDevice string 6 | var NewUserDevice string 7 | var DocHost string 8 | var ListDocs string 9 | var UpdateStatus string 10 | var UploadRequest string 11 | var DeleteEntry string 12 | var RootUrl string 13 | var PutRootUrl string 14 | 15 | var DownloadFile string 16 | 17 | func init() { 18 | docHost := "https://document-storage-production-dot-remarkable-production.appspot.com" 19 | authHost := "https://webapp-prod.cloud.remarkable.engineering" 20 | newFileHost := "https://eu.tectonic.remarkable.com" 21 | 22 | host := os.Getenv("RMAPI_DOC") 23 | if host != "" { 24 | docHost = host 25 | } 26 | 27 | host = os.Getenv("RMAPI_AUTH") 28 | 29 | if host != "" { 30 | authHost = host 31 | } 32 | host = os.Getenv("RMAPI_HOST") 33 | 34 | if host != "" { 35 | authHost = host 36 | docHost = host 37 | } 38 | 39 | NewTokenDevice = authHost + "/token/json/2/device/new" 40 | NewUserDevice = authHost + "/token/json/2/user/new" 41 | ListDocs = docHost + "/document-storage/json/2/docs" 42 | UpdateStatus = docHost + "/document-storage/json/2/upload/update-status" 43 | UploadRequest = docHost + "/document-storage/json/2/upload/request" 44 | DeleteEntry = docHost + "/document-storage/json/2/delete" 45 | 46 | DownloadFile = newFileHost + "/sync/v3/files/" 47 | RootUrl = newFileHost + "/sync/v4/root" 48 | PutRootUrl = newFileHost + "/sync/v3/root" 49 | } 50 | -------------------------------------------------------------------------------- /script/prepare-release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Script to prepare a new rmapi release 6 | 7 | SED="sed" 8 | 9 | set_sed(){ 10 | if $(which gsed 2>&1 >/dev/null); then 11 | SED="gsed" 12 | fi 13 | } 14 | 15 | update_app_version(){ 16 | local version=$1 17 | $SED -i "s/var Version = \".*\"/var Version = \"$version\"/" version/version.go 18 | } 19 | 20 | update_changelog(){ 21 | local version=$1 22 | $SED -i "1c## rmapi $version ($(date "+%B %d, %Y"))" CHANGELOG.md 23 | } 24 | 25 | update_macosx_tutorial(){ 26 | local version=$1 27 | $SED -i "s/v.*\/rmapi-macosx.zip/v${version}\/rmapi-macosx.zip -o rmapi.zip/" docs/tutorial-print-macosx.md 28 | } 29 | 30 | create_tag(){ 31 | local version=$1 32 | git tag v${version} 33 | } 34 | 35 | show_git_push(){ 36 | local version=$1 37 | git diff 38 | echo 39 | echo 40 | echo "Commit and push current changes with:" 41 | echo " git commit version/version.go CHANGELOG.md docs/tutorial-print-macosx.md -m 'Release $version'" 42 | echo " git push origin HEAD:master HEAD:refs/tags/v$version" 43 | } 44 | 45 | if [ -z "$1" ]; then 46 | echo "Missing version argument" >&2 47 | echo "Usage: $0 version" >&2 48 | echo "Example: $0 0.0.10" >&2 49 | exit 1 50 | fi 51 | 52 | version=$1 53 | set_sed 54 | update_app_version $version 55 | update_changelog $version 56 | update_macosx_tutorial $version 57 | create_tag $version 58 | show_git_push $version 59 | -------------------------------------------------------------------------------- /shell/mkdir.go: 
-------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "path" 7 | 8 | "github.com/abiosoft/ishell" 9 | ) 10 | 11 | func mkdirCmd(ctx *ShellCtxt) *ishell.Cmd { 12 | return &ishell.Cmd{ 13 | Name: "mkdir", 14 | Help: "create a directory", 15 | Completer: createDirCompleter(ctx), 16 | Func: func(c *ishell.Context) { 17 | if len(c.Args) == 0 { 18 | c.Err(errors.New("missing directory")) 19 | return 20 | } 21 | 22 | target := c.Args[0] 23 | 24 | _, err := ctx.api.Filetree().NodeByPath(target, ctx.node) 25 | 26 | if err == nil { 27 | c.Println("entry already exists") 28 | return 29 | } 30 | 31 | parentDir := path.Dir(target) 32 | newDir := path.Base(target) 33 | 34 | if newDir == "/" || newDir == "." { 35 | c.Err(errors.New("invalid directory name")) 36 | return 37 | } 38 | 39 | parentNode, err := ctx.api.Filetree().NodeByPath(parentDir, ctx.node) 40 | 41 | if err != nil || parentNode.IsFile() { 42 | c.Err(errors.New("directory doesn't exist")) 43 | return 44 | } 45 | 46 | parentId := parentNode.Id() 47 | if parentNode.IsRoot() { 48 | parentId = "" 49 | } 50 | 51 | document, err := ctx.api.CreateDir(parentId, newDir, true) 52 | 53 | if err != nil { 54 | c.Err(errors.New(fmt.Sprint("failed to create directory", err))) 55 | return 56 | } 57 | 58 | ctx.api.Filetree().AddDocument(document) 59 | }, 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /shell/put.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | 7 | "github.com/abiosoft/ishell" 8 | "github.com/juruen/rmapi/util" 9 | ) 10 | 11 | func putCmd(ctx *ShellCtxt) *ishell.Cmd { 12 | return &ishell.Cmd{ 13 | Name: "put", 14 | Help: "copy a local document to cloud", 15 | Completer: createFsEntryCompleter(), 16 | Func: func(c *ishell.Context) { 17 | if len(c.Args) == 0 { 18 | c.Err(errors.New("missing source file")) 19 | return 20 | } 21 | 22 | srcName := c.Args[0] 23 | 24 | docName, _ := util.DocPathToName(srcName) 25 | 26 | node := ctx.node 27 | var err error 28 | 29 | if len(c.Args) == 2 { 30 | node, err = ctx.api.Filetree().NodeByPath(c.Args[1], ctx.node) 31 | 32 | if err != nil || node.IsFile() { 33 | c.Err(errors.New("directory doesn't exist")) 34 | return 35 | } 36 | } 37 | 38 | _, err = ctx.api.Filetree().NodeByPath(docName, node) 39 | //TODO: force flag and overwrite 40 | if err == nil { 41 | c.Err(errors.New("entry already exists")) 42 | return 43 | } 44 | 45 | c.Printf("uploading: [%s]...", srcName) 46 | 47 | dstDir := node.Id() 48 | 49 | document, err := ctx.api.UploadDocument(dstDir, srcName, true) 50 | 51 | if err != nil { 52 | c.Err(fmt.Errorf("Failed to upload file [%s] %v", srcName, err)) 53 | return 54 | } 55 | 56 | c.Println("OK") 57 | 58 | ctx.api.Filetree().AddDocument(document) 59 | }, 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /api/sync15/blobstorage.go: -------------------------------------------------------------------------------- 1 | package sync15 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/juruen/rmapi/config" 7 | "github.com/juruen/rmapi/log" 8 | "github.com/juruen/rmapi/model" 9 | "github.com/juruen/rmapi/transport" 10 | ) 11 | 12 | type BlobStorage struct { 13 | http *transport.HttpClientCtx 14 | concurrency int 15 | } 16 | 17 | func NewBlobStorage(http *transport.HttpClientCtx) *BlobStorage { 18 | return &BlobStorage{ 19 | http: 
http, 20 | } 21 | } 22 | 23 | const ROOT_NAME = "root" 24 | 25 | func (b *BlobStorage) GetReader(hash string) (io.ReadCloser, error) { 26 | blob, _, err := b.http.GetBlobStream(config.DownloadFile + hash) 27 | return blob, err 28 | } 29 | 30 | func (b *BlobStorage) UploadBlob(hash string, reader io.Reader, size int64, checksum uint32) error { 31 | return b.http.PutBlobStream(config.DownloadFile+hash, reader, size, checksum) 32 | } 33 | 34 | func (b *BlobStorage) WriteRootIndex(roothash string, gen int64) (int64, error) { 35 | log.Info.Println("writing root with gen: ", gen) 36 | return b.http.PutRootBlobStream(config.PutRootUrl, roothash, gen) 37 | } 38 | func (b *BlobStorage) GetRootIndex() (string, int64, error) { 39 | var res model.RootRequest 40 | err := b.http.Get(transport.UserBearer, config.RootUrl, nil, &res) 41 | if err != nil { 42 | return "", 0, err 43 | } 44 | 45 | log.Info.Println("got root hash:", res.Hash) 46 | log.Info.Println("got root gen:", res.Generation) 47 | return res.Hash, res.Generation, nil 48 | 49 | } 50 | -------------------------------------------------------------------------------- /shell/custom_completer.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "strings" 5 | ) 6 | 7 | type cmdToCompleter map[string]func([]string) []string 8 | 9 | type shellPathCompleter struct { 10 | cmdCompleter cmdToCompleter 11 | } 12 | 13 | func (ic shellPathCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) { 14 | if len(ic.cmdCompleter) == 0 { 15 | return nil, len(line) 16 | } 17 | 18 | words := parseArguments(string(line)) 19 | 20 | var cWords []string 21 | prefix := "" 22 | if len(words) > 0 && line[pos-1] != ' ' { 23 | prefix = words[len(words)-1] 24 | cWords = ic.getWords(words[:len(words)-1], prefix) 25 | } else { 26 | cWords = ic.getWords(words, prefix) 27 | } 28 | 29 | var suggestions [][]rune 30 | for _, w := range cWords { 31 | if strings.HasPrefix(w, prefix) { 32 | suggestions = append(suggestions, []rune(strings.TrimPrefix(w, prefix))) 33 | } 34 | } 35 | if len(suggestions) == 1 && prefix != "" && string(suggestions[0]) == "" { 36 | suggestions = [][]rune{[]rune(" ")} 37 | } 38 | return suggestions, len(prefix) 39 | } 40 | 41 | func (ic shellPathCompleter) getWords(w []string, prefix string) []string { 42 | if len(w) == 0 { 43 | return make([]string, 0) 44 | } 45 | 46 | completer, ok := ic.cmdCompleter[w[0]] 47 | if !ok { 48 | return make([]string, 0) 49 | } 50 | 51 | args := make([]string, len(w)) 52 | for i, v := range w[1:] { 53 | args[i] = v 54 | } 55 | args = append(args, prefix) 56 | 57 | return completer(args) 58 | } 59 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/juruen/rmapi 2 | 3 | go 1.19 4 | 5 | replace github.com/flynn-archive/go-shlex => github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 6 | 7 | require ( 8 | github.com/abiosoft/ishell v2.0.0+incompatible 9 | github.com/golang-jwt/jwt v3.2.2+incompatible 10 | github.com/google/uuid v1.1.1 11 | github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 12 | github.com/pkg/errors v0.8.1 13 | github.com/stretchr/testify v1.5.1 14 | github.com/unidoc/unipdf/v3 v3.6.1 15 | golang.org/x/sync v0.1.0 16 | gopkg.in/yaml.v2 v2.2.8 17 | ) 18 | 19 | require ( 20 | github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db // indirect 21 | github.com/adrg/strutil 
v0.1.0 // indirect 22 | github.com/adrg/sysfont v0.1.0 // indirect 23 | github.com/adrg/xdg v0.2.1 // indirect 24 | github.com/chzyer/logex v1.1.10 // indirect 25 | github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 // indirect 26 | github.com/davecgh/go-spew v1.1.1 // indirect 27 | github.com/fatih/color v1.9.0 // indirect 28 | github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect 29 | github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect 30 | github.com/kr/pretty v0.1.0 // indirect 31 | github.com/mattn/go-colorable v0.1.6 // indirect 32 | github.com/mattn/go-isatty v0.0.12 // indirect 33 | github.com/pmezard/go-difflib v1.0.0 // indirect 34 | golang.org/x/image v0.5.0 // indirect 35 | golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect 36 | golang.org/x/text v0.7.0 // indirect 37 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect 38 | ) 39 | -------------------------------------------------------------------------------- /util/util.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "io" 7 | "os" 8 | "path" 9 | "strings" 10 | ) 11 | 12 | const ( 13 | PDF = "pdf" 14 | ZIP = "zip" 15 | RM = "rm" 16 | EPUB = "epub" 17 | ) 18 | 19 | var supportedExt = map[string]bool{ 20 | EPUB: true, 21 | PDF: true, 22 | ZIP: true, 23 | RM: true, 24 | } 25 | 26 | func IsFileTypeSupported(ext string) bool { 27 | return supportedExt[ext] 28 | } 29 | 30 | // DocPathToName extracts the file name and file extension (without .) from a given path 31 | func DocPathToName(p string) (name string, ext string) { 32 | tmpExt := path.Ext(p) 33 | name = strings.TrimSuffix(path.Base(p), tmpExt) 34 | ext = strings.ToLower(strings.TrimPrefix(tmpExt, ".")) 35 | return 36 | } 37 | 38 | func ToIOReader(source interface{}) (io.Reader, error) { 39 | var content []byte 40 | var err error 41 | 42 | if source == nil { 43 | return bytes.NewReader(nil), nil 44 | } 45 | 46 | content, err = json.Marshal(source) 47 | 48 | return bytes.NewReader(content), err 49 | } 50 | 51 | func CopyFile(src, dst string) (int64, error) { 52 | r, err := os.Open(src) 53 | if err != nil { 54 | return 0, err 55 | } 56 | defer r.Close() 57 | 58 | w, err := os.Create(dst) 59 | if err != nil { 60 | return 0, err 61 | } 62 | defer w.Close() 63 | 64 | n, err := io.Copy(w, r) 65 | if err != nil { 66 | return 0, err 67 | } 68 | 69 | return n, nil 70 | } 71 | 72 | // Wraps a request in a slice (serialize as json array) 73 | func InSlice(req interface{}) []interface{} { 74 | slice := make([]interface{}, 0) 75 | slice = append(slice, req) 76 | return slice 77 | } 78 | -------------------------------------------------------------------------------- /shell/find.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "errors" 5 | "path/filepath" 6 | "regexp" 7 | "strings" 8 | 9 | "github.com/abiosoft/ishell" 10 | "github.com/juruen/rmapi/filetree" 11 | "github.com/juruen/rmapi/model" 12 | ) 13 | 14 | func findCmd(ctx *ShellCtxt) *ishell.Cmd { 15 | return &ishell.Cmd{ 16 | Name: "find", 17 | Help: "find files recursively, usage: find dir [regexp]", 18 | Completer: createDirCompleter(ctx), 19 | Func: func(c *ishell.Context) { 20 | if len(c.Args) != 1 && len(c.Args) != 2 { 21 | c.Err(errors.New("missing arguments; usage find dir [regexp]")) 22 | return 23 | } 24 | 25 | start := c.Args[0] 26 | 27 | startNode, err := 
ctx.api.Filetree().NodeByPath(start, ctx.node) 28 | 29 | if err != nil { 30 | c.Err(errors.New("start directory doesn't exist")) 31 | return 32 | } 33 | 34 | var matchRegexp *regexp.Regexp 35 | if len(c.Args) == 2 { 36 | matchRegexp, err = regexp.Compile(c.Args[1]) 37 | if err != nil { 38 | c.Err(errors.New("failed to compile regexp")) 39 | return 40 | } 41 | } 42 | 43 | filetree.WalkTree(startNode, filetree.FileTreeVistor{ 44 | Visit: func(node *model.Node, path []string) bool { 45 | var entryType string 46 | if node.IsDirectory() { 47 | entryType = "[d] " 48 | } else { 49 | entryType = "[f] " 50 | } 51 | entryName := entryType + filepath.Join(strings.Join(path, "/"), node.Name()) 52 | 53 | if matchRegexp == nil { 54 | c.Println(entryName) 55 | return false 56 | } 57 | 58 | if !matchRegexp.Match([]byte(entryName)) { 59 | return false 60 | } 61 | 62 | c.Println(entryName) 63 | 64 | return false 65 | }, 66 | }) 67 | }, 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /archive/doc.go: -------------------------------------------------------------------------------- 1 | // Package archive is used to parse a .zip file retrieved 2 | // by the API. 3 | // 4 | // Here is the content of an archive retried on the tablet as example: 5 | // 384327f5-133e-49c8-82ff-30aa19f3cfa4.content 6 | // 384327f5-133e-49c8-82ff-30aa19f3cfa4//0-metadata.json 7 | // 384327f5-133e-49c8-82ff-30aa19f3cfa4//0.rm 8 | // 384327f5-133e-49c8-82ff-30aa19f3cfa4.pagedata 9 | // 384327f5-133e-49c8-82ff-30aa19f3cfa4.thumbnails/0.jpg 10 | // 11 | // As the .zip file from remarkable is simply a normal .zip file 12 | // containing specific file formats, this package is a helper to 13 | // read and write zip files with the correct format expected by 14 | // the tablet. 15 | // 16 | // At the core of this archive package, we have the Zip struct 17 | // that is defined and that represents a Remarkable zip file. 18 | // Then it provides a Zip.Read() method to unmarshal data 19 | // from an io.Reader into a Zip struct and a Zip.Write() method 20 | // to marshal a Zip struct into a io.Writer. 21 | // 22 | // In order to correctly use this package, you will have to understand 23 | // the format of a Remarkable zip file, and the format of the files 24 | // that it contains. 25 | // 26 | // You can find some help about the format at the following URL: 27 | // https://remarkablewiki.com/tech/filesystem 28 | // 29 | // You can also display the go documentation of public structs of this package 30 | // to have more information. This will be completed in the future hopefully 31 | // to have a precise overall documentation directly held in this Golang package. 32 | // 33 | // Note that the binary format ".rm" holding the drawing contained in a zip has 34 | // a dedicated golang package and is not decoded/encoded from the archive package. 35 | // See encoding/rm in this repository. 36 | // 37 | // To have a more concrete example, see the test files of this package. 
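//
// A minimal sketch based on those test files (reader_test.go and
// writer_test.go in this package); error handling is omitted for brevity:
//
//	z := archive.NewZip()
//	f, _ := os.Open("test.zip")
//	fi, _ := f.Stat()
//	_ = z.Read(f, fi.Size()) // unmarshal the archive into the Zip struct
//
//	z.Content.FileType = "pdf"
//	out, _ := os.Create("write.zip")
//	_ = z.Write(out) // marshal the Zip struct back into a .zip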
38 | package archive 39 | -------------------------------------------------------------------------------- /shell/mv.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "path" 7 | 8 | "github.com/abiosoft/ishell" 9 | ) 10 | 11 | func mvCmd(ctx *ShellCtxt) *ishell.Cmd { 12 | return &ishell.Cmd{ 13 | Name: "mv", 14 | Help: "mv file or directory", 15 | Completer: createEntryCompleter(ctx), 16 | Func: func(c *ishell.Context) { 17 | if len(c.Args) < 2 { 18 | c.Err(errors.New("missing source and/or destination")) 19 | return 20 | } 21 | 22 | src := c.Args[0] 23 | 24 | srcNode, err := ctx.api.Filetree().NodeByPath(src, ctx.node) 25 | 26 | if err != nil { 27 | c.Err(errors.New("source entry doesn't exist")) 28 | return 29 | } 30 | 31 | dst := c.Args[1] 32 | 33 | dstNode, err := ctx.api.Filetree().NodeByPath(dst, ctx.node) 34 | 35 | if dstNode != nil && dstNode.IsFile() { 36 | c.Err(errors.New("destination entry already exists")) 37 | return 38 | } 39 | 40 | // We are moving the node to antoher directory 41 | if dstNode != nil && dstNode.IsDirectory() { 42 | n, err := ctx.api.MoveEntry(srcNode, dstNode, srcNode.Name()) 43 | 44 | if err != nil { 45 | c.Err(errors.New(fmt.Sprint("failed to move entry", err))) 46 | return 47 | } 48 | 49 | ctx.api.Filetree().MoveNode(srcNode, n) 50 | return 51 | } 52 | 53 | // We are renaming the node 54 | parentDir := path.Dir(dst) 55 | newEntry := path.Base(dst) 56 | 57 | parentNode, err := ctx.api.Filetree().NodeByPath(parentDir, ctx.node) 58 | 59 | if err != nil || parentNode.IsFile() { 60 | c.Err(errors.New("directory doesn't exist")) 61 | return 62 | } 63 | 64 | n, err := ctx.api.MoveEntry(srcNode, parentNode, newEntry) 65 | 66 | if err != nil { 67 | c.Err(errors.New(fmt.Sprint("failed to move entry", err))) 68 | return 69 | } 70 | 71 | ctx.api.Filetree().MoveNode(srcNode, n) 72 | }, 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "os" 7 | 8 | "github.com/juruen/rmapi/api" 9 | "github.com/juruen/rmapi/config" 10 | "github.com/juruen/rmapi/log" 11 | "github.com/juruen/rmapi/shell" 12 | "github.com/juruen/rmapi/version" 13 | ) 14 | 15 | const AUTH_RETRIES = 3 16 | 17 | func parseOfflineCommands(cmd []string) bool { 18 | if len(cmd) == 0 { 19 | return false 20 | } 21 | 22 | switch cmd[0] { 23 | case "reset": 24 | configFile, err := config.ConfigPath() 25 | if err != nil { 26 | log.Error.Fatalln(err) 27 | } 28 | if err := os.Remove(configFile); err != nil { 29 | log.Error.Fatalln(err) 30 | } 31 | return true 32 | case "version": 33 | fmt.Println(version.Version) 34 | return true 35 | } 36 | return false 37 | } 38 | 39 | func main() { 40 | ni := flag.Bool("ni", false, "not interactive (prevents asking for code)") 41 | flag.Usage = func() { 42 | fmt.Println(` 43 | help detailed commands, but the user needs to be logged in 44 | 45 | Offline Commands: 46 | version prints the version 47 | reset removes the config file `) 48 | 49 | flag.PrintDefaults() 50 | } 51 | flag.Parse() 52 | otherFlags := flag.Args() 53 | if parseOfflineCommands(otherFlags) { 54 | return 55 | } 56 | 57 | var ctx api.ApiCtx 58 | var err error 59 | var userInfo *api.UserInfo 60 | 61 | for i := 0; i < AUTH_RETRIES; i++ { 62 | authCtx := api.AuthHttpCtx(i > 0, *ni) 63 | 64 | userInfo, err = 
api.ParseToken(authCtx.Tokens.UserToken) 65 | if err != nil { 66 | log.Trace.Println(err) 67 | continue 68 | } 69 | 70 | ctx, err = api.CreateApiCtx(authCtx, userInfo.SyncVersion) 71 | if err != nil { 72 | log.Trace.Println(err) 73 | } else { 74 | break 75 | } 76 | } 77 | 78 | if err != nil { 79 | log.Error.Fatal("failed to build documents tree, last error: ", err) 80 | } 81 | 82 | err = shell.RunShell(ctx, userInfo, otherFlags) 83 | 84 | if err != nil { 85 | log.Error.Println("Error: ", err) 86 | 87 | os.Exit(1) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /cloud/client_test.go: -------------------------------------------------------------------------------- 1 | //go:build withauth 2 | // +build withauth 3 | 4 | package cloud 5 | 6 | import ( 7 | "io/ioutil" 8 | "os" 9 | "testing" 10 | 11 | "github.com/juruen/rmapi/auth" 12 | ) 13 | 14 | const testUUID = "883ba04f-606c-41b7-8903-8d113356850f" 15 | const testName = "test-api" 16 | 17 | func TestList(t *testing.T) { 18 | cli := NewClient(auth.New().Client()) 19 | 20 | docs, err := cli.List() 21 | if err != nil { 22 | t.Fatalf("test: %v", err) 23 | } 24 | 25 | for _, doc := range docs { 26 | t.Log(doc) 27 | } 28 | } 29 | 30 | func TestUpload(t *testing.T) { 31 | cli := NewClient(auth.New().Client()) 32 | 33 | // open test file 34 | file, err := os.Open("test.zip") 35 | if err != nil { 36 | t.Fatalf("test: %v", err) 37 | } 38 | defer file.Close() 39 | 40 | if err := cli.Upload(testUUID, testName, file); err != nil { 41 | t.Fatalf("test: %v", err) 42 | } 43 | } 44 | 45 | func TestDownload(t *testing.T) { 46 | cli := NewClient(auth.New().Client()) 47 | 48 | file, err := ioutil.TempFile("", "rmapi-test-*.zip") 49 | if err != nil { 50 | t.Fatalf("test: can't create temporary file: %v", err) 51 | } 52 | t.Log("path of the created file", file.Name()) 53 | 54 | if err := cli.Download(testUUID, file); err != nil { 55 | t.Fatalf("test: can't download file: %v", err) 56 | } 57 | } 58 | 59 | func TestCreateFolder(t *testing.T) { 60 | cli := NewClient(auth.New().Client()) 61 | 62 | if _, err := cli.CreateFolder("test-folder", ""); err != nil { 63 | t.Fatalf("test: can't create folder: %v", err) 64 | } 65 | } 66 | 67 | func TestBookmark(t *testing.T) { 68 | cli := NewClient(auth.New().Client()) 69 | 70 | doc := Document{ 71 | ID: testUUID, 72 | Name: testName, 73 | Bookmarked: true, 74 | } 75 | 76 | if err := cli.Metadata(doc); err != nil { 77 | t.Fatalf("test: can't bookmark document: %v", err) 78 | } 79 | } 80 | 81 | func TestDelete(t *testing.T) { 82 | cli := NewClient(auth.New().Client()) 83 | 84 | if err := cli.Delete(testUUID); err != nil { 85 | t.Fatalf("test: can't delete document: %v", err) 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /shell/geta.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "errors" 5 | "flag" 6 | "fmt" 7 | 8 | "github.com/abiosoft/ishell" 9 | "github.com/juruen/rmapi/annotations" 10 | ) 11 | 12 | func getACmd(ctx *ShellCtxt) *ishell.Cmd { 13 | return &ishell.Cmd{ 14 | Name: "geta", 15 | Help: "copy remote file to local and generate a PDF with its annotations", 16 | Completer: createEntryCompleter(ctx), 17 | Func: func(c *ishell.Context) { 18 | 19 | flagSet := flag.NewFlagSet("geta", flag.ContinueOnError) 20 | addPageNumbers := flagSet.Bool("p", false, "add page numbers") 21 | allPages := flagSet.Bool("a", false, "all pages") 22 | 
annotationsOnly := flagSet.Bool("n", false, "annotations only") 23 | if err := flagSet.Parse(c.Args); err != nil { 24 | if err != flag.ErrHelp { 25 | c.Err(err) 26 | } 27 | return 28 | } 29 | argRest := flagSet.Args() 30 | if len(argRest) == 0 { 31 | c.Err(errors.New("missing source file")) 32 | return 33 | } 34 | 35 | srcName := argRest[0] 36 | 37 | node, err := ctx.api.Filetree().NodeByPath(srcName, ctx.node) 38 | 39 | if err != nil || node.IsDirectory() { 40 | c.Err(errors.New("file doesn't exist")) 41 | return 42 | } 43 | 44 | c.Println(fmt.Sprintf("downloading: [%s]...", srcName)) 45 | 46 | zipName := fmt.Sprintf("%s.zip", node.Name()) 47 | err = ctx.api.FetchDocument(node.Document.ID, zipName) 48 | 49 | if err != nil { 50 | c.Err(errors.New(fmt.Sprintf("Failed to download file %s with %s", srcName, err.Error()))) 51 | return 52 | } 53 | 54 | pdfName := fmt.Sprintf("%s-annotations.pdf", node.Name()) 55 | options := annotations.PdfGeneratorOptions{AddPageNumbers: *addPageNumbers, AllPages: *allPages, AnnotationsOnly: *annotationsOnly} 56 | generator := annotations.CreatePdfGenerator(zipName, pdfName, options) 57 | err = generator.Generate() 58 | 59 | if err != nil { 60 | c.Err(errors.New(fmt.Sprintf("Failed to generate annotations for %s with %s", srcName, err.Error()))) 61 | return 62 | } 63 | 64 | c.Printf("Annotations generated in: %s\n", pdfName) 65 | }, 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /api/sync15/common.go: -------------------------------------------------------------------------------- 1 | package sync15 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/hex" 6 | "encoding/json" 7 | "os" 8 | "path" 9 | "sort" 10 | 11 | "github.com/juruen/rmapi/log" 12 | ) 13 | 14 | func HashEntries(entries []*Entry) (string, error) { 15 | sort.Slice(entries, func(i, j int) bool { return entries[i].DocumentID < entries[j].DocumentID }) 16 | hasher := sha256.New() 17 | for _, d := range entries { 18 | //TODO: back and forth converting 19 | bh, err := hex.DecodeString(d.Hash) 20 | if err != nil { 21 | return "", err 22 | } 23 | hasher.Write(bh) 24 | } 25 | hash := hasher.Sum(nil) 26 | hashStr := hex.EncodeToString(hash) 27 | return hashStr, nil 28 | } 29 | 30 | func getCachedTreePath() (string, error) { 31 | cachedir, err := os.UserCacheDir() 32 | if err != nil { 33 | return "", err 34 | } 35 | rmapiFolder := path.Join(cachedir, "rmapi") 36 | err = os.MkdirAll(rmapiFolder, 0700) 37 | if err != nil { 38 | return "", err 39 | } 40 | cacheFile := path.Join(rmapiFolder, ".tree") 41 | return cacheFile, nil 42 | } 43 | 44 | const cacheVersion = 3 45 | 46 | func loadTree() (*HashTree, error) { 47 | cacheFile, err := getCachedTreePath() 48 | if err != nil { 49 | return nil, err 50 | } 51 | tree := &HashTree{} 52 | if _, err := os.Stat(cacheFile); err == nil { 53 | b, err := os.ReadFile(cacheFile) 54 | if err != nil { 55 | return nil, err 56 | } 57 | err = json.Unmarshal(b, tree) 58 | if err != nil { 59 | log.Error.Println("cache corrupt") 60 | return tree, nil 61 | } 62 | if tree.CacheVersion != cacheVersion { 63 | log.Info.Println("wrong cache file version, resync") 64 | return &HashTree{}, nil 65 | } 66 | } 67 | log.Info.Println("cache loaded: ", cacheFile) 68 | 69 | return tree, nil 70 | } 71 | 72 | func saveTree(tree *HashTree) error { 73 | cacheFile, err := getCachedTreePath() 74 | log.Info.Println("Writing cache: ", cacheFile) 75 | if err != nil { 76 | return err 77 | } 78 | tree.CacheVersion = cacheVersion 79 | b, err := 
json.MarshalIndent(tree, "", "") 80 | if err != nil { 81 | return err 82 | } 83 | err = os.WriteFile(cacheFile, b, 0644) 84 | return err 85 | } 86 | -------------------------------------------------------------------------------- /shell/shell.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/abiosoft/ishell" 8 | "github.com/juruen/rmapi/api" 9 | "github.com/juruen/rmapi/model" 10 | ) 11 | 12 | type ShellCtxt struct { 13 | node *model.Node 14 | api api.ApiCtx 15 | path string 16 | useHiddenFiles bool 17 | UserInfo api.UserInfo 18 | } 19 | 20 | func (ctx *ShellCtxt) prompt() string { 21 | return fmt.Sprintf("[%s]>", ctx.path) 22 | } 23 | 24 | func setCustomCompleter(shell *ishell.Shell) { 25 | cmdCompleter := make(cmdToCompleter) 26 | for _, cmd := range shell.Cmds() { 27 | cmdCompleter[cmd.Name] = cmd.Completer 28 | } 29 | 30 | completer := shellPathCompleter{cmdCompleter} 31 | shell.CustomCompleter(completer) 32 | } 33 | 34 | func useHiddenFiles() bool { 35 | val, ok := os.LookupEnv("RMAPI_USE_HIDDEN_FILES") 36 | 37 | if !ok { 38 | return false 39 | } 40 | 41 | return val != "0" 42 | } 43 | 44 | func RunShell(apiCtx api.ApiCtx, userInfo *api.UserInfo, args []string) error { 45 | shell := ishell.New() 46 | ctx := &ShellCtxt{ 47 | node: apiCtx.Filetree().Root(), 48 | api: apiCtx, 49 | path: apiCtx.Filetree().Root().Name(), 50 | useHiddenFiles: useHiddenFiles(), 51 | UserInfo: *userInfo, 52 | } 53 | 54 | shell.SetPrompt(ctx.prompt()) 55 | 56 | shell.AddCmd(lsCmd(ctx)) 57 | shell.AddCmd(pwdCmd(ctx)) 58 | shell.AddCmd(cdCmd(ctx)) 59 | shell.AddCmd(getCmd(ctx)) 60 | shell.AddCmd(mgetCmd(ctx)) 61 | shell.AddCmd(mkdirCmd(ctx)) 62 | shell.AddCmd(rmCmd(ctx)) 63 | shell.AddCmd(mvCmd(ctx)) 64 | shell.AddCmd(putCmd(ctx)) 65 | shell.AddCmd(mputCmd(ctx)) 66 | shell.AddCmd(versionCmd(ctx)) 67 | shell.AddCmd(statCmd(ctx)) 68 | shell.AddCmd(getACmd(ctx)) 69 | shell.AddCmd(findCmd(ctx)) 70 | shell.AddCmd(nukeCmd(ctx)) 71 | shell.AddCmd(accountCmd(ctx)) 72 | shell.AddCmd(refreshCmd(ctx)) 73 | 74 | setCustomCompleter(shell) 75 | 76 | if len(args) > 0 { 77 | return shell.Process(args...) 78 | } else { 79 | shell.Printf("ReMarkable Cloud API Shell, User: %s, SyncVersion: %s\n", userInfo.User, userInfo.SyncVersion) 80 | shell.Run() 81 | 82 | return nil 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /auth/transport.go: -------------------------------------------------------------------------------- 1 | package auth 2 | 3 | import ( 4 | "net/http" 5 | "sync" 6 | ) 7 | 8 | // Transport is an http.RoundTripper that makes requests to 9 | // the Remarkable Cloud API wrapping a base RoundTripper and 10 | // adding an Authorization header with a token from the supplied Auth 11 | type Transport struct { 12 | // Auth supplies the token to add to outgoing requests' 13 | // Authorization headers. 14 | Auth *Auth 15 | 16 | // Base is the base RoundTripper used to make HTTP requests. 17 | // If nil, http.DefaultTransport is used. 18 | Base http.RoundTripper 19 | 20 | // Guard for avoiding multi authentications. 21 | mu sync.Mutex 22 | } 23 | 24 | // RoundTrip authorizes and authenticates the request with an 25 | // access token from Transport's Auth. 26 | // 27 | // RoundTrip makes sure req.Body is closed anyway. 28 | // RoundTrip is cloning the original request to respect the RoundTripper contract. 
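//
// A minimal usage sketch (illustrative only; it assumes an already-constructed
// *Auth value named a, which is not defined in this file):
//
//	client := &http.Client{Transport: &Transport{Auth: a}}
//	resp, err := client.Get("https://example.com/some/endpoint")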
29 | // 30 | // In order to avoid having two authenticating requests at the same time 31 | // we make use of a Mutex. 32 | func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { 33 | t.mu.Lock() 34 | token, err := t.Auth.Token() 35 | t.mu.Unlock() 36 | if err != nil { 37 | if req.Body != nil { 38 | req.Body.Close() 39 | } 40 | return nil, err 41 | } 42 | 43 | req2 := cloneRequest(req) // to respect the RoundTripper contract 44 | req2.Header.Set("Authorization", "Bearer "+token) 45 | 46 | res, err := t.base().RoundTrip(req2) 47 | if err != nil { 48 | return nil, err 49 | } 50 | 51 | return res, nil 52 | } 53 | 54 | func (t *Transport) base() http.RoundTripper { 55 | if t.Base != nil { 56 | return t.Base 57 | } 58 | return http.DefaultTransport 59 | } 60 | 61 | // cloneRequest returns a clone of the provided *http.Request. 62 | // The clone is a shallow copy of the struct and its Header map. 63 | // In order to respect the http.RoundTripper interface contract, 64 | // we should normally be doing a full deep copy. 65 | func cloneRequest(req *http.Request) *http.Request { 66 | // shallow copy of the struct 67 | copy := new(http.Request) 68 | *copy = *req 69 | // deep copy of the Header 70 | copy.Header = make(http.Header, len(req.Header)) 71 | for k, s := range req.Header { 72 | copy.Header[k] = append([]string(nil), s...) 73 | } 74 | return copy 75 | } 76 | -------------------------------------------------------------------------------- /api/auth_test.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | "github.com/juruen/rmapi/transport" 8 | ) 9 | 10 | func TestAuthHttpCtx(t *testing.T) { 11 | type args struct { 12 | reAuth bool 13 | nonInteractive bool 14 | } 15 | tests := []struct { 16 | name string 17 | args args 18 | want *transport.HttpClientCtx 19 | }{ 20 | // TODO: Add test cases. 21 | } 22 | for _, tt := range tests { 23 | t.Run(tt.name, func(t *testing.T) { 24 | if got := AuthHttpCtx(tt.args.reAuth, tt.args.nonInteractive); !reflect.DeepEqual(got, tt.want) { 25 | t.Errorf("AuthHttpCtx() = %v, want %v", got, tt.want) 26 | } 27 | }) 28 | } 29 | } 30 | 31 | func Test_readCode(t *testing.T) { 32 | tests := []struct { 33 | name string 34 | want string 35 | }{ 36 | // TODO: Add test cases. 37 | } 38 | for _, tt := range tests { 39 | t.Run(tt.name, func(t *testing.T) { 40 | if got := readCode(); got != tt.want { 41 | t.Errorf("readCode() = %v, want %v", got, tt.want) 42 | } 43 | }) 44 | } 45 | } 46 | 47 | func Test_newDeviceToken(t *testing.T) { 48 | type args struct { 49 | http *transport.HttpClientCtx 50 | code string 51 | } 52 | tests := []struct { 53 | name string 54 | args args 55 | want string 56 | wantErr bool 57 | }{ 58 | // TODO: Add test cases. 59 | } 60 | for _, tt := range tests { 61 | t.Run(tt.name, func(t *testing.T) { 62 | got, err := newDeviceToken(tt.args.http, tt.args.code) 63 | if (err != nil) != tt.wantErr { 64 | t.Errorf("newDeviceToken() error = %v, wantErr %v", err, tt.wantErr) 65 | return 66 | } 67 | if got != tt.want { 68 | t.Errorf("newDeviceToken() = %v, want %v", got, tt.want) 69 | } 70 | }) 71 | } 72 | } 73 | 74 | func Test_newUserToken(t *testing.T) { 75 | type args struct { 76 | http *transport.HttpClientCtx 77 | } 78 | tests := []struct { 79 | name string 80 | args args 81 | want string 82 | wantErr bool 83 | }{ 84 | // TODO: Add test cases.
85 | } 86 | for _, tt := range tests { 87 | t.Run(tt.name, func(t *testing.T) { 88 | got, err := newUserToken(tt.args.http) 89 | if (err != nil) != tt.wantErr { 90 | t.Errorf("newUserToken() error = %v, wantErr %v", err, tt.wantErr) 91 | return 92 | } 93 | if got != tt.want { 94 | t.Errorf("newUserToken() = %v, want %v", got, tt.want) 95 | } 96 | }) 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /api/sync15/tree_test.go: -------------------------------------------------------------------------------- 1 | package sync15 2 | 3 | import ( 4 | "io" 5 | "strings" 6 | "testing" 7 | ) 8 | 9 | func TestParseLine(t *testing.T) { 10 | line := "hash:0:docid:0:993" 11 | entry, err := parseEntry(line) 12 | if err != nil { 13 | t.Error(err) 14 | } 15 | 16 | if entry.Hash != "hash" { 17 | t.Error("wrong hash") 18 | } 19 | if entry.DocumentID != "docid" { 20 | t.Error("wrong documentid") 21 | } 22 | 23 | if entry.Size != 993 { 24 | t.Error("wrong size") 25 | } 26 | } 27 | 28 | func TestParseIndex(t *testing.T) { 29 | index := `3 30 | 0f83178c4ebe6a60fae0360b74916ee9e1faa5de1c56ab3481eccdc5cb98754f:0:fe0039fb-56a0-4561-a36f-a820f0009622.content:0:993 31 | 17eca6c9a540c993f5f5506bb09b7a40993c02fa8f065b1a6a442e412cf2fd04:0:fe0039fb-56a0-4561-a36f-a820f0009622.metadata:0:320` 32 | entries, err := parseIndex(strings.NewReader(index)) 33 | if err != nil { 34 | t.Error(err) 35 | return 36 | } 37 | if len(entries) != 2 { 38 | t.Error("wrong number of entries") 39 | return 40 | } 41 | } 42 | 43 | func TestCreateDocIndex(t *testing.T) { 44 | doc := &BlobDoc{ 45 | Entry: Entry{ 46 | Hash: "somehash", 47 | DocumentID: "someid", 48 | }, 49 | } 50 | file := &Entry{ 51 | Hash: "blah", 52 | DocumentID: "someid", 53 | Size: 10, 54 | } 55 | doc.AddFile(file) 56 | reader, err := doc.IndexReader() 57 | if err != nil { 58 | t.Error(err) 59 | return 60 | } 61 | index, err := io.ReadAll(reader) 62 | if err != nil { 63 | t.Error(err) 64 | return 65 | } 66 | expected := `3 67 | blah:0:someid:0:10 68 | ` 69 | strIndex := string(index) 70 | 71 | if strIndex != expected { 72 | t.Errorf("index did not match %s", strIndex) 73 | return 74 | } 75 | } 76 | 77 | func TestCreateRootIndex(t *testing.T) { 78 | tree := HashTree{} 79 | doc := &BlobDoc{ 80 | Entry: Entry{ 81 | Hash: "somehash", 82 | DocumentID: "someid"}, 83 | } 84 | file := &Entry{} 85 | doc.AddFile(file) 86 | tree.Add(doc) 87 | reader, err := tree.IndexReader() 88 | if err != nil { 89 | t.Error(err) 90 | return 91 | } 92 | index, err := io.ReadAll(reader) 93 | if err != nil { 94 | t.Error(err) 95 | return 96 | } 97 | expected := `3 98 | e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855:80000000:someid:1:0 99 | ` 100 | strIndex := string(index) 101 | 102 | if strIndex != expected { 103 | t.Errorf("index did not match %s", strIndex) 104 | return 105 | } 106 | 107 | } 108 | -------------------------------------------------------------------------------- /auth/store.go: -------------------------------------------------------------------------------- 1 | package auth 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "os/user" 7 | "path/filepath" 8 | 9 | "gopkg.in/yaml.v2" 10 | ) 11 | 12 | const defaultFile = ".rmapi" 13 | 14 | // TokenSet contains tokens needed for the Remarkable Cloud authentication. 15 | type TokenSet struct { 16 | // DeviceToken is a token that gets returned after 17 | // registering a device to the API. 
It can be fetched using the RegisterDevice method 18 | // or can be set manually for caching purposes. 19 | DeviceToken string `yaml:"devicetoken"` 20 | 21 | // UserToken is a token that gets returned as a second step, by the help of a previously 22 | // fetched DeviceToken and is actually used to make the proper authenticated 23 | // HTTP calls to the Remarkable API. Set to empty to force fetching it again. 24 | UserToken string `yaml:"usertoken"` 25 | } 26 | 27 | // TokenStore is an interface that will allow 28 | // to load and save tokens needed for the Remarkable Cloud API. 29 | type TokenStore interface { 30 | Save(t TokenSet) error 31 | Load() (TokenSet, error) 32 | } 33 | 34 | // FileTokenStore implements TokenStore by fetching and saving 35 | // tokens to a plain file. 36 | type FileTokenStore struct { 37 | Path string 38 | } 39 | 40 | // path returns the path of the file containing 41 | // the configuration. If path is not defined, it falls back 42 | // to the default one ($HOME/.rmapi). 43 | func (ft *FileTokenStore) path() string { 44 | if ft.Path != "" { 45 | return ft.Path 46 | } 47 | 48 | // assume not returning error 49 | usr, _ := user.Current() 50 | 51 | return filepath.Join(usr.HomeDir, defaultFile) 52 | } 53 | 54 | // Save will persist a TokenSet into a yaml file. 55 | func (ft *FileTokenStore) Save(t TokenSet) error { 56 | content, err := yaml.Marshal(t) 57 | 58 | if err != nil { 59 | return err 60 | } 61 | 62 | if err := ioutil.WriteFile(ft.path(), content, 0600); err != nil { 63 | return err 64 | } 65 | 66 | return nil 67 | } 68 | 69 | // Load will return a TokenSet with content populated 70 | // from a yaml file containing the values. 71 | func (ft *FileTokenStore) Load() (TokenSet, error) { 72 | // return empty struct if file does not exist 73 | if _, err := os.Stat(ft.path()); os.IsNotExist(err) { 74 | return TokenSet{}, nil 75 | } 76 | 77 | content, err := ioutil.ReadFile(ft.path()) 78 | if err != nil { 79 | return TokenSet{}, err 80 | } 81 | 82 | var tks TokenSet 83 | err = yaml.Unmarshal(content, &tks) 84 | if err != nil { 85 | return TokenSet{}, err 86 | } 87 | 88 | return tks, nil 89 | } 90 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os" 7 | "path/filepath" 8 | 9 | "github.com/juruen/rmapi/log" 10 | "github.com/juruen/rmapi/model" 11 | "gopkg.in/yaml.v2" 12 | ) 13 | 14 | const ( 15 | defaultConfigFile = ".rmapi" 16 | defaultConfigFileXDG = "rmapi.conf" 17 | appName = "rmapi" 18 | configFileEnvVar = "RMAPI_CONFIG" 19 | ) 20 | 21 | /* 22 | ConfigPath returns the path to the config file. It will check the following in order: 23 | - If the RMAPI_CONFIG environment variable is set, it will use that path. 24 | - If a config file exists in the user's home dir as described by os.UserHomeDir, it will use that. 25 | - Otherwise, it will use the XDG config dir, as described by os.UserConfigDir. 
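
For example (an illustrative caller-side sketch, not part of this file; error
handling is reduced to a comment):

	path, err := config.ConfigPath() // honours RMAPI_CONFIG when it is set
	if err != nil {
		// handle error
	}
	tokens := config.LoadTokens(path)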
26 | */ 27 | func ConfigPath() (string, error) { 28 | if config, ok := os.LookupEnv(configFileEnvVar); ok { 29 | return config, nil 30 | } 31 | 32 | home, err := os.UserHomeDir() 33 | if err != nil { 34 | return "", fmt.Errorf("failed to get home directory: %w", err) 35 | } 36 | 37 | config := filepath.Join(home, defaultConfigFile) 38 | 39 | //return config in home if it exists 40 | if _, err := os.Stat(config); err == nil { 41 | return config, nil 42 | } 43 | 44 | configDir, err := os.UserConfigDir() 45 | if err != nil { 46 | log.Warning.Println("cannot determine config dir, using HOME", err) 47 | return config, nil 48 | } 49 | 50 | xdgConfigDir := filepath.Join(configDir, appName) 51 | if err := os.MkdirAll(xdgConfigDir, 0700); err != nil { 52 | log.Error.Panicln("cannot create config dir "+xdgConfigDir, err) 53 | } 54 | config = filepath.Join(xdgConfigDir, defaultConfigFileXDG) 55 | 56 | return config, nil 57 | 58 | } 59 | 60 | func LoadTokens(path string) model.AuthTokens { 61 | tokens := model.AuthTokens{} 62 | 63 | if _, err := os.Stat(path); os.IsNotExist(err) { 64 | log.Trace.Printf("config file %s doesn't exist\n", path) 65 | return tokens 66 | } 67 | 68 | content, err := ioutil.ReadFile(path) 69 | 70 | if err != nil { 71 | log.Warning.Printf("failed to open %s with %s\n", path, err) 72 | return tokens 73 | } 74 | 75 | err = yaml.Unmarshal(content, &tokens) 76 | 77 | if err != nil { 78 | log.Error.Fatalln("failed to parse", path) 79 | } 80 | 81 | return tokens 82 | } 83 | 84 | func SaveTokens(path string, tokens model.AuthTokens) { 85 | content, err := yaml.Marshal(tokens) 86 | 87 | if err != nil { 88 | log.Warning.Println("failed to marshal tokens", err) 89 | } 90 | 91 | err = ioutil.WriteFile(path, content, 0600) 92 | 93 | if err != nil { 94 | log.Warning.Println("failed to save config to", path, err) 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /api/api.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "log" 7 | "strings" 8 | "time" 9 | 10 | "github.com/golang-jwt/jwt" 11 | "github.com/juruen/rmapi/api/sync10" 12 | "github.com/juruen/rmapi/api/sync15" 13 | "github.com/juruen/rmapi/filetree" 14 | "github.com/juruen/rmapi/model" 15 | "github.com/juruen/rmapi/transport" 16 | ) 17 | 18 | type ApiCtx interface { 19 | Filetree() *filetree.FileTreeCtx 20 | FetchDocument(docId, dstPath string) error 21 | CreateDir(parentId, name string, notify bool) (*model.Document, error) 22 | UploadDocument(parentId string, sourceDocPath string, notify bool) (*model.Document, error) 23 | MoveEntry(src, dstDir *model.Node, name string) (*model.Node, error) 24 | DeleteEntry(node *model.Node) error 25 | SyncComplete() error 26 | Nuke() error 27 | Refresh() error 28 | } 29 | 30 | type UserToken struct { 31 | Auth0 struct { 32 | UserID string 33 | Email string 34 | } `json:"auth0-profile"` 35 | Scopes string 36 | *jwt.StandardClaims 37 | } 38 | 39 | type SyncVersion int 40 | 41 | const ( 42 | Version10 SyncVersion = 10 43 | Version15 SyncVersion = 15 44 | ) 45 | 46 | func (s SyncVersion) String() string { 47 | switch s { 48 | case Version10: 49 | return "1.0" 50 | case Version15: 51 | return "1.5" 52 | default: 53 | return "unknown" 54 | } 55 | } 56 | 57 | type UserInfo struct { 58 | SyncVersion SyncVersion 59 | User string 60 | } 61 | 62 | func ParseToken(userToken string) (token *UserInfo, err error) { 63 | claims := UserToken{} 64 | _, _, err =
(&jwt.Parser{}).ParseUnverified(userToken, &claims) 65 | 66 | if err != nil { 67 | return nil, fmt.Errorf("can't parse token %v", err) 68 | } 69 | 70 | if !claims.VerifyExpiresAt(time.Now().Unix(), false) { 71 | return nil, errors.New("token Expired") 72 | } 73 | 74 | token = &UserInfo{ 75 | User: claims.Auth0.Email, 76 | SyncVersion: Version10, 77 | } 78 | 79 | scopes := strings.Fields(claims.Scopes) 80 | 81 | for _, scope := range scopes { 82 | switch scope { 83 | case "sync:fox", "sync:tortoise", "sync:hare": 84 | token.SyncVersion = Version15 85 | return 86 | } 87 | } 88 | return token, nil 89 | } 90 | 91 | // CreateApiCtx initializes an instance of ApiCtx 92 | func CreateApiCtx(httpCtx *transport.HttpClientCtx, syncVerison SyncVersion) (ctx ApiCtx, err error) { 93 | switch syncVerison { 94 | case Version10: 95 | return sync10.CreateCtx(httpCtx) 96 | case Version15: 97 | return sync15.CreateCtx(httpCtx) 98 | default: 99 | log.Fatal("Unsupported sync version") 100 | } 101 | return 102 | } 103 | -------------------------------------------------------------------------------- /cloud/client.go: -------------------------------------------------------------------------------- 1 | package cloud 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "io" 7 | "net/http" 8 | "net/url" 9 | 10 | "github.com/pkg/errors" 11 | ) 12 | 13 | const defaultUserAgent = "rmapi" 14 | const defaultBaseURL = "https://document-storage-production-dot-remarkable-production.appspot.com" 15 | 16 | // A Client manages communication with the Remarkable Cloud API. 17 | type Client struct { 18 | // By making the base URL configurable we can make it 19 | // testable by passing the URL of a httptest.Server. 20 | // That also means that the Client is acting upon a same base URL for all requests. 21 | BaseURL *url.URL 22 | 23 | UserAgent string 24 | 25 | // The cloud package does not directly handle authentication. 26 | // Instead, when creating a new client, pass an http.Client that 27 | // can handle authentication for you. 28 | // The easiest and recommended way to do this is using the auth package. 29 | httpClient *http.Client 30 | } 31 | 32 | // NewClient instanciates and configures the default URL and 33 | // user agent for the http client. 34 | func NewClient(httpClient *http.Client) *Client { 35 | url, _ := url.Parse(defaultBaseURL) 36 | 37 | return &Client{ 38 | httpClient: httpClient, 39 | UserAgent: defaultUserAgent, 40 | BaseURL: url, 41 | } 42 | } 43 | 44 | // newRequest creates an http.Request with a method, a relative url path 45 | // and a payload. Query string parameters are not handled. 46 | func (c *Client) newRequest(method, path string, body interface{}) (*http.Request, error) { 47 | rel := &url.URL{Path: path} 48 | url := c.BaseURL.ResolveReference(rel) 49 | 50 | var buf io.ReadWriter 51 | if body != nil { 52 | buf = new(bytes.Buffer) 53 | err := json.NewEncoder(buf).Encode(body) 54 | if err != nil { 55 | return nil, errors.Wrap(err, "can't encode payload") 56 | } 57 | } 58 | 59 | req, err := http.NewRequest(method, url.String(), buf) 60 | if err != nil { 61 | return nil, errors.Wrapf(err, "can't create request: %s", url.String()) 62 | } 63 | 64 | if body != nil { 65 | req.Header.Set("Content-Type", "application/json") 66 | } 67 | 68 | req.Header.Set("Accept", "application/json") 69 | req.Header.Set("User-Agent", c.UserAgent) 70 | 71 | return req, nil 72 | } 73 | 74 | // do proceeds to the execution of the request and fills v with the answer. 
75 | func (c *Client) do(req *http.Request, v interface{}) (*http.Response, error) { 76 | resp, err := c.httpClient.Do(req) 77 | if err != nil { 78 | return nil, errors.Wrap(err, "can't execute request") 79 | } 80 | defer resp.Body.Close() 81 | 82 | if v != nil { 83 | if err := json.NewDecoder(resp.Body).Decode(v); err != nil { 84 | return nil, errors.Wrap(err, "can't decode response content") 85 | } 86 | } 87 | 88 | return resp, nil 89 | } 90 | -------------------------------------------------------------------------------- /shell/rmfs_completer.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "fmt" 5 | "path" 6 | "strings" 7 | 8 | "github.com/juruen/rmapi/log" 9 | "github.com/juruen/rmapi/model" 10 | ) 11 | 12 | func prefixToNodeDir(ctx *ShellCtxt, s []string) (*model.Node, string) { 13 | node := ctx.node 14 | isPrefix := len(s) > 0 && s[len(s)-1] != "" 15 | 16 | log.Trace.Println("isPrefix", isPrefix) 17 | 18 | if !isPrefix { 19 | return node, "" 20 | } 21 | 22 | prefix := unescapeSpaces(s[len(s)-1]) 23 | 24 | log.Trace.Println("prefix", prefix) 25 | 26 | node, err := ctx.api.Filetree().NodeByPath(prefix, ctx.node) 27 | 28 | // Prefix matches an entry 29 | if err == nil { 30 | if node.IsDirectory() { 31 | if strings.HasSuffix(prefix, "/") { 32 | return node, prefix 33 | } else { 34 | return node.Parent, path.Dir(prefix) 35 | } 36 | } 37 | } 38 | 39 | base := path.Base(prefix) 40 | 41 | log.Trace.Println("base", base) 42 | 43 | if base == prefix { 44 | return ctx.node, "" 45 | } 46 | 47 | dir := path.Dir(prefix) 48 | 49 | log.Trace.Println("dir", dir) 50 | 51 | node, err = ctx.api.Filetree().NodeByPath(dir, ctx.node) 52 | 53 | if err != nil { 54 | return nil, "" 55 | } 56 | 57 | return node, dir 58 | } 59 | 60 | type nodeCheckFn func(*model.Node) bool 61 | 62 | func createDirCompleter(ctx *ShellCtxt) func([]string) []string { 63 | return createCompleter(ctx, func(n *model.Node) bool { return n.IsDirectory() }) 64 | } 65 | 66 | func createFileCompleter(ctx *ShellCtxt) func([]string) []string { 67 | return createCompleter(ctx, func(n *model.Node) bool { return n.IsFile() }) 68 | } 69 | 70 | func createEntryCompleter(ctx *ShellCtxt) func([]string) []string { 71 | return createCompleter(ctx, func(n *model.Node) bool { return true }) 72 | } 73 | 74 | func createCompleter(ctx *ShellCtxt, check nodeCheckFn) func([]string) []string { 75 | return func(s []string) []string { 76 | options := make([]string, 0) 77 | 78 | log.Trace.Println("completer:", s, len(s)) 79 | 80 | node, dir := prefixToNodeDir(ctx, s) 81 | 82 | if node == nil { 83 | return options 84 | } 85 | 86 | for _, n := range node.Children { 87 | if !check(n) { 88 | continue 89 | } 90 | 91 | var entry string 92 | if n.IsDirectory() { 93 | entry = fmt.Sprintf("%s/", n.Name()) 94 | } else { 95 | entry = fmt.Sprintf("%s", n.Name()) 96 | } 97 | 98 | if dir != "" { 99 | if !strings.HasSuffix(dir, "/") { 100 | dir += "/" 101 | } 102 | entry = fmt.Sprintf("%s%s", dir, entry) 103 | } 104 | 105 | entry = escapeSpaces(entry) 106 | 107 | options = append(options, entry) 108 | } 109 | 110 | log.Trace.Println("options", options) 111 | 112 | return options 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /shell/fs_completer.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os" 7 | "path" 8 | "strings" 9 | 10 | 
"github.com/juruen/rmapi/log" 11 | ) 12 | 13 | func prefixToDir(s []string) string { 14 | isPrefix := len(s) > 0 && s[len(s)-1] != "" 15 | 16 | log.Trace.Println("isPrefix", isPrefix) 17 | 18 | if !isPrefix { 19 | return "./" 20 | } 21 | 22 | prefix := unescapeSpaces(s[len(s)-1]) 23 | 24 | log.Trace.Println("prefix", prefix) 25 | 26 | fstat, err := os.Stat(prefix) 27 | 28 | // Prefix matches an entry 29 | if err == nil { 30 | if fstat.IsDir() { 31 | if strings.HasSuffix(prefix, "/") { 32 | return prefix 33 | } else { 34 | return path.Dir(prefix) 35 | } 36 | } 37 | } 38 | 39 | base := path.Base(prefix) 40 | 41 | log.Trace.Println("base", base) 42 | 43 | if base == prefix { 44 | return "" 45 | } 46 | 47 | dir := path.Dir(prefix) 48 | 49 | log.Trace.Println("dir", dir) 50 | 51 | fstat, err = os.Stat(dir) 52 | 53 | if err != nil || !fstat.IsDir() { 54 | return "" 55 | } 56 | 57 | return dir 58 | } 59 | 60 | type fileCheckFn func(os.FileInfo) bool 61 | 62 | func createFsDirCompleter(ctx *ShellCtxt) func([]string) []string { 63 | return createFsCompleter(func(e os.FileInfo) bool { return e.IsDir() }) 64 | } 65 | 66 | func createFsFileCompleter(ctx *ShellCtxt) func([]string) []string { 67 | return createFsCompleter(func(e os.FileInfo) bool { return !e.IsDir() }) 68 | } 69 | 70 | func createFsEntryCompleter() func([]string) []string { 71 | return createFsCompleter(func(e os.FileInfo) bool { return true }) 72 | } 73 | 74 | func createFsCompleter(check fileCheckFn) func([]string) []string { 75 | return func(s []string) []string { 76 | options := make([]string, 0) 77 | 78 | log.Trace.Println("completer:", len(s)) 79 | 80 | dir := prefixToDir(s) 81 | 82 | if dir == "" { 83 | return options 84 | } 85 | 86 | entries, err := ioutil.ReadDir(dir) 87 | 88 | if err != nil { 89 | return options 90 | } 91 | 92 | for _, n := range entries { 93 | if !check(n) { 94 | continue 95 | } 96 | 97 | var entry string 98 | if info, err := os.Stat(dir + "/" + n.Name()); err == nil && info.IsDir() { 99 | entry = fmt.Sprintf("%s/", n.Name()) 100 | } else { 101 | entry = fmt.Sprintf("%s", n.Name()) 102 | } 103 | 104 | if dir != "" { 105 | if !strings.HasSuffix(dir, "/") { 106 | dir += "/" 107 | } 108 | entry = fmt.Sprintf("%s%s", dir, entry) 109 | } 110 | 111 | entry = escapeSpaces(entry) 112 | 113 | if !n.IsDir() && !strings.HasSuffix(entry, ".pdf") && !strings.HasSuffix(entry, ".epub") { 114 | continue 115 | } 116 | 117 | options = append(options, entry) 118 | } 119 | 120 | log.Trace.Println("options", options) 121 | 122 | return options 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /config/config_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os" 7 | "path/filepath" 8 | "runtime" 9 | "sync" 10 | "testing" 11 | 12 | "github.com/juruen/rmapi/model" 13 | "github.com/stretchr/testify/assert" 14 | ) 15 | 16 | func TestSaveLoadConfig(t *testing.T) { 17 | tokens := model.AuthTokens{ 18 | DeviceToken: "foo", 19 | UserToken: "bar", 20 | } 21 | 22 | f, err := ioutil.TempFile("", "rmapitmp") 23 | 24 | if err != nil { 25 | panic(fmt.Sprintln("failed to create temp file")) 26 | } 27 | 28 | path := f.Name() 29 | 30 | defer os.Remove(path) 31 | 32 | SaveTokens(path, tokens) 33 | 34 | savedTokens := LoadTokens(path) 35 | 36 | assert.Equal(t, "foo", savedTokens.DeviceToken) 37 | assert.Equal(t, "bar", savedTokens.UserToken) 38 | } 39 | 40 | func TestConfigPath(t *testing.T) { 
41 | // let's not mess with the user's home dir 42 | home := "HOME" 43 | switch runtime.GOOS { 44 | case "windows": 45 | home = "USERPROFILE" 46 | case "plan9": 47 | home = "home" 48 | } 49 | if err := os.Setenv(home, os.TempDir()); err != nil { 50 | t.Error(err) 51 | } 52 | 53 | tearDown := func() { 54 | _ = os.Unsetenv(configFileEnvVar) 55 | _ = os.Remove(filepath.Join(os.TempDir(), defaultConfigFile)) 56 | } 57 | 58 | tests := []struct { 59 | name string 60 | setup func() 61 | want string 62 | wantErr bool 63 | }{ 64 | { 65 | name: "no home no env config exists", 66 | setup: func() {}, 67 | want: func() string { 68 | xdgConfigDir, err := os.UserConfigDir() 69 | if err != nil { 70 | t.Error(err) 71 | } 72 | xdgConfig := filepath.Join(xdgConfigDir, appName, defaultConfigFileXDG) 73 | return xdgConfig 74 | }(), 75 | }, 76 | { 77 | name: "home config exists", 78 | setup: func() { 79 | homeConfig := filepath.Join(os.TempDir(), defaultConfigFile) 80 | if err := ioutil.WriteFile(homeConfig, []byte("test"), 0644); err != nil { 81 | t.Error(err) 82 | } 83 | }, 84 | want: filepath.Join(os.TempDir(), defaultConfigFile), 85 | }, 86 | { 87 | name: "env config exists", 88 | setup: func() { 89 | if err := os.Setenv(configFileEnvVar, filepath.Join(os.TempDir(), "rmapi.yaml")); err != nil { 90 | t.Error(err) 91 | } 92 | }, 93 | want: filepath.Join(os.TempDir(), "rmapi.yaml"), 94 | }, 95 | } 96 | 97 | // Can't allow parallel execution because of shared file state 98 | wg := sync.WaitGroup{} 99 | for _, tt := range tests { 100 | wg.Add(1) 101 | t.Run(tt.name, func(t *testing.T) { 102 | defer wg.Done() 103 | defer tearDown() 104 | tt.setup() 105 | 106 | got, err := ConfigPath() 107 | if (err != nil) != tt.wantErr { 108 | t.Errorf("ConfigPath() error = %v, wantErr %v", err, tt.wantErr) 109 | return 110 | } 111 | if got != tt.want { 112 | t.Errorf("ConfigPath() = %v, want %v", got, tt.want) 113 | } 114 | }) 115 | wg.Wait() 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /api/auth.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "os" 7 | "strings" 8 | 9 | "github.com/google/uuid" 10 | "github.com/juruen/rmapi/config" 11 | "github.com/juruen/rmapi/log" 12 | "github.com/juruen/rmapi/model" 13 | "github.com/juruen/rmapi/transport" 14 | ) 15 | 16 | const ( 17 | defaultDeviceDesc string = "desktop-linux" 18 | ) 19 | 20 | func AuthHttpCtx(reAuth, nonInteractive bool) *transport.HttpClientCtx { 21 | configPath, err := config.ConfigPath() 22 | if err != nil { 23 | log.Error.Fatal("failed to get config path") 24 | } 25 | authTokens := config.LoadTokens(configPath) 26 | httpClientCtx := transport.CreateHttpClientCtx(authTokens) 27 | 28 | if authTokens.DeviceToken == "" { 29 | if nonInteractive { 30 | log.Error.Fatal("missing token, not asking, aborting") 31 | } 32 | deviceToken, err := newDeviceToken(&httpClientCtx, readCode()) 33 | 34 | if err != nil { 35 | log.Error.Fatal("failed to crete device token from on-time code") 36 | } 37 | 38 | log.Trace.Println("device token", deviceToken) 39 | 40 | authTokens.DeviceToken = deviceToken 41 | httpClientCtx.Tokens.DeviceToken = deviceToken 42 | 43 | config.SaveTokens(configPath, authTokens) 44 | } 45 | 46 | if authTokens.UserToken == "" || reAuth { 47 | userToken, err := newUserToken(&httpClientCtx) 48 | 49 | if err == transport.ErrUnauthorized { 50 | log.Trace.Println("Invalid deviceToken, resetting") 51 | 
authTokens.DeviceToken = "" 52 | } else if err != nil { 53 | log.Error.Fatalln("failed to create user token from device token", err) 54 | } 55 | 56 | log.Trace.Println("user token:", userToken) 57 | 58 | authTokens.UserToken = userToken 59 | httpClientCtx.Tokens.UserToken = userToken 60 | 61 | config.SaveTokens(configPath, authTokens) 62 | } 63 | 64 | return &httpClientCtx 65 | } 66 | 67 | func readCode() string { 68 | reader := bufio.NewReader(os.Stdin) 69 | fmt.Print("Enter one-time code (go to https://my.remarkable.com/device/desktop/connect): ") 70 | code, _ := reader.ReadString('\n') 71 | 72 | code = strings.TrimSuffix(code, "\n") 73 | code = strings.TrimSuffix(code, "\r") 74 | 75 | if len(code) != 8 { 76 | log.Error.Println("Code has the wrong length, it should be 8") 77 | return readCode() 78 | } 79 | 80 | return code 81 | } 82 | 83 | func newDeviceToken(http *transport.HttpClientCtx, code string) (string, error) { 84 | uuid := uuid.New() 85 | 86 | req := model.DeviceTokenRequest{code, defaultDeviceDesc, uuid.String()} 87 | 88 | resp := transport.BodyString{} 89 | err := http.Post(transport.EmptyBearer, config.NewTokenDevice, req, &resp) 90 | 91 | if err != nil { 92 | log.Error.Fatal("failed to create a new device token") 93 | return "", err 94 | } 95 | 96 | return resp.Content, nil 97 | } 98 | 99 | func newUserToken(http *transport.HttpClientCtx) (string, error) { 100 | resp := transport.BodyString{} 101 | err := http.Post(transport.DeviceBearer, config.NewUserDevice, nil, &resp) 102 | 103 | if err != nil { 104 | return "", err 105 | } 106 | 107 | return resp.Content, nil 108 | } 109 | -------------------------------------------------------------------------------- /docs/tutorial-print-macosx.md: -------------------------------------------------------------------------------- 1 | # How to directly print to your ReMarkable on Mac 2 | 3 | This tutorial will show you how to leverage [rmapi](https://github.com/juruen/rmapi) and `Automator` to print 4 | to your ReMarkable tablet from your Mac using the Cloud API. 5 | 6 | This way you won't need to take the extra step of using the desktop app. 7 | 8 | You can see a demo of it [here](https://youtu.be/gOGTYI15VxY). 9 | 10 | *Warning*: If you installed _rmapi_ before (October 1, 2018), you will need to install the latest version, 11 | otherwise _rmapi_ will fail to authenticate after the changes in the server API that were introduced on September 28, 2018. 12 | 13 | # Steps 14 | 15 | ## Open a terminal to download `rmapi` 16 | 17 | Use `terminal` or `iterm` to open a terminal where you can run commands. 18 | 19 | Download `rmapi` with the following command: 20 | 21 | ```bash 22 | curl -L https://github.com/juruen/rmapi/releases/download/v0.0.25/rmapi-macosx.zip -o rmapi.zip 23 | ``` 24 | 25 | Alternatively, you can build it from source. 26 | 27 | ## Unzip `rmapi.zip` 28 | 29 | Unzip the downloaded file: 30 | 31 | ```bash 32 | unzip rmapi.zip 33 | ``` 34 | 35 | ## Run `rmapi` for the first time 36 | 37 | You need to run `rmapi` once to create the device and user tokens. 38 | 39 | Run it with: 40 | 41 | ```bash 42 | ./rmapi 43 | ``` 44 | 45 | The first time you run it, it will ask you to go to `https://my.remarkable.com/` to enter a new activation code. 46 | 47 | You will see a prompt like this, where you just need to enter the activation code.
48 | 49 | ```bash 50 | Enter one-time code (go to https://my.remarkable.com/device/desktop/connect): 51 | ``` 52 | 53 | If everything goes OK, you will have access to the shell: 54 | 55 | ```bash 56 | ReMarkable Cloud API Shell 57 | [/]> 58 | ``` 59 | 60 | You don't need to interact with it; if you don't want to, you can type `exit` and press return to leave it. 61 | 62 | ```bash 63 | ReMarkable Cloud API Shell 64 | [/]>exit 65 | ``` 66 | 67 | If you are curious about the shell functionality, you can type `help` to see the available commands. 68 | 69 | ## Write down where `rmapi` is installed 70 | 71 | If you haven't moved the file anywhere else, it will be in `/Users/YOUR_USER_NAME/rmapi`. 72 | 73 | It is good practice to copy it to another directory, but for simplicity, we don't do that here. 74 | 75 | 76 | ## Create `Automator` script 77 | 78 | Run the `Automator` app and create a new `Print Plugin` document as shown below: 79 | 80 | ![Automator I](create-print-plugin.png) 81 | 82 | Select a `Run Shell Script` action: 83 | 84 | ![Automator II](run-shell-script-1.png) 85 | 86 | Change `Pass input` from `to stdin` to `as arguments` and type in the following content: 87 | 88 | ``` 89 | for f in "$@" 90 | do 91 | /Users/javier/rmapi put "$f" 92 | done 93 | ``` 94 | 95 | Please note that you will have to adjust the `/Users/javier/rmapi` path to match your user name or whatever path you have chosen. 96 | 97 | 98 | ![Automator III](run-shell-script-2.png) 99 | 100 | Go to the File menu and save your plugin as `Print to ReMarkable`. 101 | 102 | ## Use it 103 | 104 | Go to any application that supports printing and open the print dialog. Note that `Chrome` is slightly different because it has its own print dialog. From `Chrome`, the system print dialog can be triggered with ⌘+Option+P. 105 | 106 | In the bottom-left corner there's a `PDF` menu that you can click, and one of the options you should see is `Print to ReMarkable`. If you click it, you should see your document uploaded to your tablet in a few seconds. 107 | 108 | ![Print Dialog](print-dialog.png) 109 | -------------------------------------------------------------------------------- /shell/mget.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "errors" 5 | "flag" 6 | "fmt" 7 | "os" 8 | "path" 9 | "path/filepath" 10 | "time" 11 | 12 | "github.com/abiosoft/ishell" 13 | "github.com/juruen/rmapi/filetree" 14 | "github.com/juruen/rmapi/model" 15 | ) 16 | 17 | func mgetCmd(ctx *ShellCtxt) *ishell.Cmd { 18 | return &ishell.Cmd{ 19 | Name: "mget", 20 | Help: "recursively copy remote directory to local", 21 | Completer: createDirCompleter(ctx), 22 | Func: func(c *ishell.Context) { 23 | flagSet := flag.NewFlagSet("mget", flag.ContinueOnError) 24 | incremental := flagSet.Bool("i", false, "incremental") 25 | outputDir := flagSet.String("o", ".", "output folder") 26 | removeDeleted := flagSet.Bool("d", false, "remove deleted/moved") 27 | 28 | if err := flagSet.Parse(c.Args); err != nil { 29 | if err != flag.ErrHelp { 30 | c.Err(err) 31 | } 32 | return 33 | } 34 | 35 | target := path.Clean(*outputDir) 36 | if *removeDeleted && target == "."
{ 37 | c.Err(fmt.Errorf("set a folder explictly with the -o flag when removing deleted (and not .)")) 38 | return 39 | } 40 | 41 | argRest := flagSet.Args() 42 | if len(argRest) == 0 { 43 | c.Err(errors.New(("missing source dir"))) 44 | return 45 | } 46 | srcName := argRest[0] 47 | 48 | node, err := ctx.api.Filetree().NodeByPath(srcName, ctx.node) 49 | 50 | if err != nil || node.IsFile() { 51 | c.Err(errors.New("directory doesn't exist")) 52 | return 53 | } 54 | 55 | fileMap := make(map[string]struct{}) 56 | fileMap[target] = struct{}{} 57 | 58 | visitor := filetree.FileTreeVistor{ 59 | func(currentNode *model.Node, currentPath []string) bool { 60 | idxDir := 0 61 | if srcName == "." && len(currentPath) > 0 { 62 | idxDir = 1 63 | } 64 | 65 | fileName := currentNode.Name() + ".zip" 66 | 67 | dst := path.Join(target, filetree.BuildPath(currentPath[idxDir:], fileName)) 68 | fileMap[dst] = struct{}{} 69 | 70 | dir := path.Dir(dst) 71 | fileMap[dir] = struct{}{} 72 | 73 | os.MkdirAll(dir, 0766) 74 | 75 | if currentNode.IsDirectory() { 76 | return filetree.ContinueVisiting 77 | } 78 | 79 | lastModified, err := currentNode.LastModified() 80 | if err != nil { 81 | fmt.Printf("%v for %s\n", err, dst) 82 | lastModified = time.Now() 83 | } 84 | 85 | if *incremental { 86 | stat, err := os.Stat(dst) 87 | if err == nil { 88 | localMod := stat.ModTime() 89 | 90 | if !lastModified.After(localMod) { 91 | return filetree.ContinueVisiting 92 | } 93 | } 94 | } 95 | 96 | c.Printf("downloading [%s]...", dst) 97 | 98 | err = ctx.api.FetchDocument(currentNode.Document.ID, dst) 99 | 100 | if err == nil { 101 | c.Println(" OK") 102 | 103 | err = os.Chtimes(dst, lastModified, lastModified) 104 | if err != nil { 105 | c.Err(fmt.Errorf("cant set lastModified for %s", dst)) 106 | } 107 | return filetree.ContinueVisiting 108 | } 109 | 110 | c.Err(fmt.Errorf("Failed to download file %s", currentNode.Name())) 111 | 112 | return filetree.ContinueVisiting 113 | }, 114 | } 115 | 116 | filetree.WalkTree(node, visitor) 117 | 118 | if *removeDeleted { 119 | filepath.Walk(target, func(path string, info os.FileInfo, err error) error { 120 | if err != nil { 121 | c.Err(fmt.Errorf("can't read %s %v", path, err)) 122 | return nil 123 | } 124 | //just to be sure 125 | if path == target { 126 | return nil 127 | } 128 | if _, ok := fileMap[path]; !ok { 129 | var err error 130 | if info.IsDir() { 131 | c.Println("Removing folder ", path) 132 | err = os.RemoveAll(path) 133 | if err != nil { 134 | c.Err(err) 135 | } 136 | return filepath.SkipDir 137 | } 138 | 139 | c.Println("Removing ", path) 140 | err = os.Remove(path) 141 | if err != nil { 142 | c.Err(err) 143 | } 144 | } 145 | return nil 146 | }) 147 | } 148 | }, 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /encoding/rm/unmarshal.go: -------------------------------------------------------------------------------- 1 | package rm 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "fmt" 7 | ) 8 | 9 | // UnmarshalBinary implements encoding.UnmarshalBinary for 10 | // transforming bytes into a Rm page 11 | func (rm *Rm) UnmarshalBinary(data []byte) error { 12 | r := newReader(data) 13 | if err := r.checkHeader(); err != nil { 14 | return err 15 | } 16 | rm.Version = r.version 17 | 18 | nbLayers, err := r.readNumber() 19 | if err != nil { 20 | return err 21 | } 22 | 23 | rm.Layers = make([]Layer, nbLayers) 24 | for i := uint32(0); i < nbLayers; i++ { 25 | nbLines, err := r.readNumber() 26 | if err != nil { 27 | return err 28 | } 
29 | 30 | rm.Layers[i].Lines = make([]Line, nbLines) 31 | for j := uint32(0); j < nbLines; j++ { 32 | line, err := r.readLine() 33 | if err != nil { 34 | return err 35 | } 36 | rm.Layers[i].Lines[j] = line 37 | } 38 | } 39 | 40 | return nil 41 | } 42 | 43 | type reader struct { 44 | bytes.Reader 45 | version Version 46 | } 47 | 48 | func newReader(data []byte) reader { 49 | br := bytes.NewReader(data) 50 | 51 | // we set V5 as default but the real value is 52 | // analysed when checking the header 53 | return reader{*br, V5} 54 | } 55 | 56 | func (r *reader) checkHeader() error { 57 | buf := make([]byte, HeaderLen) 58 | 59 | n, err := r.Read(buf) 60 | if err != nil { 61 | return err 62 | } 63 | 64 | if n != HeaderLen { 65 | return fmt.Errorf("Wrong header size") 66 | } 67 | 68 | switch string(buf) { 69 | case HeaderV5: 70 | r.version = V5 71 | case HeaderV3: 72 | r.version = V3 73 | default: 74 | return fmt.Errorf("Unknown header") 75 | } 76 | 77 | return nil 78 | } 79 | 80 | func (r *reader) readNumber() (uint32, error) { 81 | var nb uint32 82 | if err := binary.Read(r, binary.LittleEndian, &nb); err != nil { 83 | return 0, fmt.Errorf("Wrong number read") 84 | } 85 | return nb, nil 86 | } 87 | 88 | func (r *reader) readLine() (Line, error) { 89 | var line Line 90 | 91 | if err := binary.Read(r, binary.LittleEndian, &line.BrushType); err != nil { 92 | return line, fmt.Errorf("Failed to read line") 93 | } 94 | 95 | if err := binary.Read(r, binary.LittleEndian, &line.BrushColor); err != nil { 96 | return line, fmt.Errorf("Failed to read line") 97 | } 98 | 99 | if err := binary.Read(r, binary.LittleEndian, &line.Padding); err != nil { 100 | return line, fmt.Errorf("Failed to read line") 101 | } 102 | 103 | if err := binary.Read(r, binary.LittleEndian, &line.BrushSize); err != nil { 104 | return line, fmt.Errorf("Failed to read line") 105 | } 106 | 107 | // this new attribute has been added in v5 108 | if r.version == V5 { 109 | if err := binary.Read(r, binary.LittleEndian, &line.Unknown); err != nil { 110 | return line, fmt.Errorf("Failed to read line") 111 | } 112 | } 113 | 114 | nbPoints, err := r.readNumber() 115 | if err != nil { 116 | return line, err 117 | } 118 | 119 | if nbPoints == 0 { 120 | return line, nil 121 | } 122 | 123 | line.Points = make([]Point, nbPoints) 124 | 125 | for i := uint32(0); i < nbPoints; i++ { 126 | p, err := r.readPoint() 127 | if err != nil { 128 | return line, err 129 | } 130 | 131 | line.Points[i] = p 132 | } 133 | 134 | return line, nil 135 | } 136 | 137 | func (r *reader) readPoint() (Point, error) { 138 | var point Point 139 | 140 | if err := binary.Read(r, binary.LittleEndian, &point.X); err != nil { 141 | return point, fmt.Errorf("Failed to read point") 142 | } 143 | if err := binary.Read(r, binary.LittleEndian, &point.Y); err != nil { 144 | return point, fmt.Errorf("Failed to read point") 145 | } 146 | if err := binary.Read(r, binary.LittleEndian, &point.Speed); err != nil { 147 | return point, fmt.Errorf("Failed to read point") 148 | } 149 | if err := binary.Read(r, binary.LittleEndian, &point.Direction); err != nil { 150 | return point, fmt.Errorf("Failed to read point") 151 | } 152 | if err := binary.Read(r, binary.LittleEndian, &point.Width); err != nil { 153 | return point, fmt.Errorf("Failed to read point") 154 | } 155 | if err := binary.Read(r, binary.LittleEndian, &point.Pressure); err != nil { 156 | return point, fmt.Errorf("Failed to read point") 157 | } 158 | 159 | return point, nil 160 | } 161 | 
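// A minimal decoding sketch (illustrative only; the file name is hypothetical and
// error handling is reduced to comments):
//
//	data, err := os.ReadFile("page.rm")
//	if err != nil {
//		// handle error
//	}
//	var page Rm
//	if err := page.UnmarshalBinary(data); err != nil {
//		// handle error
//	}
//	// page.Version, page.Layers, and the nested Lines/Points are now populated.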
-------------------------------------------------------------------------------- /filetree/filetree.go: -------------------------------------------------------------------------------- 1 | package filetree 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/juruen/rmapi/model" 7 | "github.com/juruen/rmapi/util" 8 | ) 9 | 10 | type FileTreeCtx struct { 11 | root *model.Node 12 | idToNode map[string]*model.Node 13 | pendingParent map[string]map[string]struct{} 14 | } 15 | 16 | type FileTreeVistor struct { 17 | Visit func(node *model.Node, path []string) bool 18 | } 19 | 20 | func (ctx *FileTreeCtx) Clear() { 21 | ctx.root.Children = nil 22 | } 23 | 24 | func CreateFileTreeCtx() FileTreeCtx { 25 | root := model.CreateNode(model.Document{ 26 | ID: "", 27 | Type: "CollectionType", 28 | VissibleName: "/", 29 | }) 30 | 31 | return FileTreeCtx{ 32 | &root, 33 | make(map[string]*model.Node), 34 | make(map[string]map[string]struct{}), 35 | } 36 | } 37 | 38 | func (ctx *FileTreeCtx) Root() *model.Node { 39 | return ctx.root 40 | } 41 | 42 | func (ctx *FileTreeCtx) NodeById(id string) *model.Node { 43 | if len(id) == 0 { 44 | return ctx.Root() 45 | } 46 | 47 | if n, ok := ctx.idToNode[id]; ok { 48 | return n 49 | } else { 50 | return nil 51 | } 52 | } 53 | 54 | func (ctx *FileTreeCtx) AddDocument(document *model.Document) { 55 | node := model.CreateNode(*document) 56 | nodeId := document.ID 57 | parentId := document.Parent 58 | 59 | ctx.idToNode[nodeId] = &node 60 | 61 | if parentId == "" { 62 | // This is a node whose parent is root 63 | node.Parent = ctx.root 64 | ctx.root.Children[nodeId] = &node 65 | } else if parentNode, ok := ctx.idToNode[parentId]; ok { 66 | // Parent node already processed 67 | node.Parent = parentNode 68 | parentNode.Children[nodeId] = &node 69 | } else { 70 | // Parent node hasn't been processed yet 71 | if _, ok := ctx.pendingParent[parentId]; !ok { 72 | ctx.pendingParent[parentId] = make(map[string]struct{}) 73 | } 74 | ctx.pendingParent[parentId][nodeId] = struct{}{} 75 | } 76 | 77 | // Resolve pendingChildren 78 | if pendingChildren, ok := ctx.pendingParent[nodeId]; ok { 79 | for id := range pendingChildren { 80 | ctx.idToNode[id].Parent = &node 81 | node.Children[id] = ctx.idToNode[id] 82 | } 83 | delete(ctx.pendingParent, nodeId) 84 | } 85 | } 86 | 87 | func (ctx *FileTreeCtx) DeleteNode(node *model.Node) { 88 | if node.IsRoot() { 89 | return 90 | } 91 | 92 | delete(node.Parent.Children, node.Id()) 93 | } 94 | 95 | func (ctx *FileTreeCtx) MoveNode(src, dst *model.Node) { 96 | if src.IsRoot() { 97 | return 98 | } 99 | 100 | src.Document.VissibleName = dst.Document.VissibleName 101 | src.Document.Version = dst.Document.Version 102 | src.Document.ModifiedClient = dst.Document.ModifiedClient 103 | 104 | if src.Parent != dst.Parent { 105 | delete(src.Parent.Children, src.Id()) 106 | src.Parent = dst.Parent 107 | dst.Parent.Children[src.Id()] = src 108 | } 109 | } 110 | 111 | func (ctx *FileTreeCtx) NodeByPath(path string, current *model.Node) (*model.Node, error) { 112 | if current == nil { 113 | current = ctx.Root() 114 | } 115 | 116 | entries := util.SplitPath(path) 117 | 118 | if len(entries) == 0 { 119 | return current, nil 120 | } 121 | 122 | i := 0 123 | if entries[i] == "" { 124 | current = ctx.Root() 125 | i++ 126 | } 127 | 128 | for i < len(entries) { 129 | if entries[i] == "" || entries[i] == "." { 130 | i++ 131 | continue 132 | } 133 | 134 | if entries[i] == ".." 
{ 135 | if current.Parent == nil { 136 | current = ctx.Root() 137 | } else { 138 | current = current.Parent 139 | } 140 | 141 | i++ 142 | continue 143 | } 144 | 145 | var err error 146 | current, err = current.FindByName(entries[i]) 147 | 148 | if err != nil { 149 | return nil, err 150 | } 151 | 152 | i++ 153 | } 154 | 155 | return current, nil 156 | } 157 | 158 | func (ctx *FileTreeCtx) NodeToPath(targetNode *model.Node) (string, error) { 159 | resultPath := "" 160 | found := false 161 | 162 | visitor := FileTreeVistor{ 163 | func(currentNode *model.Node, path []string) bool { 164 | if targetNode != currentNode { 165 | return ContinueVisiting 166 | } 167 | 168 | found = true 169 | resultPath = BuildPath(path, currentNode.Name()) 170 | return StopVisiting 171 | }, 172 | } 173 | 174 | WalkTree(ctx.root, visitor) 175 | 176 | if found { 177 | return resultPath, nil 178 | } else { 179 | return "", errors.New("entry not found") 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /model/document.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/google/uuid" 7 | ) 8 | 9 | const ( 10 | DirectoryType = "CollectionType" 11 | DocumentType = "DocumentType" 12 | ) 13 | 14 | type Document struct { 15 | ID string 16 | Version int 17 | Message string 18 | Success bool 19 | BlobURLGet string 20 | BlobURLGetExpires string 21 | ModifiedClient string 22 | Type string 23 | VissibleName string 24 | CurrentPage int 25 | Bookmarked bool 26 | Parent string 27 | } 28 | 29 | type MetadataDocument struct { 30 | ID string 31 | Parent string 32 | VissibleName string `json:"visibleName"` 33 | Type string 34 | Version int 35 | ModifiedClient string 36 | } 37 | 38 | type DeleteDocument struct { 39 | ID string 40 | Version int 41 | } 42 | 43 | type UploadDocumentRequest struct { 44 | ID string 45 | Type string 46 | Version int 47 | } 48 | 49 | type UploadDocumentResponse struct { 50 | ID string 51 | Version int 52 | Message string 53 | Success bool 54 | BlobURLPut string 55 | BlobURLPutExpires string 56 | } 57 | 58 | type BlobRootStorageRequest struct { 59 | Method string `json:"http_method"` 60 | Initial bool `json:"initial_sync,omitempty"` 61 | RelativePath string `json:"relative_path"` 62 | RootSchema string `json:"root_schema,omitempty"` 63 | Generation int64 `json:"generation"` 64 | } 65 | 66 | // BlobStorageRequest request 67 | type BlobStorageRequest struct { 68 | Method string `json:"http_method"` 69 | Initial bool `json:"initial_sync,omitempty"` 70 | RelativePath string `json:"relative_path"` 71 | ParentPath string `json:"parent_path,omitempty"` 72 | } 73 | 74 | // BlobStorageResponse response 75 | type BlobStorageResponse struct { 76 | Expires string `json:"expires"` 77 | Method string `json:"method"` 78 | RelativePath string `json:"relative_path"` 79 | Url string `json:"url"` 80 | MaxUploadSizeBytes int64 `json:"maxuploadsize_bytes,omitifempty"` 81 | } 82 | 83 | type RootRequest struct { 84 | Generation int64 `json:"generation"` 85 | Hash string `json:"hash"` 86 | } 87 | 88 | type PutRootRequest struct { 89 | Generation int64 `json:"generation"` 90 | Hash string `json:"hash"` 91 | Broadcast bool `json:"broadcast"` 92 | } 93 | 94 | // SyncCompleteRequest payload of the sync completion 95 | type SyncCompletedRequest struct { 96 | Generation int64 `json:"generation"` 97 | } 98 | 99 | func CreateDirDocument(parent, name string) MetadataDocument { 100 | id := 
uuid.New() 101 | 102 | return MetadataDocument{ 103 | ID: id.String(), 104 | Parent: parent, 105 | VissibleName: name, 106 | Type: DirectoryType, 107 | Version: 1, 108 | ModifiedClient: time.Now().UTC().Format(time.RFC3339Nano), 109 | } 110 | } 111 | 112 | func CreateUploadDocumentRequest(id string, entryType string) UploadDocumentRequest { 113 | if id == "" { 114 | newId := uuid.New() 115 | 116 | id = newId.String() 117 | } 118 | 119 | return UploadDocumentRequest{ 120 | id, 121 | entryType, 122 | 1, 123 | } 124 | } 125 | 126 | func CreateUploadDocumentMeta(id string, entryType, parent, name string) MetadataDocument { 127 | 128 | return MetadataDocument{ 129 | ID: id, 130 | Parent: parent, 131 | VissibleName: name, 132 | Type: entryType, 133 | Version: 1, 134 | ModifiedClient: time.Now().UTC().Format(time.RFC3339Nano), 135 | } 136 | } 137 | 138 | func (meta MetadataDocument) ToDocument() Document { 139 | return Document{ 140 | ID: meta.ID, 141 | Parent: meta.Parent, 142 | VissibleName: meta.VissibleName, 143 | Type: meta.Type, 144 | Version: 1, 145 | ModifiedClient: meta.ModifiedClient, 146 | } 147 | } 148 | 149 | func (doc Document) ToMetaDocument() MetadataDocument { 150 | return MetadataDocument{ 151 | ID: doc.ID, 152 | Parent: doc.Parent, 153 | VissibleName: doc.VissibleName, 154 | Type: doc.Type, 155 | Version: doc.Version, 156 | ModifiedClient: time.Now().UTC().Format(time.RFC3339Nano), 157 | } 158 | } 159 | 160 | func (doc Document) ToDeleteDocument() DeleteDocument { 161 | return DeleteDocument{ 162 | ID: doc.ID, 163 | Version: doc.Version, 164 | } 165 | } 166 | -------------------------------------------------------------------------------- /archive/blob.go: -------------------------------------------------------------------------------- 1 | package archive 2 | 3 | import ( 4 | "archive/zip" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "io/ioutil" 10 | "os" 11 | "path" 12 | "path/filepath" 13 | "strings" 14 | 15 | "github.com/google/uuid" 16 | "github.com/juruen/rmapi/log" 17 | "github.com/juruen/rmapi/model" 18 | "github.com/juruen/rmapi/util" 19 | ) 20 | 21 | type NamePath struct { 22 | Name string 23 | Path string 24 | } 25 | 26 | type DocumentFiles struct { 27 | Files []NamePath 28 | } 29 | 30 | func (d *DocumentFiles) AddMap(name, filepath string) { 31 | fs := NamePath{ 32 | Name: name, 33 | Path: filepath, 34 | } 35 | d.Files = append(d.Files, fs) 36 | } 37 | 38 | // Prepare prepares a file for uploading (creates needed temp files or unpacks a zip) 39 | func Prepare(name, parentId, sourceDocPath, ext, tmpDir string) (files *DocumentFiles, id string, err error) { 40 | files = &DocumentFiles{} 41 | if ext == util.ZIP { 42 | var metadataPath string 43 | id, files, metadataPath, err = Unpack(sourceDocPath, tmpDir) 44 | if err != nil { 45 | return 46 | } 47 | if id == "" { 48 | return nil, "", errors.New("could not determine the Document UUID") 49 | } 50 | if metadataPath == "" { 51 | log.Warning.Println("missing metadata, creating...", name) 52 | objectName, filePath, err1 := CreateMetadata(id, name, parentId, model.DocumentType, tmpDir) 53 | if err1 != nil { 54 | err = err1 55 | return 56 | } 57 | files.AddMap(objectName, filePath) 58 | } else { 59 | err = FixMetadata(parentId, name, metadataPath) 60 | if err != nil { 61 | return 62 | } 63 | } 64 | } else { 65 | id = uuid.New().String() 66 | objectName := id + "." 
+ ext 67 | doctype := ext 68 | var pageIds []string 69 | if ext == util.RM { 70 | pageId := uuid.New().String() 71 | objectName = fmt.Sprintf("%s/%s.rm", id, pageId) 72 | doctype = "notebook" 73 | pageIds = []string{pageId} 74 | } 75 | files.AddMap(objectName, sourceDocPath) 76 | objectName, filePath, err1 := CreateMetadata(id, name, parentId, model.DocumentType, tmpDir) 77 | if err1 != nil { 78 | err = err1 79 | return 80 | } 81 | files.AddMap(objectName, filePath) 82 | 83 | objectName, filePath, err = CreateContent(id, doctype, tmpDir, pageIds) 84 | if err != nil { 85 | return 86 | } 87 | files.AddMap(objectName, filePath) 88 | } 89 | return files, id, err 90 | } 91 | 92 | // FixMetadata fixes the metadata with the new parent and filename 93 | func FixMetadata(parentId, name, path string) error { 94 | meta := MetadataFile{} 95 | metaData, err := ioutil.ReadFile(path) 96 | if err != nil { 97 | return err 98 | } 99 | err = json.Unmarshal(metaData, &meta) 100 | if err != nil { 101 | return err 102 | } 103 | meta.Parent = parentId 104 | meta.DocName = name 105 | meta.LastModified = UnixTimestamp() 106 | 107 | metaData, err = json.Marshal(meta) 108 | if err != nil { 109 | return err 110 | } 111 | return ioutil.WriteFile(path, metaData, 0600) 112 | } 113 | 114 | // Unpack unpacks a rmapi .zip file 115 | func Unpack(src, dest string) (id string, files *DocumentFiles, metadataPath string, err error) { 116 | log.Info.Println("Unpacking in: ", dest) 117 | r, err := zip.OpenReader(src) 118 | if err != nil { 119 | return 120 | } 121 | defer r.Close() 122 | files = &DocumentFiles{} 123 | 124 | for _, f := range r.File { 125 | fname := f.Name 126 | 127 | if strings.HasSuffix(fname, ".content") { 128 | id = strings.TrimSuffix(fname, path.Ext(fname)) 129 | } 130 | // Store filename/path for returning and using later on 131 | fpath := filepath.Join(dest, f.Name) 132 | 133 | // Check for ZipSlip. More Info: http://bit.ly/2MsjAWE 134 | if !strings.HasPrefix(fpath, filepath.Clean(dest)+string(os.PathSeparator)) { 135 | err = fmt.Errorf("%s: illegal file path", fpath) 136 | return 137 | } 138 | 139 | if f.FileInfo().IsDir() { 140 | // Make Folder 141 | os.MkdirAll(fpath, os.ModePerm) 142 | continue 143 | } else { 144 | files.AddMap(f.Name, fpath) 145 | } 146 | 147 | if strings.HasSuffix(fname, ".metadata") { 148 | metadataPath = fpath 149 | } 150 | 151 | // Make File 152 | if err = os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil { 153 | return 154 | } 155 | 156 | outFile, err1 := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) 157 | if err1 != nil { 158 | err = err1 159 | return 160 | } 161 | 162 | rc, err1 := f.Open() 163 | if err != nil { 164 | err = err1 165 | return 166 | } 167 | 168 | _, err = io.Copy(outFile, rc) 169 | 170 | // Close the file without defer to close before next iteration of loop 171 | outFile.Close() 172 | rc.Close() 173 | 174 | if err != nil { 175 | return 176 | } 177 | } 178 | 179 | return id, files, metadataPath, nil 180 | } 181 | -------------------------------------------------------------------------------- /cloud/plumbing.go: -------------------------------------------------------------------------------- 1 | package cloud 2 | 3 | import ( 4 | "net/http" 5 | "net/url" 6 | 7 | "github.com/pkg/errors" 8 | ) 9 | 10 | // rawDocument represents a real object expected in most calls 11 | // by the Remarkable API. 
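// (Added editorial note.) It mixes request and response fields; the signed
// download (BlobURLGet) and upload (BlobURLPut) URLs are only populated by the
// calls that return them.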
12 | type rawDocument struct { 13 | ID string `json:"ID"` 14 | Version int `json:"Version"` 15 | Message string `json:"Message"` 16 | Success bool `json:"Success"` 17 | BlobURLGet string `json:"BlobURLGet"` 18 | BlobURLGetExpires string `json:"BlobURLGetExpires"` 19 | BlobURLPut string `json:"BlobURLPut"` 20 | BlobURLPutExpires string `json:"BlobURLPutExpires"` 21 | ModifiedClient string `json:"ModifiedClient"` 22 | Type string `json:"Type"` 23 | VissibleName string `json:"VissibleName"` 24 | CurrentPage int `json:"CurrentPage"` 25 | Bookmarked bool `json:"Bookmarked"` 26 | Parent string `json:"Parent"` 27 | } 28 | 29 | // toDocument transforms a rawDocument to a 30 | // cleaner public Document 31 | func (r rawDocument) toDocument() Document { 32 | return Document{ 33 | ID: r.ID, 34 | Version: r.Version, 35 | Type: r.Type, 36 | Name: r.VissibleName, 37 | CurrentPage: r.CurrentPage, 38 | Bookmarked: r.Bookmarked, 39 | Parent: r.Parent, 40 | } 41 | } 42 | 43 | // getDocs makes a call to the Remarkable API in order to get 44 | // a list of documents present on a device. 45 | // urlParams is a string representing optional query string parameters. 46 | // uuid can be used to filter the request to a single document. 47 | // withBlob can be used to indicate that a download url should be given as return. 48 | func (c *Client) getDocs(urlParams string) ([]rawDocument, error) { 49 | req, err := c.newRequest("GET", "document-storage/json/2/docs", nil) 50 | if err != nil { 51 | return nil, err 52 | } 53 | 54 | // add query string parameters 55 | req.URL.RawQuery = urlParams 56 | 57 | var docs []rawDocument 58 | resp, err := c.do(req, &docs) 59 | if err != nil { 60 | return nil, errors.Wrap(err, "request failed") 61 | } 62 | 63 | if resp.StatusCode != http.StatusOK { 64 | return nil, errors.Errorf("wrong http return code: %d", resp.StatusCode) 65 | } 66 | 67 | return docs, err 68 | } 69 | 70 | // getDoc calls getDocs by filtering to a precise uuid and 71 | // by including a withBlob=true parameter to include the download url as return. 72 | func (c *Client) getDoc(uuid string) (rawDocument, error) { 73 | v := url.Values{} 74 | v.Add("doc", uuid) 75 | // assume we always want to have the download url in response 76 | v.Add("withBlob", "true") 77 | 78 | rdocs, err := c.getDocs(v.Encode()) 79 | if err != nil { 80 | return rawDocument{}, errors.Wrap(err, "can't retrieve documents") 81 | } 82 | 83 | if len(rdocs) == 0 { 84 | return rawDocument{}, errors.Wrap(err, "empty document list received") 85 | } 86 | 87 | if !rdocs[0].Success { 88 | return rawDocument{}, errors.Errorf("success false received: %s", rdocs[0].Message) 89 | } 90 | 91 | return rdocs[0], nil 92 | } 93 | 94 | // uploadRequest makes an initial request to the Remarkable API to start a 95 | // document upload. 96 | // The doc parameter is used to configure the upload. 97 | // If it contains a new uuid, it will create an upload for a new document. 98 | // If it contains an existing uuid, it will try to upload another version 99 | // of a document. For the latter to work, the Version parameter of the doc should 100 | // be increased. 101 | // As return, uploadRequest will give a URL that can be used for uploading the actual 102 | // content of the document. 
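// (Added editorial note.) The URL is taken from the BlobURLPut field of the
// first document in the response; an empty URL or a Success=false response is
// reported as an error.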
103 | func (c *Client) uploadRequest(doc rawDocument) (string, error) { 104 | payload := []rawDocument{doc} 105 | 106 | req, err := c.newRequest("PUT", "document-storage/json/2/upload/request", payload) 107 | if err != nil { 108 | return "", err 109 | } 110 | 111 | var rdocs []rawDocument 112 | resp, err := c.do(req, &rdocs) 113 | if err != nil { 114 | return "", errors.Wrap(err, "request failed") 115 | } 116 | 117 | if resp.StatusCode != http.StatusOK { 118 | return "", errors.Errorf("wrong http return code: %d", resp.StatusCode) 119 | } 120 | 121 | if len(rdocs) == 0 { 122 | return "", errors.Wrap(err, "empty document list received") 123 | } 124 | 125 | if !rdocs[0].Success { 126 | return "", errors.Errorf("success false received: %s", rdocs[0].Message) 127 | } 128 | 129 | if rdocs[0].BlobURLPut == "" { 130 | return "", errors.New("empty upload url received") 131 | } 132 | 133 | return rdocs[0].BlobURLPut, nil 134 | } 135 | 136 | // getCurrentVersion makes an http call to the Remarkable API to 137 | // fetch a document from a uuid and return its current version. 138 | func (c *Client) getCurrentVersion(uuid string) (int, error) { 139 | rdoc, err := c.getDoc(uuid) 140 | if err != nil { 141 | return 0, errors.Wrap(err, "can't get document") 142 | } 143 | return rdoc.Version, nil 144 | } 145 | -------------------------------------------------------------------------------- /auth/auth.go: -------------------------------------------------------------------------------- 1 | // Package auth has the responsibility to handle the authentication 2 | // to the Remarkable Cloud API. 3 | // 4 | // For this purpose, it provides a *http.Client that can be used with the api package. 5 | // This *http.Client will hold the authentication process that will allow the api package 6 | // to interact with the Remarkable API without worrying about auth. 7 | // We do take advantage of a custom http.Transport that is by default attached to the 8 | // http.Client and that will act as a middleware to attach HTTP auth headers. 9 | // 10 | // This separation means that in the future, another auth could be implemented if 11 | // Remarkable decides to change / improve it. As well, the api package is clearer 12 | // because not cluttered by any auth processes. 13 | package auth 14 | 15 | import ( 16 | "bytes" 17 | "encoding/json" 18 | "errors" 19 | "fmt" 20 | "io/ioutil" 21 | "net/http" 22 | "time" 23 | 24 | "github.com/google/uuid" 25 | ) 26 | 27 | // ClientTimeout is the timeout set for the http.Client 28 | // that auth is providing. 29 | const ClientTimeout time.Duration = time.Second * 10 30 | 31 | const ( 32 | defaultDeviceDesc string = "desktop-windows" 33 | deviceTokenURL string = "https://my.remarkable.com/token/json/2/device/new" 34 | userTokenURL string = "https://my.remarkable.com/token/json/2/user/new" 35 | ) 36 | 37 | var defaultTokenStore FileTokenStore 38 | 39 | // Auth is a structure containing authentication gears to fetch and hold tokens 40 | // for interacting authenticated with the Remarkable Cloud API. 41 | type Auth struct { 42 | ts TokenStore 43 | 44 | // Refresh can be used to force a refresh of the UserToken. 45 | Refresh bool 46 | } 47 | 48 | func New() *Auth { 49 | return NewFromStore(&defaultTokenStore) 50 | } 51 | 52 | func NewFromStore(ts TokenStore) *Auth { 53 | return &Auth{ts, false} 54 | } 55 | 56 | // RegisterDevice will make an HTTP call to the Remarkable API using the provided code 57 | // to register a new device. 
The code should be gathered at https://my.remarkable.com/generator-device. 58 | // The DeviceToken is then attached to the Auth instance. 59 | func (a *Auth) RegisterDevice(code string) error { 60 | uuid := uuid.New() 61 | 62 | body, err := json.Marshal(map[string]string{ 63 | "code": code, 64 | "deviceDesc": defaultDeviceDesc, 65 | "deviceID": uuid.String(), 66 | }) 67 | 68 | req, err := http.NewRequest("POST", deviceTokenURL, bytes.NewBuffer(body)) 69 | if err != nil { 70 | return err 71 | } 72 | 73 | client := &http.Client{} 74 | 75 | resp, err := client.Do(req) 76 | if err != nil { 77 | return err 78 | } 79 | defer resp.Body.Close() 80 | 81 | if resp.StatusCode != http.StatusOK { 82 | return errors.New("auth: can't register device") 83 | } 84 | 85 | bearer, err := ioutil.ReadAll(resp.Body) 86 | if err != nil { 87 | return err 88 | } 89 | 90 | tks := TokenSet{ 91 | DeviceToken: string(bearer), 92 | UserToken: "", 93 | } 94 | 95 | // persist device token and reset user token 96 | if err := a.ts.Save(tks); err != nil { 97 | return err 98 | } 99 | 100 | return nil 101 | } 102 | 103 | // renewToken will try to fetch a userToken from a deviceToken. 104 | func renewToken(deviceToken string) (userToken string, err error) { 105 | req, err := http.NewRequest("POST", userTokenURL, nil) 106 | if err != nil { 107 | return "", err 108 | } 109 | 110 | req.Header.Set("Authorization", "Bearer "+deviceToken) 111 | 112 | client := &http.Client{} 113 | resp, err := client.Do(req) 114 | if err != nil { 115 | return "", err 116 | } 117 | defer resp.Body.Close() 118 | 119 | if resp.StatusCode != http.StatusOK { 120 | return "", fmt.Errorf("auth: can't renew token (HTTP %d)", resp.StatusCode) 121 | } 122 | 123 | bearer, err := ioutil.ReadAll(resp.Body) 124 | if err != nil { 125 | return "", err 126 | } 127 | 128 | return string(bearer), nil 129 | } 130 | 131 | // Token will return a UserToken fetching it before if nil. 132 | func (a *Auth) Token() (string, error) { 133 | tks, err := a.ts.Load() 134 | if err != nil { 135 | return "", err 136 | } 137 | 138 | if tks.UserToken != "" && !a.Refresh { 139 | return tks.UserToken, nil 140 | } 141 | 142 | if tks.DeviceToken == "" { 143 | return "", errors.New("auth: nil DeviceToken, please register device") 144 | } 145 | 146 | tks.UserToken, err = renewToken(tks.DeviceToken) 147 | if err != nil { 148 | return "", err 149 | } 150 | 151 | if err := a.ts.Save(tks); err != nil { 152 | return "", err 153 | } 154 | 155 | // reset the Refresh flag when the token has been renewed 156 | a.Refresh = false 157 | 158 | return tks.UserToken, nil 159 | } 160 | 161 | // Client returns a configured http.Client that will hold a custom Transport 162 | // with authentication capabilities to the Remarkable Cloud API. 163 | func (a *Auth) Client() *http.Client { 164 | t := Transport{ 165 | Auth: a, 166 | } 167 | 168 | c := http.Client{ 169 | Transport: &t, 170 | Timeout: ClientTimeout, 171 | } 172 | 173 | return &c 174 | } 175 | -------------------------------------------------------------------------------- /archive/file.go: -------------------------------------------------------------------------------- 1 | package archive 2 | 3 | import ( 4 | "github.com/juruen/rmapi/encoding/rm" 5 | ) 6 | 7 | // Set the default pagedata template to Blank 8 | const defaultPagadata string = "Blank" 9 | 10 | // Zip represents an entire Remarkable archive file. 
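// (Added editorial note.) It bundles the .content description, the individual
// Pages, the optional source payload (pdf/epub bytes) and the document UUID.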
11 | type Zip struct { 12 | Content Content 13 | Pages []Page 14 | Payload []byte 15 | UUID string 16 | pageMap map[string]int 17 | } 18 | 19 | // NewZip creates a File with sane defaults. 20 | func NewZip() *Zip { 21 | content := Content{ 22 | DummyDocument: false, 23 | ExtraMetadata: ExtraMetadata{ 24 | LastBrushColor: "Black", 25 | LastBrushThicknessScale: "2", 26 | LastColor: "Black", 27 | LastEraserThicknessScale: "2", 28 | LastEraserTool: "Eraser", 29 | LastPen: "Ballpoint", 30 | LastPenColor: "Black", 31 | LastPenThicknessScale: "2", 32 | LastPencil: "SharpPencil", 33 | LastPencilColor: "Black", 34 | LastPencilThicknessScale: "2", 35 | LastTool: "SharpPencil", 36 | ThicknessScale: "2", 37 | LastFinelinerv2Size: "1", 38 | }, 39 | FileType: "", 40 | FontName: "", 41 | LastOpenedPage: 0, 42 | LineHeight: -1, 43 | Margins: 100, 44 | Orientation: "portrait", 45 | PageCount: 0, 46 | Pages: []string{}, 47 | TextScale: 1, 48 | Transform: Transform{ 49 | M11: 1, 50 | M12: 0, 51 | M13: 0, 52 | M21: 0, 53 | M22: 1, 54 | M23: 0, 55 | M31: 0, 56 | M32: 0, 57 | M33: 1, 58 | }, 59 | } 60 | 61 | return &Zip{ 62 | Content: content, 63 | } 64 | } 65 | 66 | // A Page represents a note page. 67 | type Page struct { 68 | // Data is the rm binary encoded file representing the drawn content 69 | Data *rm.Rm 70 | // Metadata is a json file containing information about layers 71 | Metadata Metadata 72 | // Thumbnail is a small image of the overall page 73 | Thumbnail []byte 74 | // Pagedata contains the name of the selected background template 75 | Pagedata string 76 | // page number of the underlying document 77 | DocPage int 78 | } 79 | 80 | // Metadata represents the structure of a .metadata json file associated to a page. 81 | type Metadata struct { 82 | Layers []Layer `json:"layers"` 83 | } 84 | 85 | // Layers is a struct contained into a Metadata struct. 86 | type Layer struct { 87 | Name string `json:"name"` 88 | } 89 | 90 | // Content represents the structure of a .content json file. 91 | type Content struct { 92 | DummyDocument bool `json:"dummyDocument"` 93 | ExtraMetadata ExtraMetadata `json:"extraMetadata"` 94 | 95 | // FileType is "pdf", "epub" or empty for a simple note 96 | FileType string `json:"fileType"` 97 | FontName string `json:"fontName"` 98 | LastOpenedPage int `json:"lastOpenedPage"` 99 | LineHeight int `json:"lineHeight"` 100 | Margins int `json:"margins"` 101 | // Orientation can take "portrait" or "landscape". 102 | Orientation string `json:"orientation"` 103 | PageCount int `json:"pageCount"` 104 | // Pages is a list of page IDs 105 | Pages []string `json:"pages"` 106 | Tags []string `json:"pageTags"` 107 | RedirectionMap []int `json:"redirectionPageMap"` 108 | TextScale int `json:"textScale"` 109 | 110 | Transform Transform `json:"transform"` 111 | } 112 | 113 | // ExtraMetadata is a struct contained into a Content struct. 
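// (Added editorial note.) It records the last-used tool settings for the
// document: pen, pencil, brush and eraser, along with their colors and
// thickness scales.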
114 | type ExtraMetadata struct { 115 | LastBrushColor string `json:"LastBrushColor"` 116 | LastBrushThicknessScale string `json:"LastBrushThicknessScale"` 117 | LastColor string `json:"LastColor"` 118 | LastEraserThicknessScale string `json:"LastEraserThicknessScale"` 119 | LastEraserTool string `json:"LastEraserTool"` 120 | LastPen string `json:"LastPen"` 121 | LastPenColor string `json:"LastPenColor"` 122 | LastPenThicknessScale string `json:"LastPenThicknessScale"` 123 | LastPencil string `json:"LastPencil"` 124 | LastPencilColor string `json:"LastPencilColor"` 125 | LastPencilThicknessScale string `json:"LastPencilThicknessScale"` 126 | LastTool string `json:"LastTool"` 127 | ThicknessScale string `json:"ThicknessScale"` 128 | LastFinelinerv2Size string `json:"LastFinelinerv2Size"` 129 | } 130 | 131 | // Transform is a struct contained into a Content struct. 132 | type Transform struct { 133 | M11 float32 `json:"m11"` 134 | M12 float32 `json:"m12"` 135 | M13 float32 `json:"m13"` 136 | M21 float32 `json:"m21"` 137 | M22 float32 `json:"m22"` 138 | M23 float32 `json:"m23"` 139 | M31 float32 `json:"m31"` 140 | M32 float32 `json:"m32"` 141 | M33 float32 `json:"m33"` 142 | } 143 | 144 | // MetadataFile content 145 | type MetadataFile struct { 146 | DocName string `json:"visibleName"` 147 | CollectionType string `json:"type"` 148 | Parent string `json:"parent"` 149 | //LastModified in milliseconds 150 | LastModified string `json:"lastModified"` 151 | LastOpened string `json:"lastOpened"` 152 | LastOpenedPage int `json:"lastOpenedPage"` 153 | Version int `json:"version"` 154 | Pinned bool `json:"pinned"` 155 | Synced bool `json:"synced"` 156 | Modified bool `json:"modified"` 157 | Deleted bool `json:"deleted"` 158 | MetadataModified bool `json:"metadatamodified"` 159 | } 160 | -------------------------------------------------------------------------------- /archive/writer.go: -------------------------------------------------------------------------------- 1 | package archive 2 | 3 | import ( 4 | "archive/zip" 5 | "bufio" 6 | "encoding/json" 7 | "fmt" 8 | "io" 9 | "path/filepath" 10 | "time" 11 | 12 | "github.com/google/uuid" 13 | ) 14 | 15 | // Write writes an archive file from a Zip struct. 16 | // It automatically generates a uuid if not already 17 | // defined in the struct. 18 | func (z *Zip) Write(w io.Writer) error { 19 | // generate random uuid if not defined 20 | if z.UUID == "" { 21 | z.UUID = uuid.New().String() 22 | } 23 | 24 | archive := zip.NewWriter(w) 25 | 26 | if err := z.writeContent(archive); err != nil { 27 | return err 28 | } 29 | 30 | if err := z.writePayload(archive); err != nil { 31 | return err 32 | } 33 | 34 | if err := z.writePagedata(archive); err != nil { 35 | return err 36 | } 37 | 38 | if err := z.writeThumbnails(archive); err != nil { 39 | return err 40 | } 41 | 42 | if err := z.writeMetadata(archive); err != nil { 43 | return err 44 | } 45 | 46 | if err := z.writeData(archive); err != nil { 47 | return err 48 | } 49 | 50 | archive.Close() 51 | 52 | return nil 53 | } 54 | 55 | // writeContent writes the .content file to the archive. 
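// (Added editorial note.) The file is named <UUID>.content and holds the
// Content struct serialized as indented JSON.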
56 | func (z *Zip) writeContent(zw *zip.Writer) error { 57 | bytes, err := json.MarshalIndent(&z.Content, "", " ") 58 | if err != nil { 59 | return err 60 | } 61 | 62 | name := fmt.Sprintf("%s.content", z.UUID) 63 | 64 | w, err := addToZip(zw, name) 65 | if err != nil { 66 | return err 67 | } 68 | 69 | if _, err := w.Write(bytes); err != nil { 70 | return err 71 | } 72 | 73 | return nil 74 | } 75 | 76 | // writePdf writes a pdf file to the archive if existing in the struct. 77 | func (z *Zip) writePayload(zw *zip.Writer) error { 78 | // skip if no pdf 79 | if z.Payload == nil { 80 | return nil 81 | } 82 | 83 | name := fmt.Sprintf("%s.%s", z.UUID, z.Content.FileType) 84 | 85 | w, err := addToZip(zw, name) 86 | if err != nil { 87 | return err 88 | } 89 | 90 | if _, err := w.Write(z.Payload); err != nil { 91 | return err 92 | } 93 | 94 | return nil 95 | } 96 | 97 | // writePagedata writes a .pagedata file containing 98 | // the name of background templates for each page (one per line). 99 | func (z *Zip) writePagedata(zw *zip.Writer) error { 100 | // don't add pagedata file if no pages 101 | if len(z.Pages) == 0 { 102 | return nil 103 | } 104 | 105 | name := fmt.Sprintf("%s.pagedata", z.UUID) 106 | 107 | w, err := addToZip(zw, name) 108 | if err != nil { 109 | return err 110 | } 111 | 112 | bw := bufio.NewWriter(w) 113 | for _, page := range z.Pages { 114 | template := page.Pagedata 115 | 116 | // set default if empty 117 | if template == "" { 118 | template = defaultPagadata 119 | } 120 | 121 | bw.WriteString(template + "\n") 122 | } 123 | 124 | // write to the underlying io.Writer 125 | bw.Flush() 126 | 127 | return nil 128 | } 129 | 130 | // writeThumbnails writes thumbnail files for each page 131 | // in the archive. 132 | func (z *Zip) writeThumbnails(zw *zip.Writer) error { 133 | for idx, page := range z.Pages { 134 | if page.Thumbnail == nil { 135 | continue 136 | } 137 | 138 | folder := fmt.Sprintf("%s.thumbnail", z.UUID) 139 | name := fmt.Sprintf("%d.jpg", idx) 140 | fn := filepath.Join(folder, name) 141 | 142 | w, err := addToZip(zw, fn) 143 | if err != nil { 144 | return err 145 | } 146 | 147 | if _, err := w.Write(page.Thumbnail); err != nil { 148 | return err 149 | } 150 | 151 | } 152 | 153 | return nil 154 | } 155 | 156 | // writeMetadata writes .json metadata files for each page 157 | // in the archive. 158 | func (z *Zip) writeMetadata(zw *zip.Writer) error { 159 | for idx, page := range z.Pages { 160 | // if no layers available, don't write the metadata file 161 | if len(page.Metadata.Layers) == 0 { 162 | continue 163 | } 164 | 165 | name := fmt.Sprintf("%d-metadata.json", idx) 166 | fn := filepath.Join(z.UUID, name) 167 | 168 | w, err := addToZip(zw, fn) 169 | if err != nil { 170 | return err 171 | } 172 | 173 | bytes, err := json.MarshalIndent(&page.Metadata, "", " ") 174 | if err != nil { 175 | return err 176 | } 177 | 178 | if _, err := w.Write(bytes); err != nil { 179 | return err 180 | } 181 | 182 | } 183 | 184 | return nil 185 | } 186 | 187 | // writeData writes .rm data files for each page 188 | // in the archive. 
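// (Added editorial note.) Each page's rm data is marshaled to its binary form
// and stored as <UUID>/<index>.rm; pages without data are skipped.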
189 | func (z *Zip) writeData(zw *zip.Writer) error { 190 | for idx, page := range z.Pages { 191 | if page.Data == nil { 192 | continue 193 | } 194 | 195 | name := fmt.Sprintf("%d.rm", idx) 196 | fn := filepath.Join(z.UUID, name) 197 | 198 | w, err := addToZip(zw, fn) 199 | if err != nil { 200 | return err 201 | } 202 | 203 | bytes, err := page.Data.MarshalBinary() 204 | if err != nil { 205 | return err 206 | } 207 | 208 | if _, err := w.Write(bytes); err != nil { 209 | return err 210 | } 211 | 212 | } 213 | 214 | return nil 215 | } 216 | 217 | // addToZip takes a zip.Writer in parameter and creates an io.Writer 218 | // to write the content of a file to add to the zip. 219 | func addToZip(zw *zip.Writer, name string) (io.Writer, error) { 220 | h := &zip.FileHeader{ 221 | Name: name, 222 | Method: zip.Store, 223 | ModifiedTime: uint16(time.Now().UnixNano()), 224 | ModifiedDate: uint16(time.Now().UnixNano()), 225 | } 226 | 227 | writer, err := zw.CreateHeader(h) 228 | if err != nil { 229 | return nil, err 230 | } 231 | 232 | return writer, nil 233 | } 234 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## rmapi master 2 | 3 | ## rmapi 0.0.24 (December 30, 2022) 4 | 5 | - Make sure Windows binaries have an exe extension (@juruen) 6 | 7 | - Use go 1.19 (@juruen) 8 | 9 | ## rmapi 0.0.23 (December 01, 2022) 10 | 11 | - sync 1.5: use parallel requests when getting remote tree (@ddvk) 12 | 13 | - fix syncomplete in mput (@ddvk) 14 | 15 | - update auth url (@ddvk) 16 | 17 | - Add support to nuke 1.5 (@ddvk) 18 | 19 | 20 | ## rmapi 0.0.22 (October 05, 2022) 21 | 22 | - Update 1.5 sync URLs, implement v2 API (@ddvk) 23 | 24 | ## rmapi 0.0.21 (September 04, 2022) 25 | 26 | - Fix time conversion (@ddvk) 27 | 28 | - Update install instructions (@ddvk) 29 | 30 | - Tweak export brush size (@pacien) 31 | 32 | - Update x/sys dep to fix builds with newer Go versions (@andrew-d) 33 | 34 | - Use Go 1.18 35 | 36 | ## rmapi 0.0.20 (July 6, 2022) 37 | - Fix timestamps for sync 1.5 (@ddvk) 38 | 39 | - Output warnings for to stderr for sync 1.5 (@ddvk) 40 | 41 | - Some hack with pages (@abmantis) 42 | 43 | ## rmapi 0.0.19 (January 29, 2022) 44 | 45 | - Fix issue with rate limiting and mput (@ddvk) 46 | 47 | - Fix type in help test (@myersjustinc) 48 | 49 | ## rmapi 0.0.18 (November 15, 2021) 50 | 51 | * More fixes for new sync protocol (@ddvk) 52 | 53 | ## rmapi 0.0.17 (November 02, 2021) 54 | 55 | * Some fixes for new sync protocol (@ddvk) 56 | 57 | ## rmapi 0.0.16 (October 24, 2021) 58 | 59 | * Use Go 1.16 60 | 61 | * Fixed OTP url in CLI and docs 62 | 63 | * Add experimental support for new sync protocol 64 | 65 | ## rmapi 0.0.15 (May 19, 2021) 66 | 67 | * Fix authentication URL 68 | 69 | * stats outputs JSON now 70 | 71 | ## rmapi 0.0.14 (May 03, 2021) 72 | 73 | * Don't dump request/responses when tracing disabled 74 | to fix OOM on low end devices (@ddvk) 75 | 76 | * Fix small typo (Jasdev Singh) 77 | 78 | * Show entry's type in find command output (@ddvk) 79 | 80 | * Remove device token if unable to authenticate (@ddvk) 81 | 82 | * Upload .rm page files (@ddvk) 83 | 84 | * Fix panic in mv command (Casey Marshall) 85 | 86 | * Update install instructions with config folder command (@Caleb) 87 | 88 | * Update doc to correct OTC URL (Hwee-Boon Yar) 89 | 90 | * Fix BrushSize parsing (@jgoel) 91 | 92 | ## rmapi 0.0.13 (December 08, 2020) 93 | 94 | * Copy table contents when 
creating annotations (@ddvk) 95 | 96 | * Incremental sync (@ddvk) 97 | 98 | * Fix auth retries (@ddvk) 99 | 100 | * Use nested config dir under XDG (@jebw) 101 | 102 | * Bump go verison to 1.15 (@juruen) 103 | 104 | ## rmapi 0.0.12 (June 18, 2020) 105 | 106 | * Use XDG paths conf config file (@ddvk) 107 | 108 | * Fix issue where documents are downloaded again 109 | when the device reboots (@ddvk) 110 | 111 | * Fix annnoation issue where Acrobat Reader wouldn't display 112 | annotations correctly (@ddvk) 113 | 114 | ## rmapi 0.0.11 (April 28, 2020) 115 | 116 | * Add env variables to override cloud API hosts (@ddvk) 117 | 118 | * Upload downloaded zip files (@ddvk) 119 | 120 | * Bug fix: use UTC time when setting document's upload time (@ddvk) 121 | 122 | * Add support to optinally create thumbnails for uploaded documents (@ddvk) 123 | 124 | * Update CI scripts to use Go 1.13 (@ddvk) 125 | 126 | ## rmapi 0.0.10 (April 17, 2020) 127 | 128 | * Multiple annotation fixes (@ddvk) 129 | 130 | * Add support to create thumbnails in large PDF docs (@ddvk) 131 | 132 | * Use community license instead a UniPDF fork (@ddvk) 133 | 134 | * Fix put bug to allow directories and files with the same name (@GjjvdBurg) 135 | 136 | ## rmapi 0.0.9 (February 01, 2020) 137 | 138 | * Change license to AGPL 139 | 140 | * Initial support for PDF annotations with UniPDF 141 | 142 | ## rmapi 0.0.8 (January 06, 2020) 143 | 144 | * Add support for v5 annotations 145 | 146 | ## rmapi 0.0.7 (November 11, 2019) 147 | 148 | * Rename api.v2 to cloud 149 | 150 | ## rmapi 0.0.6 (September 08, 2019) 151 | 152 | * Migrate to go11 modules 153 | 154 | * Add api.v2 155 | 156 | ## rmapi 0.0.5 (August 11, 2019) 157 | 158 | * Fix issue to make document uploads work with reMarkable 1.7.x firmware upgrade 159 | 160 | * Increased http timeout to 5 minutes to enable upload of larger files 161 | 162 | * Add user-agent header to be a good reMarkable citezen 163 | 164 | * Ls may take a directory as an argument 165 | 166 | * Ignore hidden files/directories by default 167 | 168 | * Initial support for annotations 169 | 170 | * Fix panic when autocompleting the "ls" command 171 | 172 | * Add find command 173 | 174 | ## rmapi 0.0.4 (October 1, 2018) 175 | 176 | * Windows fixes 177 | 178 | * Add autocompleter for local files that is used by "put" 179 | 180 | * Fix mv command 181 | 182 | * Put may take a second argument as destination 183 | 184 | * Autocomplete for "put" command only shows ".pdf" files 185 | 186 | * Add support to upload epub files 187 | 188 | * rm supports multiple files 189 | 190 | * Return exit code for non-interactivly commands 191 | 192 | * Vendorize fuse dependencies 193 | 194 | * Use new auth endpoints 195 | 196 | ## rmapi 0.0.3 (February 25, 2018) 197 | 198 | * Update doc 199 | 200 | * Fix file upload 201 | 202 | *Javier Uruen Val* 203 | 204 | ## rmapi 0.0.2 (February 25, 2018) 205 | 206 | * Fix directory creation (fixes #6) 207 | 208 | * Add stat command to show entry's metadata 209 | 210 | *Javier Uruen Val* 211 | 212 | ## rmapi 0.0.1 (February 24, 2018) 213 | 214 | * Initial release with support for most of the API and autocompletion. 
215 | 216 | *Javier Uruen Val* 217 | -------------------------------------------------------------------------------- /shell/mput.go: -------------------------------------------------------------------------------- 1 | package shell 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "io/ioutil" 7 | "os" 8 | "strings" 9 | 10 | "github.com/abiosoft/ishell" 11 | "github.com/juruen/rmapi/util" 12 | ) 13 | 14 | func mputCmd(ctx *ShellCtxt) *ishell.Cmd { 15 | return &ishell.Cmd{ 16 | Name: "mput", 17 | Help: "recursively copy local files to remote directory", 18 | Completer: createFsEntryCompleter(), 19 | Func: func(c *ishell.Context) { 20 | 21 | argsLen := len(c.Args) 22 | 23 | if argsLen == 0 { 24 | c.Err(errors.New(("missing destination dir"))) 25 | return 26 | } 27 | 28 | if argsLen > 1 { 29 | c.Err(errors.New(("too many arguments for command mput"))) 30 | return 31 | } 32 | 33 | // Past this point, the number of arguments is 1. 34 | 35 | node, err := ctx.api.Filetree().NodeByPath(c.Args[0], ctx.node) 36 | 37 | if err != nil || node.IsFile() { 38 | c.Err(errors.New("remote directory does not exist")) 39 | return 40 | } 41 | 42 | path, err := ctx.api.Filetree().NodeToPath(node) 43 | 44 | if err != nil || node.IsFile() { 45 | c.Err(errors.New("remote directory does not exist")) 46 | return 47 | } 48 | 49 | treeFormatStr := "├" 50 | 51 | // Back up current remote location. 52 | currCtxPath := ctx.path 53 | currCtxNode := ctx.node 54 | // Change to requested directory. 55 | ctx.path = path 56 | ctx.node = node 57 | 58 | c.Println() 59 | err = putFilesAndDirs(ctx, c, "./", 0, &treeFormatStr) 60 | if err != nil { 61 | c.Err(err) 62 | } 63 | err = ctx.api.SyncComplete() 64 | if err != nil { 65 | c.Err(fmt.Errorf("failed to complete the sync: %v", err)) 66 | } 67 | c.Println() 68 | 69 | // Reset. 70 | ctx.path = currCtxPath 71 | ctx.node = currCtxNode 72 | }, 73 | } 74 | } 75 | 76 | // Print the required spaces and characters for tree formatting. 77 | // 78 | // Input -> [*ishell.Context] 79 | // 80 | // [int] tree depth (0 ... N-1) 81 | // [int] Current item index in directory 82 | // [int] Current directory list length 83 | // [*string] Book keeping for tree formatting 84 | func treeFormat(pC *ishell.Context, num int, lIndex int, lSize int, tFS *string) { 85 | 86 | tFStr := "" 87 | 88 | for i := 0; i <= num; i++ { 89 | if i == num { 90 | if lIndex == lSize-1 { 91 | tFStr += "└" 92 | pC.Printf("└── ") // Last item in current directory. 93 | } else if lSize > 1 { 94 | tFStr += "├" 95 | pC.Printf("├── ") 96 | } 97 | } else { 98 | prevStr := string([]rune(*tFS)[i]) 99 | if prevStr == "│" || prevStr == "├" { 100 | tFStr += "│" 101 | pC.Printf("│") 102 | } else { 103 | tFStr += " " 104 | pC.Printf(" ") 105 | } 106 | 107 | pC.Printf(" ") 108 | } 109 | } 110 | 111 | *tFS = tFStr 112 | } 113 | 114 | func putFilesAndDirs(pCtx *ShellCtxt, pC *ishell.Context, localDir string, depth int, tFS *string) error { 115 | 116 | if depth == 0 { 117 | pC.Println(pCtx.path) 118 | } 119 | 120 | os.Chdir(localDir) // Change to the local source directory. 121 | 122 | wd, _ := os.Getwd() 123 | dirList, err := ioutil.ReadDir(wd) 124 | 125 | if err != nil { 126 | pC.Err(fmt.Errorf("could not read the directory: %s", wd)) 127 | return err 128 | } 129 | 130 | lSize := len(dirList) 131 | for index, d := range dirList { 132 | name := d.Name() 133 | 134 | if !pCtx.useHiddenFiles && strings.HasPrefix(d.Name(), ".") { 135 | continue 136 | } 137 | 138 | switch mode := d.Mode(); { 139 | case mode.IsDir(): 140 | 141 | // Is a directory. 
Create directory and make a recursive call. 142 | _, err := pCtx.api.Filetree().NodeByPath(name, pCtx.node) 143 | 144 | if err != nil { 145 | // Directory does not exist. Create directory. 146 | treeFormat(pC, depth, index, lSize, tFS) 147 | pC.Printf("creating directory [%s]...", name) 148 | doc, err := pCtx.api.CreateDir(pCtx.node.Id(), name, false) 149 | 150 | if err != nil { 151 | pC.Err(errors.New(fmt.Sprint("failed to create directory", err))) 152 | continue 153 | } else { 154 | pC.Println(" complete") 155 | pCtx.api.Filetree().AddDocument(doc) // Add dir to file tree. 156 | } 157 | } else { 158 | // Directory already exists. 159 | treeFormat(pC, depth, index, lSize, tFS) 160 | pC.Printf("directory [%s] already exists\n", name) 161 | } 162 | 163 | // Error checking not required? Unless, someone deletes 164 | // or renames the directory meanwhile. 165 | 166 | node, _ := pCtx.api.Filetree().NodeByPath(name, pCtx.node) 167 | path, _ := pCtx.api.Filetree().NodeToPath(node) 168 | 169 | // Back up current remote location. 170 | currCtxPath := pCtx.path 171 | currCtxNode := pCtx.node 172 | 173 | pCtx.path = path 174 | pCtx.node = node 175 | 176 | err = putFilesAndDirs(pCtx, pC, name, depth+1, tFS) 177 | if err != nil { 178 | return err 179 | } 180 | 181 | // Reset. 182 | pCtx.path = currCtxPath 183 | pCtx.node = currCtxNode 184 | 185 | case mode.IsRegular(): 186 | 187 | docName, ext := util.DocPathToName(name) 188 | 189 | if !util.IsFileTypeSupported(ext) { 190 | continue 191 | } 192 | 193 | _, err := pCtx.api.Filetree().NodeByPath(docName, pCtx.node) 194 | 195 | if err == nil { 196 | // Document already exists. 197 | treeFormat(pC, depth, index, lSize, tFS) 198 | pC.Printf("document [%s] already exists\n", name) 199 | } else { 200 | // Document does not exist. 201 | treeFormat(pC, depth, index, lSize, tFS) 202 | pC.Printf("uploading: [%s]...", name) 203 | doc, err := pCtx.api.UploadDocument(pCtx.node.Id(), name, false) 204 | 205 | if err != nil { 206 | pC.Err(fmt.Errorf("failed to upload file %s", name)) 207 | } else { 208 | // Document uploaded successfully. 
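// (Added editorial note.) Print completion and record the new document in the
// in-memory file tree as well.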
209 | pC.Println(" complete") 210 | pCtx.api.Filetree().AddDocument(doc) 211 | } 212 | } 213 | } 214 | } 215 | 216 | if localDir != "./" { 217 | os.Chdir("..") 218 | } 219 | 220 | return nil 221 | } 222 | -------------------------------------------------------------------------------- /filetree/filetree_test.go: -------------------------------------------------------------------------------- 1 | package filetree 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/juruen/rmapi/model" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | const ( 11 | DIRECTORY = true 12 | FILE = false 13 | ) 14 | 15 | func createEntry(id, parent, name string, collection bool) *model.Document { 16 | var t string 17 | 18 | if collection { 19 | t = "CollectionType" 20 | } else { 21 | t = "DocumentType" 22 | } 23 | 24 | return &model.Document{ID: id, Parent: parent, VissibleName: name, Type: t} 25 | } 26 | 27 | func createFile(id, parent, name string) *model.Document { 28 | return createEntry(id, parent, name, FILE) 29 | } 30 | 31 | func createDirectory(id, parent, name string) *model.Document { 32 | return createEntry(id, parent, name, DIRECTORY) 33 | } 34 | 35 | func TestCreateFileTreeCtx(t *testing.T) { 36 | ctx := CreateFileTreeCtx() 37 | 38 | assert.Equal(t, "/", ctx.root.Name()) 39 | } 40 | 41 | func TestAddSingleFileToRoot(t *testing.T) { 42 | ctx := CreateFileTreeCtx() 43 | 44 | file := createEntry("1", "", "file", false) 45 | 46 | ctx.AddDocument(file) 47 | 48 | assert.Equal(t, 1, len(ctx.root.Children)) 49 | assert.Equal(t, "file", ctx.root.Children["1"].Name()) 50 | } 51 | 52 | func TestAddDirAndFiles(t *testing.T) { 53 | ctx := CreateFileTreeCtx() 54 | 55 | dir := createDirectory("1", "", "dir") 56 | file := createFile("2", "1", "file") 57 | file1 := createFile("3", "1", "file1") 58 | 59 | ctx.AddDocument(file) 60 | assert.Equal(t, 1, len(ctx.pendingParent)) 61 | 62 | ctx.AddDocument(dir) 63 | assert.Equal(t, 0, len(ctx.pendingParent)) 64 | 65 | ctx.AddDocument(file1) 66 | assert.Equal(t, 0, len(ctx.pendingParent)) 67 | 68 | assert.Equal(t, "/", ctx.root.Name()) 69 | assert.Equal(t, "dir", ctx.root.Children["1"].Name()) 70 | assert.Equal(t, "file", ctx.root.Children["1"].Children["2"].Name()) 71 | assert.Equal(t, "file1", ctx.root.Children["1"].Children["3"].Name()) 72 | 73 | } 74 | 75 | func TestAddSeveralFilesAndDirs(t *testing.T) { 76 | ctx := CreateFileTreeCtx() 77 | 78 | // dir1/dir12/file1 79 | // dir2/file2 80 | // dir3/file3 81 | // dir3/file4 82 | // file5.pdf 83 | 84 | dir1 := createDirectory("1", "", "dir1") 85 | dir12 := createDirectory("2", "1", "dir12") 86 | dir2 := createDirectory("3", "", "dir2") 87 | dir3 := createDirectory("4", "", "dir3") 88 | 89 | file1 := createFile("5", "2", "file1") 90 | file2 := createFile("6", "3", "file2") 91 | file3 := createFile("7", "4", "file3") 92 | file4 := createFile("8", "4", "file4") 93 | file5 := createFile("9", "", "file5") 94 | 95 | ctx.AddDocument(file1) 96 | ctx.AddDocument(file2) 97 | ctx.AddDocument(file3) 98 | ctx.AddDocument(file4) 99 | ctx.AddDocument(file5) 100 | ctx.AddDocument(dir3) 101 | ctx.AddDocument(dir2) 102 | ctx.AddDocument(dir12) 103 | ctx.AddDocument(dir1) 104 | 105 | assert.Equal(t, "/", ctx.root.Name()) 106 | assert.Equal(t, "dir1", ctx.root.Children["1"].Name()) 107 | assert.Equal(t, "dir12", ctx.root.Children["1"].Children["2"].Name()) 108 | assert.Equal(t, "file1", ctx.root.Children["1"].Children["2"].Children["5"].Name()) 109 | assert.Equal(t, "file2", ctx.root.Children["3"].Children["6"].Name()) 110 | assert.Equal(t, 
"file3", ctx.root.Children["4"].Children["7"].Name()) 111 | assert.Equal(t, "file4", ctx.root.Children["4"].Children["8"].Name()) 112 | assert.Equal(t, "file5", ctx.root.Children["9"].Name()) 113 | } 114 | 115 | func TestNodeByPath(t *testing.T) { 116 | ctx := CreateFileTreeCtx() 117 | 118 | // dir1/dir12/file1 119 | dir1 := createDirectory("1", "", "dir1") 120 | dir12 := createDirectory("2", "1", "dir12") 121 | file1 := createFile("3", "2", "file1") 122 | 123 | ctx.AddDocument(file1) 124 | ctx.AddDocument(dir12) 125 | ctx.AddDocument(dir1) 126 | 127 | node, _ := ctx.NodeByPath("/", ctx.Root()) 128 | assert.Equal(t, "/", node.Name()) 129 | 130 | node, _ = ctx.NodeByPath("/dir1", ctx.Root()) 131 | assert.Equal(t, "dir1", node.Name()) 132 | 133 | node, _ = ctx.NodeByPath("dir1", ctx.Root()) 134 | assert.Equal(t, "dir1", node.Name()) 135 | 136 | node, _ = ctx.NodeByPath("/dir1/dir12", ctx.Root()) 137 | assert.Equal(t, "dir12", node.Name()) 138 | 139 | node, _ = ctx.NodeByPath("/dir1/dir12/file1", ctx.Root()) 140 | assert.Equal(t, "file1", node.Name()) 141 | 142 | dir12Node, _ := ctx.NodeByPath("/dir1/dir12", ctx.Root()) 143 | node, _ = ctx.NodeByPath("file1", dir12Node) 144 | assert.Equal(t, "file1", node.Name()) 145 | 146 | node, _ = ctx.NodeByPath("../dir12/file1", dir12Node) 147 | assert.Equal(t, "file1", node.Name()) 148 | 149 | node, _ = ctx.NodeByPath("./file1", dir12Node) 150 | assert.Equal(t, "file1", node.Name()) 151 | } 152 | 153 | func TestNodeToPath(t *testing.T) { 154 | ctx := CreateFileTreeCtx() 155 | 156 | // dir1/dir12/file1 157 | // dir2/file2 158 | // dir3/file3 159 | // dir3/file4 160 | // file5.pdf 161 | 162 | dir1 := createDirectory("1", "", "dir1") 163 | dir12 := createDirectory("2", "1", "dir12") 164 | dir2 := createDirectory("3", "", "dir2") 165 | dir3 := createDirectory("4", "", "dir3") 166 | 167 | file1 := createFile("5", "2", "file1") 168 | file2 := createFile("6", "3", "file2") 169 | file3 := createFile("7", "4", "file3") 170 | file4 := createFile("8", "4", "file4") 171 | file5 := createFile("9", "", "file5") 172 | 173 | ctx.AddDocument(file1) 174 | ctx.AddDocument(file2) 175 | ctx.AddDocument(file3) 176 | ctx.AddDocument(file4) 177 | ctx.AddDocument(file5) 178 | ctx.AddDocument(dir3) 179 | ctx.AddDocument(dir2) 180 | ctx.AddDocument(dir12) 181 | ctx.AddDocument(dir1) 182 | 183 | path, _ := ctx.NodeToPath(ctx.Root()) 184 | assert.Equal(t, "/", path) 185 | 186 | path, _ = ctx.NodeToPath(ctx.root.Children["1"]) 187 | assert.Equal(t, "/dir1", path) 188 | 189 | path, _ = ctx.NodeToPath(ctx.root.Children["1"].Children["2"]) 190 | assert.Equal(t, "/dir1/dir12", path) 191 | 192 | path, _ = ctx.NodeToPath(ctx.root.Children["1"].Children["2"].Children["5"]) 193 | assert.Equal(t, "/dir1/dir12/file1", path) 194 | 195 | path, _ = ctx.NodeToPath(ctx.root.Children["3"].Children["6"]) 196 | assert.Equal(t, "/dir2/file2", path) 197 | 198 | path, _ = ctx.NodeToPath(ctx.root.Children["9"]) 199 | assert.Equal(t, "/file5", path) 200 | } 201 | -------------------------------------------------------------------------------- /api/sync15/blobdoc.go: -------------------------------------------------------------------------------- 1 | package sync15 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "crypto/sha256" 7 | "encoding/hex" 8 | "encoding/json" 9 | "errors" 10 | "hash/crc32" 11 | "io" 12 | "sort" 13 | "strconv" 14 | "strings" 15 | "time" 16 | 17 | "github.com/juruen/rmapi/archive" 18 | "github.com/juruen/rmapi/log" 19 | "github.com/juruen/rmapi/model" 20 | ) 21 | 22 | type BlobDoc struct 
{ 23 | Files []*Entry 24 | Entry 25 | Metadata archive.MetadataFile 26 | } 27 | 28 | func NewBlobDoc(name, documentId, colType, parentId string) *BlobDoc { 29 | return &BlobDoc{ 30 | Metadata: archive.MetadataFile{ 31 | DocName: name, 32 | CollectionType: colType, 33 | LastModified: archive.UnixTimestamp(), 34 | Parent: parentId, 35 | }, 36 | Entry: Entry{ 37 | DocumentID: documentId, 38 | }, 39 | } 40 | 41 | } 42 | 43 | func (d *BlobDoc) Rehash() error { 44 | 45 | hash, err := HashEntries(d.Files) 46 | if err != nil { 47 | return err 48 | } 49 | log.Trace.Println("New doc hash: ", hash) 50 | d.Hash = hash 51 | return nil 52 | } 53 | 54 | func (d *BlobDoc) MetadataHashAndReader() (hash string, checksum uint32, reader io.Reader, size int64, err error) { 55 | jsn, err := json.Marshal(d.Metadata) 56 | if err != nil { 57 | return 58 | } 59 | sha := sha256.New() 60 | sha.Write(jsn) 61 | hash = hex.EncodeToString(sha.Sum(nil)) 62 | log.Trace.Println("new hash", hash) 63 | 64 | crc32c := crc32.MakeTable(crc32.Castagnoli) 65 | checksum = crc32.Checksum(jsn, crc32c) 66 | 67 | size = int64(len(jsn)) 68 | reader = bytes.NewReader(jsn) 69 | found := false 70 | for _, f := range d.Files { 71 | if strings.HasSuffix(f.DocumentID, ".metadata") { 72 | f.Hash = hash 73 | found = true 74 | break 75 | } 76 | } 77 | if !found { 78 | err = errors.New("metadata not found") 79 | } 80 | 81 | return 82 | } 83 | 84 | func (d *BlobDoc) AddFile(e *Entry) error { 85 | d.Files = append(d.Files, e) 86 | return d.Rehash() 87 | } 88 | 89 | func (t *HashTree) Add(d *BlobDoc) error { 90 | if len(d.Files) == 0 { 91 | return errors.New("no files") 92 | } 93 | t.Docs = append(t.Docs, d) 94 | return t.Rehash() 95 | } 96 | 97 | func (d *BlobDoc) IndexReader() (io.ReadCloser, error) { 98 | if len(d.Files) == 0 { 99 | return nil, errors.New("no files") 100 | } 101 | pipeReader, pipeWriter := io.Pipe() 102 | w := bufio.NewWriter(pipeWriter) 103 | go func() { 104 | defer pipeWriter.Close() 105 | w.WriteString(SchemaVersion) 106 | w.WriteString("\n") 107 | for _, d := range d.Files { 108 | w.WriteString(d.Line()) 109 | w.WriteString("\n") 110 | } 111 | w.Flush() 112 | }() 113 | 114 | return pipeReader, nil 115 | } 116 | 117 | // ReadMetadata the document metadata from remote blob 118 | func (d *BlobDoc) ReadMetadata(fileEntry *Entry, r RemoteStorage) error { 119 | if strings.HasSuffix(fileEntry.DocumentID, ".metadata") { 120 | log.Trace.Println("Reading metadata: " + d.DocumentID) 121 | 122 | metadata := archive.MetadataFile{} 123 | 124 | meta, err := r.GetReader(fileEntry.Hash) 125 | if err != nil { 126 | return err 127 | } 128 | defer meta.Close() 129 | content, err := io.ReadAll(meta) 130 | if err != nil { 131 | return err 132 | } 133 | err = json.Unmarshal(content, &metadata) 134 | if err != nil { 135 | log.Error.Printf("cannot read metadata %s %v", fileEntry.DocumentID, err) 136 | } 137 | log.Trace.Println("name from metadata: ", metadata.DocName) 138 | d.Metadata = metadata 139 | } 140 | 141 | return nil 142 | } 143 | 144 | func (d *BlobDoc) Line() string { 145 | var sb strings.Builder 146 | if d.Hash == "" { 147 | log.Error.Print("missing hash for: ", d.DocumentID) 148 | } 149 | sb.WriteString(d.Hash) 150 | sb.WriteRune(Delimiter) 151 | sb.WriteString(DocType) 152 | sb.WriteRune(Delimiter) 153 | sb.WriteString(d.DocumentID) 154 | sb.WriteRune(Delimiter) 155 | 156 | numFilesStr := strconv.Itoa(len(d.Files)) 157 | sb.WriteString(numFilesStr) 158 | sb.WriteRune(Delimiter) 159 | sb.WriteString("0") 160 | return sb.String() 161 | } 
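// Editorial illustration (not part of the original source), assuming the
// Delimiter rune is ':': a document with three component files is rendered
// by Line as
//
//	<hash>:<DocType value>:<DocumentID>:3:0
//
// i.e. hash, entry type, document id, number of component files and a
// trailing "0", joined by Delimiter.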
162 | 163 | // Mirror updates the document to be the same as the remote 164 | func (d *BlobDoc) Mirror(e *Entry, r RemoteStorage) error { 165 | d.Entry = *e 166 | entryIndex, err := r.GetReader(e.Hash) 167 | if err != nil { 168 | return err 169 | } 170 | defer entryIndex.Close() 171 | entries, err := parseIndex(entryIndex) 172 | if err != nil { 173 | return err 174 | } 175 | 176 | head := make([]*Entry, 0) 177 | current := make(map[string]*Entry) 178 | new := make(map[string]*Entry) 179 | 180 | for _, e := range entries { 181 | new[e.DocumentID] = e 182 | } 183 | 184 | //updated and existing 185 | for _, currentEntry := range d.Files { 186 | if newEntry, ok := new[currentEntry.DocumentID]; ok { 187 | if newEntry.Hash != currentEntry.Hash { 188 | err = d.ReadMetadata(newEntry, r) 189 | if err != nil { 190 | return err 191 | } 192 | currentEntry.Hash = newEntry.Hash 193 | } 194 | head = append(head, currentEntry) 195 | current[currentEntry.DocumentID] = currentEntry 196 | } 197 | } 198 | 199 | //add missing 200 | for k, newEntry := range new { 201 | if _, ok := current[k]; !ok { 202 | err = d.ReadMetadata(newEntry, r) 203 | if err != nil { 204 | return err 205 | } 206 | head = append(head, newEntry) 207 | } 208 | } 209 | sort.Slice(head, func(i, j int) bool { return head[i].DocumentID < head[j].DocumentID }) 210 | d.Files = head 211 | return nil 212 | 213 | } 214 | func (d *BlobDoc) ToDocument() *model.Document { 215 | var lastModified string 216 | unixTime, err := strconv.ParseInt(d.Metadata.LastModified, 10, 64) 217 | if err == nil { 218 | //HACK: convert wrong nano timestamps to millis 219 | if len(d.Metadata.LastModified) > 18 { 220 | unixTime /= 1000000 221 | } 222 | 223 | t := time.Unix(unixTime/1000, 0) 224 | lastModified = t.UTC().Format(time.RFC3339Nano) 225 | } 226 | return &model.Document{ 227 | ID: d.DocumentID, 228 | VissibleName: d.Metadata.DocName, 229 | Version: d.Metadata.Version, 230 | Parent: d.Metadata.Parent, 231 | Type: d.Metadata.CollectionType, 232 | CurrentPage: d.Metadata.LastOpenedPage, 233 | ModifiedClient: lastModified, 234 | } 235 | } 236 | -------------------------------------------------------------------------------- /encoding/rm/rm.go: -------------------------------------------------------------------------------- 1 | // Package rm provides primitives for encoding and decoding 2 | // the .rm format which is a proprietary format created by 3 | // Remarkable to store the data of a drawing made with the device. 4 | // 5 | // Axel Huebl has made a great job of understanding this binary format and 6 | // has written an excellent blog post that helped a lot for writting this package. 7 | // https://plasma.ninja/blog/devices/remarkable/binary/format/2017/12/26/reMarkable-lines-file-format.html 8 | // As well, he has its own implementation of this decoder in C++ at this repository. 9 | // https://github.com/ax3l/lines-are-beautiful 10 | // 11 | // To mention that the format has since evolve to a new version labeled as v3 in the 12 | // header. This implementation is targeting this new version. 13 | // 14 | // As Ben Johnson says, "In the Go standard library, we use the term encoding 15 | // and marshaling for two separate but related ideas. An encoder in Go is an object 16 | // that applies structure to a stream of bytes while marshaling refers 17 | // to applying structure to bounded, in-memory bytes." 
18 | // https://medium.com/go-walkthrough/go-walkthrough-encoding-package-bc5e912232d 19 | // 20 | // We will follow this convention and refer to marshaling for this encoder/decoder 21 | // because we want to transform a .rm binary into a bounded in-memory representation 22 | // of a .rm file. 23 | // 24 | // To try to be as idiomatic as possible, this package implements the two following interfaces 25 | // of the default encoding package (https://golang.org/pkg/encoding/). 26 | // - BinaryMarshaler 27 | // - BinaryUnmarshaler 28 | // 29 | // The scope of this package is defined as just the encoding/decoding of the .rm format. 30 | // It will only deal with bytes and not files (one must take care of unzipping the archive 31 | // taken from the device, extracting and providing the content of .rm file as bytes). 32 | // 33 | // This package won't be used for retrieving metadata or attached PDF, ePub files. 34 | package rm 35 | 36 | import ( 37 | "fmt" 38 | "strings" 39 | ) 40 | 41 | // Version defines the version number of a remarkable note. 42 | type Version int 43 | 44 | const ( 45 | V3 Version = iota 46 | V5 47 | ) 48 | 49 | // Header starting a .rm binary file. This can help recognizing a .rm file. 50 | const ( 51 | HeaderV3 = "reMarkable .lines file, version=3 " 52 | HeaderV5 = "reMarkable .lines file, version=5 " 53 | HeaderLen = 43 54 | ) 55 | 56 | // Width and Height of the device in pixels. 57 | const ( 58 | Width int = 1404 59 | Height int = 1872 60 | ) 61 | 62 | // BrushColor defines the 3 colors of the brush. 63 | type BrushColor uint32 64 | 65 | // Mapping of the three colors. 66 | const ( 67 | Black BrushColor = 0 68 | Grey BrushColor = 1 69 | White BrushColor = 2 70 | ) 71 | 72 | // BrushType respresents the type of brush. 73 | // 74 | // The different types of brush are explained here: 75 | // https://blog.remarkable.com/how-to-find-your-perfect-writing-instrument-for-notetaking-on-remarkable-f53c8faeab77 76 | type BrushType uint32 77 | 78 | // Mappings for brush types. 79 | const ( 80 | BallPoint BrushType = 2 81 | Marker BrushType = 3 82 | Fineliner BrushType = 4 83 | SharpPencil BrushType = 7 84 | TiltPencil BrushType = 1 85 | Brush BrushType = 0 86 | Highlighter BrushType = 5 87 | Eraser BrushType = 6 88 | EraseArea BrushType = 8 89 | 90 | // v5 brings new brush type IDs 91 | BallPointV5 BrushType = 15 92 | MarkerV5 BrushType = 16 93 | FinelinerV5 BrushType = 17 94 | SharpPencilV5 BrushType = 13 95 | TiltPencilV5 BrushType = 14 96 | BrushV5 BrushType = 12 97 | HighlighterV5 BrushType = 18 98 | ) 99 | 100 | // BrushSize represents the base brush sizes. 101 | type BrushSize float32 102 | 103 | // 3 different brush sizes are noticed. 104 | const ( 105 | Small BrushSize = 1.875 106 | Medium BrushSize = 2.0 107 | Large BrushSize = 2.125 108 | ) 109 | 110 | // A Rm represents an entire .rm file 111 | // and is composed of layers. 112 | type Rm struct { 113 | Version Version 114 | Layers []Layer 115 | } 116 | 117 | // A Layer contains lines. 118 | type Layer struct { 119 | Lines []Line 120 | } 121 | 122 | // A Line is composed of points. 123 | type Line struct { 124 | BrushType BrushType 125 | BrushColor BrushColor 126 | Padding uint32 127 | Unknown float32 128 | BrushSize BrushSize 129 | Points []Point 130 | } 131 | 132 | // A Point has coordinates. 133 | type Point struct { 134 | X float32 135 | Y float32 136 | Speed float32 137 | Direction float32 138 | Width float32 139 | Pressure float32 140 | } 141 | 142 | // New helps creating an empty Rm page. 
143 | // By mashaling an empty Rm page and exporting it 144 | // to the device, we should generate an empty page 145 | // as if it were created using the device itself. 146 | // TODO 147 | func New() *Rm { 148 | return &Rm{} 149 | } 150 | 151 | // String implements the fmt.Stringer interface 152 | // The aim is to create a textual representation of a page as in the following image. 153 | // https://plasma.ninja/blog/assets/reMarkable/2017_12_21_reMarkableAll.png 154 | // TODO 155 | func (rm Rm) String() string { 156 | var o strings.Builder 157 | 158 | fmt.Fprintf(&o, "no of layers: %d\n", len(rm.Layers)) 159 | for i, layer := range rm.Layers { 160 | fmt.Fprintf(&o, "layer %d\n", i) 161 | fmt.Fprintf(&o, " nb of lines: %d\n", len(layer.Lines)) 162 | for j, line := range layer.Lines { 163 | fmt.Fprintf(&o, " line %d\n", j) 164 | fmt.Fprintf(&o, " brush type: %d\n", line.BrushType) 165 | fmt.Fprintf(&o, " brush color: %d\n", line.BrushColor) 166 | fmt.Fprintf(&o, " padding: %d\n", line.Padding) 167 | fmt.Fprintf(&o, " brush size: %f\n", line.BrushSize) 168 | fmt.Fprintf(&o, " nb of points: %d\n", len(line.Points)) 169 | for k, point := range line.Points { 170 | fmt.Fprintf(&o, " point %d\n", k) 171 | fmt.Fprintf(&o, " coords: %f, %f\n", point.X, point.Y) 172 | fmt.Fprintf(&o, " speed: %f\n", point.Speed) 173 | fmt.Fprintf(&o, " direction: %f\n", point.Direction) 174 | fmt.Fprintf(&o, " width: %f\n", point.Width) 175 | fmt.Fprintf(&o, " pressure: %f\n", point.Pressure) 176 | } 177 | } 178 | } 179 | return o.String() 180 | } 181 | -------------------------------------------------------------------------------- /archive/zipdoc.go: -------------------------------------------------------------------------------- 1 | package archive 2 | 3 | import ( 4 | "archive/zip" 5 | "bytes" 6 | "encoding/json" 7 | "fmt" 8 | "image/jpeg" 9 | "io/ioutil" 10 | "os" 11 | "path" 12 | "strconv" 13 | "time" 14 | 15 | uuid "github.com/google/uuid" 16 | "github.com/juruen/rmapi/log" 17 | "github.com/juruen/rmapi/util" 18 | "github.com/nfnt/resize" 19 | pdfmodel "github.com/unidoc/unipdf/v3/model" 20 | "github.com/unidoc/unipdf/v3/render" 21 | ) 22 | 23 | func makeThumbnail(pdf []byte) ([]byte, error) { 24 | reader, err := pdfmodel.NewPdfReader(bytes.NewReader(pdf)) 25 | if err != nil { 26 | return nil, err 27 | } 28 | page, err := reader.GetPage(1) 29 | if err != nil { 30 | return nil, err 31 | } 32 | 33 | device := render.NewImageDevice() 34 | 35 | image, err := device.Render(page) 36 | if err != nil { 37 | return nil, err 38 | } 39 | 40 | thumbnail := resize.Resize(280, 374, image, resize.Lanczos3) 41 | out := &bytes.Buffer{} 42 | jpeg.Encode(out, thumbnail, nil) 43 | 44 | return out.Bytes(), nil 45 | } 46 | 47 | // GetIdFromZip tries to get the Document UUID from an archive 48 | func GetIdFromZip(srcPath string) (id string, err error) { 49 | file, err := os.Open(srcPath) 50 | if err != nil { 51 | return 52 | } 53 | defer file.Close() 54 | fi, err := file.Stat() 55 | if err != nil { 56 | return 57 | } 58 | zip := Zip{} 59 | err = zip.Read(file, fi.Size()) 60 | if err != nil { 61 | return 62 | } 63 | id = zip.UUID 64 | return 65 | } 66 | 67 | func CreateZipDocument(id, srcPath string) (zipPath string, err error) { 68 | _, ext := util.DocPathToName(srcPath) 69 | fileType := ext 70 | 71 | if ext == util.ZIP { 72 | zipPath = srcPath 73 | return 74 | } 75 | 76 | doc, err := ioutil.ReadFile(srcPath) 77 | if err != nil { 78 | log.Error.Println("failed to open source document file to read", err) 79 | return 80 | } 81 | // 
Create document (pdf or epub) file 82 | tmp, err := ioutil.TempFile("", "rmapizip") 83 | if err != nil { 84 | return 85 | } 86 | defer tmp.Close() 87 | 88 | if err != nil { 89 | log.Error.Println("failed to create tmpfile for zip doc", err) 90 | return 91 | } 92 | 93 | w := zip.NewWriter(tmp) 94 | defer w.Close() 95 | 96 | var documentPath string 97 | 98 | pages := make([]string, 0) 99 | if ext == util.RM { 100 | pageUUID := uuid.New() 101 | pageID := pageUUID.String() 102 | documentPath = fmt.Sprintf("%s/%s.rm", id, pageID) 103 | fileType = "notebook" 104 | pages = append(pages, pageID) 105 | } else { 106 | documentPath = fmt.Sprintf("%s.%s", id, ext) 107 | pages = append(pages, "") 108 | } 109 | 110 | f, err := w.Create(documentPath) 111 | if err != nil { 112 | log.Error.Println("failed to create doc entry in zip file", err) 113 | return 114 | } 115 | f.Write(doc) 116 | 117 | //try to create a thumbnail 118 | //due to a bug somewhere in unipdf the generation is opt-in 119 | if ext == util.PDF && os.Getenv("RMAPI_THUMBNAILS") != "" { 120 | thumbnail, err := makeThumbnail(doc) 121 | if err != nil { 122 | log.Error.Println("cannot generate thumbnail", err) 123 | } else { 124 | f, err := w.Create(fmt.Sprintf("%s.thumbnails/0.jpg", id)) 125 | if err != nil { 126 | log.Error.Println("failed to create doc entry in zip file", err) 127 | return "", err 128 | } 129 | f.Write(thumbnail) 130 | } 131 | } 132 | 133 | // Create pagedata file 134 | f, err = w.Create(fmt.Sprintf("%s.pagedata", id)) 135 | if err != nil { 136 | log.Error.Println("failed to create content entry in zip file", err) 137 | return 138 | } 139 | f.Write(make([]byte, 0)) 140 | 141 | // Create content content 142 | f, err = w.Create(fmt.Sprintf("%s.content", id)) 143 | if err != nil { 144 | log.Error.Println("failed to create content entry in zip file", err) 145 | return 146 | } 147 | 148 | c, err := createZipContent(fileType, pages) 149 | if err != nil { 150 | return 151 | } 152 | 153 | f.Write([]byte(c)) 154 | zipPath = tmp.Name() 155 | 156 | return 157 | } 158 | 159 | func CreateZipDirectory(id string) (string, error) { 160 | tmp, err := ioutil.TempFile("", "rmapizip") 161 | 162 | if err != nil { 163 | log.Error.Println("failed to create tmpfile for zip dir", err) 164 | return "", err 165 | } 166 | defer tmp.Close() 167 | 168 | w := zip.NewWriter(tmp) 169 | defer w.Close() 170 | 171 | // Create content content 172 | f, err := w.Create(fmt.Sprintf("%s.content", id)) 173 | if err != nil { 174 | log.Error.Println("failed to create content entry in zip file", err) 175 | return "", err 176 | } 177 | 178 | f.Write([]byte("{}")) 179 | 180 | return tmp.Name(), nil 181 | } 182 | 183 | func createZipContent(ext string, pageIDs []string) (string, error) { 184 | c := Content{ 185 | DummyDocument: false, 186 | ExtraMetadata: ExtraMetadata{ 187 | LastPen: "Finelinerv2", 188 | LastTool: "Finelinerv2", 189 | LastFinelinerv2Size: "1", 190 | }, 191 | FileType: ext, 192 | PageCount: 0, 193 | LastOpenedPage: 0, 194 | LineHeight: -1, 195 | Margins: 180, 196 | TextScale: 1, 197 | Transform: Transform{ 198 | M11: 1, 199 | M12: 0, 200 | M13: 0, 201 | M21: 0, 202 | M22: 1, 203 | M23: 0, 204 | M31: 0, 205 | M32: 0, 206 | M33: 1, 207 | }, 208 | Pages: pageIDs, 209 | } 210 | 211 | cstring, err := json.Marshal(c) 212 | 213 | if err != nil { 214 | log.Error.Println("failed to serialize content file", err) 215 | return "", err 216 | } 217 | 218 | return string(cstring), nil 219 | } 220 | 221 | func CreateContent(id, ext, fpath string, pageIds []string) 
(fileName, filePath string, err error) { 222 | fileName = id + ".content" 223 | filePath = path.Join(fpath, fileName) 224 | content := "{}" 225 | 226 | if ext != "" { 227 | content, err = createZipContent(ext, pageIds) 228 | if err != nil { 229 | return 230 | } 231 | } 232 | 233 | err = ioutil.WriteFile(filePath, []byte(content), 0600) 234 | return 235 | } 236 | 237 | func UnixTimestamp() string { 238 | t := time.Now().UnixNano() / 1000000 239 | tf := strconv.FormatInt(t, 10) 240 | return tf 241 | } 242 | 243 | func CreateMetadata(id, name, parent, colType, fpath string) (fileName string, filePath string, err error) { 244 | fileName = id + ".metadata" 245 | filePath = path.Join(fpath, fileName) 246 | meta := MetadataFile{ 247 | DocName: name, 248 | Version: 0, 249 | CollectionType: colType, 250 | Parent: parent, 251 | Synced: true, 252 | LastModified: UnixTimestamp(), 253 | } 254 | 255 | c, err := json.Marshal(meta) 256 | if err != nil { 257 | return 258 | } 259 | 260 | err = ioutil.WriteFile(filePath, c, 0600) 261 | return 262 | } 263 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rMAPI 2 | 3 | *Note: rMAPI is effectively unmaintained at this point, and this repo will be archived in the next few weeks. Please see [this discussion for more info](https://github.com/juruen/rmapi/discussions/313).* 4 | 5 | [![Actions Status](https://github.com/juruen/rmapi/workflows/Go/badge.svg)](https://github.com/juruen/rmapi/actions) 6 | 7 | 8 | 9 | rMAPI is a Go app that allows you to access the reMarkable Cloud API programmatically. 10 | 11 | You can interact with the different API end-points through a shell. However, you can also 12 | run commands non-interactively. This may come in handy to script certain workflows such as 13 | taking automatic backups or uploading documents programmatically. 14 | 15 | 16 | ![Console Capture](docs/console.gif) 17 | 18 | # Some examples of use 19 | 20 | [Tutorial on how to directly print to your reMarkable on Mac with rMAPI](docs/tutorial-print-macosx.md) 21 | 22 | # Warning: experimental support for the new sync protocol 23 | 24 | If `rmapi` stopped working for you in such a way that `gets` were working fine but `puts` didn't seem to work, this 25 | is relevant for you. 26 | 27 | A few months ago, the `Remarkable` folks introduced a new sync protocol. This new protocol is being rolled out 28 | incrementally. Some users are being forced to use it, while others are still using the old protocol that 29 | `rmapi` has supported well for a few years now. 30 | 31 | Unfortunately, the `rmapi` contributors are still on the old protocol, and this makes it more difficult to test 32 | the new protocol's implementation. However, as we know that there are many users who would like to use 33 | `rmapi` with the new protocol, we have added experimental support for it. The always awesome @ddvk did all the work 34 | here! 35 | 36 | Although `rmapi` will warn you if it needs to use the new protocol, we'd like to take this opportunity to reiterate that 37 | support for the new protocol is experimental, and you should make sure you have a back-up of your files in case something goes wrong. 
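If you want a quick safety net before relying on the new protocol, the non-interactive mode described further below can be used to take a full backup of your cloud documents, e.g.:

```
rmapi mget .
```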
38 | 39 | 40 | # Install 41 | 42 | ## From sources 43 | 44 | Install and build the project: 45 | 46 | ``` 47 | git clone https://github.com/juruen/rmapi 48 | cd rmapi 49 | go install 50 | ``` 51 | 52 | ## Binary 53 | 54 | You can download an already built version for either Linux or OSX from [releases](https://github.com/juruen/rmapi/releases). 55 | 56 | ## Docker 57 | 58 | First clone this repository, then build a local container image: 59 | 60 | ``` 61 | docker build -t rmapi . 62 | ``` 63 | 64 | create the .config/rmapi config folder: 65 | 66 | ``` 67 | mkdir -p $HOME/.config/rmapi 68 | ``` 69 | 70 | and run it by mounting the .config/rmapi folder: 71 | 72 | ``` 73 | docker run -v $HOME/.config/rmapi/:/home/app/.config/rmapi/ -it rmapi 74 | ``` 75 | 76 | Issue non-interactive commands by appending them to the `docker run` command: 77 | 78 | ``` 79 | docker run -v $HOME/.config/rmapi/:/home/app/.config/rmapi/ rmapi help 80 | ``` 81 | 82 | # API support 83 | 84 | - [x] list files and directories 85 | - [x] move around directories 86 | - [x] download a specific file 87 | - [x] download a directory and all its files and subdirectories recursively 88 | - [x] create a directory 89 | - [x] delete a file or a directory 90 | - [x] move/rename a file or a directory 91 | - [x] upload a specific file 92 | - [ ] live syncs 93 | 94 | # Annotations 95 | 96 | - Initial support to generate a PDF with annotations. 97 | 98 | # Shell ergonomics 99 | 100 | - [x] autocomplete 101 | - [ ] globbing 102 | - [x] upload a directory and all its files and subdirectories recursively 103 | 104 | # Commands 105 | 106 | Start the shell by running `rmapi`. 107 | 108 | ## List current directory 109 | 110 | Use `ls` to list the contents of the current directory. Entries are listed with `[d]` if they 111 | are directories, and `[f]` if they are files. 112 | 113 | ## Change current directory 114 | 115 | Use `cd` to change the current directory to any other directory in the hierarchy. 116 | 117 | ## Find a file 118 | 119 | The command `find` takes one or two arguments. 120 | 121 | If only the first argument is passed, all entries from that point are printed recursively. 122 | 123 | When the second argument is also passed, a regexp is expected, and only those entries that match the regexp are printed. 124 | 125 | Standard Go regexps are used. For instance, to make the regexp case insensitive you can do: 126 | 127 | ``` 128 | find . (?i)foo 129 | ``` 130 | 131 | ## Upload a file 132 | 133 | Use `put path_to_local_file` to upload a file to the current directory. 134 | 135 | You can also specify the destination directory: 136 | 137 | ``` 138 | put book.pdf /books 139 | ``` 140 | 141 | ## Recursively upload directories and files 142 | 143 | Use `mput path_to_dir` to recursively upload all the local files to that directory. 144 | 145 | E.g.: upload all the files 146 | 147 | ``` 148 | mput /Papers 149 | ``` 150 | 151 | ![Console Capture](docs/mput-console.png) 152 | 153 | ## Download a file 154 | 155 | Use `get path_to_file` to download a file from the cloud to your local computer. 156 | 157 | ## Recursively download directories and files 158 | 159 | Use `mget path_to_dir` to recursively download all the files in that directory. 160 | 161 | E.g.: download all the files 162 | 163 | ``` 164 | mget . 165 | ``` 166 | 167 | ## Download a file and generate a PDF with its annotations 168 | 169 | Use `geta` to download a file and generate a PDF document 170 | with its annotations. 
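For example, assuming `geta` takes a path argument the same way `get` does (the path below is illustrative):

```
geta /Papers/mydoc
```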
171 | 172 | Please note that support is still very basic and only handles one type of pen for now, but 173 | there's work in progress to improve it. 174 | 175 | ## Create a directory 176 | 177 | Use `mkdir path_to_new_dir` to create a new directory. 178 | 179 | ## Remove a directory or a file 180 | 181 | Use `rm directory_or_file` to remove an entry. If it's a directory, it needs to be empty in order to be deleted. 182 | 183 | You can remove multiple entries at the same time. 184 | 185 | ## Move/rename a directory or a file 186 | 187 | Use `mv source destination` to move or rename a file or directory. 188 | 189 | ## Stat a directory or file 190 | 191 | Use `stat entry` to dump its metadata as reported by the Cloud API. 192 | 193 | # Run commands non-interactively 194 | 195 | Add the commands you want to execute to the arguments of the binary. 196 | 197 | E.g.: a simple script to download all files from the cloud to your local machine 198 | 199 | ```bash 200 | $ rmapi mget . 201 | ``` 202 | 203 | rMAPI will set the exit code to `0` if the command succeeds, or `1` if it fails. 204 | 205 | # Environment variables 206 | 207 | - `RMAPI_CONFIG`: filepath used to store authentication tokens. When not set, rmapi uses the file `.rmapi` in the home directory of the current user. 208 | - `RMAPI_TRACE=1`: enable trace logging. 209 | - `RMAPI_USE_HIDDEN_FILES=1`: use and traverse hidden files/directories (they are ignored by default). 210 | - `RMAPI_THUMBNAILS`: generate a thumbnail of the first page of a PDF document. 211 | - `RMAPI_AUTH`: override the default authorization URL. 212 | - `RMAPI_DOC`: override the default document storage URL. 213 | - `RMAPI_HOST`: override all URLs. 214 | - `RMAPI_CONCURRENT`: sync15: maximum number of goroutines/http requests to use (default: 20). 215 | -------------------------------------------------------------------------------- /annotations/pdf.go: -------------------------------------------------------------------------------- 1 | package annotations 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "fmt" 7 | 8 | "os" 9 | 10 | "github.com/juruen/rmapi/archive" 11 | "github.com/juruen/rmapi/encoding/rm" 12 | "github.com/juruen/rmapi/log" 13 | "github.com/unidoc/unipdf/v3/annotator" 14 | "github.com/unidoc/unipdf/v3/contentstream" 15 | "github.com/unidoc/unipdf/v3/contentstream/draw" 16 | "github.com/unidoc/unipdf/v3/core" 17 | "github.com/unidoc/unipdf/v3/creator" 18 | pdf "github.com/unidoc/unipdf/v3/model" 19 | ) 20 | 21 | const ( 22 | DeviceWidth = 1404 23 | DeviceHeight = 1872 24 | ) 25 | 26 | var rmPageSize = creator.PageSize{445, 594} 27 | 28 | type PdfGenerator struct { 29 | zipName string 30 | outputFilePath string 31 | options PdfGeneratorOptions 32 | pdfReader *pdf.PdfReader 33 | template bool 34 | } 35 | 36 | type PdfGeneratorOptions struct { 37 | AddPageNumbers bool 38 | AllPages bool 39 | AnnotationsOnly bool //export the annotations without the background/pdf 40 | } 41 | 42 | func CreatePdfGenerator(zipName, outputFilePath string, options PdfGeneratorOptions) *PdfGenerator { 43 | return &PdfGenerator{zipName: zipName, outputFilePath: outputFilePath, options: options} 44 | } 45 | 46 | func normalized(p1 rm.Point, ratioX float64) (float64, float64) { 47 | return float64(p1.X) * ratioX, float64(p1.Y) * ratioX 48 | } 49 | 50 | func (p *PdfGenerator) Generate() error { 51 | file, err := os.Open(p.zipName) 52 | if err != nil { 53 | return err 54 | } 55 | defer func() { _ = file.Close() }() 56 | 57 | zip := archive.NewZip() 58 | 59 | fi, err := file.Stat() 60 | if err != 
nil { 61 | return err 62 | } 63 | 64 | err = zip.Read(file, fi.Size()) 65 | if err != nil { 66 | return err 67 | } 68 | 69 | if zip.Content.FileType == "epub" { 70 | return errors.New("only pdf and notebooks supported") 71 | } 72 | 73 | if err = p.initBackgroundPages(zip.Payload); err != nil { 74 | return err 75 | } 76 | 77 | if len(zip.Pages) == 0 { 78 | return errors.New("the document has no pages") 79 | } 80 | 81 | c := creator.New() 82 | if p.template { 83 | // use the standard page size 84 | c.SetPageSize(rmPageSize) 85 | } 86 | 87 | if p.pdfReader != nil && p.options.AllPages { 88 | outlines := p.pdfReader.GetOutlineTree() 89 | c.SetOutlineTree(outlines) 90 | } 91 | 92 | for _, pageAnnotations := range zip.Pages { 93 | hasContent := pageAnnotations.Data != nil 94 | 95 | // do not add a page when there are no annotations 96 | if !p.options.AllPages && !hasContent { 97 | continue 98 | } 99 | //1 based, redirected page 100 | pageNum := pageAnnotations.DocPage + 1 101 | 102 | page, err := p.addBackgroundPage(c, pageNum) 103 | if err != nil { 104 | return err 105 | } 106 | 107 | ratio := c.Height() / c.Width() 108 | 109 | var scale float64 110 | if ratio < 1.33 { 111 | scale = c.Width() / DeviceWidth 112 | } else { 113 | scale = c.Height() / DeviceHeight 114 | } 115 | if page == nil { 116 | log.Error.Fatal("page is null") 117 | } 118 | 119 | if err != nil { 120 | return err 121 | } 122 | if !hasContent { 123 | continue 124 | } 125 | 126 | contentCreator := contentstream.NewContentCreator() 127 | contentCreator.Add_q() 128 | 129 | for _, layer := range pageAnnotations.Data.Layers { 130 | for _, line := range layer.Lines { 131 | if len(line.Points) < 1 { 132 | continue 133 | } 134 | if line.BrushType == rm.Eraser { 135 | continue 136 | } 137 | 138 | if line.BrushType == rm.HighlighterV5 { 139 | last := len(line.Points) - 1 140 | x1, y1 := normalized(line.Points[0], scale) 141 | x2, _ := normalized(line.Points[last], scale) 142 | // make horizontal lines only, use y1 143 | width := scale * 30 144 | y1 += width / 2 145 | 146 | lineDef := annotator.LineAnnotationDef{X1: x1 - 1, Y1: c.Height() - y1, X2: x2, Y2: c.Height() - y1} 147 | lineDef.LineColor = pdf.NewPdfColorDeviceRGB(1.0, 1.0, 0.0) //yellow 148 | lineDef.Opacity = 0.5 149 | lineDef.LineWidth = width 150 | ann, err := annotator.CreateLineAnnotation(lineDef) 151 | if err != nil { 152 | return err 153 | } 154 | page.AddAnnotation(ann) 155 | } else { 156 | path := draw.NewPath() 157 | for i := 0; i < len(line.Points); i++ { 158 | x1, y1 := normalized(line.Points[i], scale) 159 | path = path.AppendPoint(draw.NewPoint(x1, c.Height()-y1)) 160 | } 161 | 162 | contentCreator.Add_w(float64(line.BrushSize*6.0 - 10.8)) 163 | 164 | switch line.BrushColor { 165 | case rm.Black: 166 | contentCreator.Add_rg(1.0, 1.0, 1.0) 167 | case rm.White: 168 | contentCreator.Add_rg(0.0, 0.0, 0.0) 169 | case rm.Grey: 170 | contentCreator.Add_rg(0.8, 0.8, 0.8) 171 | } 172 | 173 | //TODO: use bezier 174 | draw.DrawPathWithCreator(path, contentCreator) 175 | 176 | contentCreator.Add_S() 177 | } 178 | } 179 | } 180 | contentCreator.Add_Q() 181 | drawingOperations := contentCreator.Operations().String() 182 | pageContentStreams, err := page.GetAllContentStreams() 183 | //hack: wrap the page content in a context to prevent transformation matrix misalignment 184 | wrapper := []string{"q", pageContentStreams, "Q", drawingOperations} 185 | page.SetContentStreams(wrapper, core.NewFlateEncoder()) 186 | } 187 | 188 | return c.WriteToFile(p.outputFilePath) 189 | } 190 | 191 
| func (p *PdfGenerator) initBackgroundPages(pdfArr []byte) error { 192 | if len(pdfArr) > 0 { 193 | pdfReader, err := pdf.NewPdfReader(bytes.NewReader(pdfArr)) 194 | if err != nil { 195 | return err 196 | } 197 | 198 | encrypted, err := pdfReader.IsEncrypted() 199 | if err != nil { 200 | return err 201 | } 202 | if encrypted { 203 | valid, err := pdfReader.Decrypt([]byte("")) 204 | if err != nil { 205 | return err 206 | } 207 | if !valid { 208 | return fmt.Errorf("cannot decrypt") 209 | } 210 | 211 | } 212 | 213 | p.pdfReader = pdfReader 214 | p.template = false 215 | return nil 216 | } 217 | 218 | p.template = true 219 | return nil 220 | } 221 | 222 | func (p *PdfGenerator) addBackgroundPage(c *creator.Creator, pageNum int) (*pdf.PdfPage, error) { 223 | var page *pdf.PdfPage 224 | 225 | // if page == 0 then empty page 226 | if !p.template && !p.options.AnnotationsOnly && pageNum > 0 { 227 | tmpPage, err := p.pdfReader.GetPage(pageNum) 228 | if err != nil { 229 | return nil, err 230 | } 231 | mbox, err := tmpPage.GetMediaBox() 232 | if err != nil { 233 | return nil, err 234 | } 235 | 236 | // TODO: adjust the page if cropped 237 | pageHeight := mbox.Ury - mbox.Lly 238 | pageWidth := mbox.Urx - mbox.Llx 239 | // use the pdf's page size 240 | c.SetPageSize(creator.PageSize{pageWidth, pageHeight}) 241 | c.AddPage(tmpPage) 242 | page = tmpPage 243 | } else { 244 | page = c.NewPage() 245 | } 246 | 247 | if p.options.AddPageNumbers { 248 | c.DrawFooter(func(block *creator.Block, args creator.FooterFunctionArgs) { 249 | p := c.NewParagraph(fmt.Sprintf("%d", args.PageNum)) 250 | p.SetFontSize(8) 251 | w := block.Width() - 20 252 | h := block.Height() - 10 253 | p.SetPos(w, h) 254 | block.Draw(p) 255 | }) 256 | } 257 | return page, nil 258 | } 259 | -------------------------------------------------------------------------------- /api/sync15/tree.go: -------------------------------------------------------------------------------- 1 | package sync15 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "crypto/sha256" 7 | "errors" 8 | "fmt" 9 | "hash/crc32" 10 | "io" 11 | "os" 12 | "sort" 13 | "strconv" 14 | 15 | "github.com/juruen/rmapi/log" 16 | "golang.org/x/sync/errgroup" 17 | ) 18 | 19 | const SchemaVersion = "3" 20 | const DocType = "80000000" 21 | const FileType = "0" 22 | const Delimiter = ':' 23 | 24 | func FileHashAndSize(file string) ([]byte, int64, uint32, error) { 25 | f, err := os.Open(file) 26 | if err != nil { 27 | return nil, 0, 0, err 28 | } 29 | defer f.Close() 30 | 31 | hasher := sha256.New() 32 | io.Copy(hasher, f) 33 | h := hasher.Sum(nil) 34 | 35 | _, err = f.Seek(0, io.SeekStart) 36 | if err != nil { 37 | return nil, 0, 0, err 38 | } 39 | crc32c := crc32.MakeTable(crc32.Castagnoli) 40 | crc32 := crc32.New(crc32c) 41 | io.Copy(crc32, f) 42 | checksum := crc32.Sum32() 43 | 44 | size, err := f.Seek(0, os.SEEK_CUR) 45 | return h, size, checksum, err 46 | 47 | } 48 | 49 | func parseEntry(line string) (*Entry, error) { 50 | entry := Entry{} 51 | rdr := NewFieldReader(line) 52 | numFields := len(rdr.fields) 53 | if numFields != 5 { 54 | return nil, fmt.Errorf("wrong number of fields %d", numFields) 55 | 56 | } 57 | var err error 58 | entry.Hash, err = rdr.Next() 59 | if err != nil { 60 | return nil, err 61 | } 62 | entry.Type, err = rdr.Next() 63 | if err != nil { 64 | return nil, err 65 | } 66 | entry.DocumentID, err = rdr.Next() 67 | if err != nil { 68 | return nil, err 69 | } 70 | tmp, err := rdr.Next() 71 | if err != nil { 72 | return nil, err 73 | } 74 | entry.Subfiles, err = 
strconv.Atoi(tmp) 75 | if err != nil { 76 | return nil, fmt.Errorf("cannot read subfiles %s %v", line, err) 77 | } 78 | tmp, err = rdr.Next() 79 | if err != nil { 80 | return nil, err 81 | } 82 | entry.Size, err = strconv.ParseInt(tmp, 10, 64) 83 | if err != nil { 84 | return nil, fmt.Errorf("cannot read size %s %v", line, err) 85 | } 86 | return &entry, nil 87 | } 88 | 89 | func parseIndex(f io.Reader) ([]*Entry, error) { 90 | var entries []*Entry 91 | scanner := bufio.NewScanner(f) 92 | scanner.Scan() 93 | schema := scanner.Text() 94 | 95 | if schema != SchemaVersion { 96 | return nil, errors.New("wrong schema") 97 | } 98 | for scanner.Scan() { 99 | line := scanner.Text() 100 | entry, err := parseEntry(line) 101 | if err != nil { 102 | return nil, fmt.Errorf("cant parse line '%s', %w", line, err) 103 | } 104 | 105 | entries = append(entries, entry) 106 | } 107 | return entries, nil 108 | } 109 | 110 | func (t *HashTree) IndexReader() (io.ReadCloser, error) { 111 | pipeReader, pipeWriter := io.Pipe() 112 | w := bufio.NewWriter(pipeWriter) 113 | go func() { 114 | defer pipeWriter.Close() 115 | w.WriteString(SchemaVersion) 116 | w.WriteString("\n") 117 | for _, d := range t.Docs { 118 | w.WriteString(d.Line()) 119 | w.WriteString("\n") 120 | } 121 | w.Flush() 122 | }() 123 | 124 | return pipeReader, nil 125 | } 126 | 127 | type HashTree struct { 128 | Hash string 129 | Generation int64 130 | Docs []*BlobDoc 131 | CacheVersion int 132 | } 133 | 134 | func (t *HashTree) FindDoc(id string) (*BlobDoc, error) { 135 | //O(n) 136 | for _, d := range t.Docs { 137 | if d.DocumentID == id { 138 | return d, nil 139 | } 140 | } 141 | return nil, fmt.Errorf("doc %s not found", id) 142 | } 143 | 144 | func (t *HashTree) Remove(id string) error { 145 | docIndex := -1 146 | for index, d := range t.Docs { 147 | if d.DocumentID == id { 148 | docIndex = index 149 | break 150 | } 151 | } 152 | if docIndex > -1 { 153 | log.Trace.Printf("Removing %s", id) 154 | length := len(t.Docs) - 1 155 | t.Docs[docIndex] = t.Docs[length] 156 | t.Docs = t.Docs[:length] 157 | 158 | t.Rehash() 159 | return nil 160 | } 161 | return fmt.Errorf("%s not found", id) 162 | } 163 | 164 | func (t *HashTree) Rehash() error { 165 | entries := []*Entry{} 166 | for _, e := range t.Docs { 167 | entries = append(entries, &e.Entry) 168 | } 169 | hash, err := HashEntries(entries) 170 | if err != nil { 171 | return err 172 | } 173 | log.Info.Println("New root hash: ", hash) 174 | t.Hash = hash 175 | return nil 176 | } 177 | 178 | // / Mirror makes the tree look like the storage 179 | func (t *HashTree) Mirror(r RemoteStorage, maxconcurrent int) error { 180 | rootHash, gen, err := r.GetRootIndex() 181 | if err != nil { 182 | return err 183 | } 184 | if rootHash == "" && gen == 0 { 185 | log.Info.Println("Empty cloud") 186 | t.Docs = nil 187 | t.Generation = 0 188 | return nil 189 | } 190 | 191 | if rootHash == t.Hash { 192 | return nil 193 | } 194 | log.Info.Printf("remote root hash different") 195 | 196 | rootIndexReader, err := r.GetReader(rootHash) 197 | if err != nil { 198 | return err 199 | } 200 | defer rootIndexReader.Close() 201 | 202 | entries, err := parseIndex(rootIndexReader) 203 | if err != nil { 204 | return err 205 | } 206 | 207 | head := make([]*BlobDoc, 0) 208 | current := make(map[string]*BlobDoc) 209 | new := make(map[string]*Entry) 210 | 211 | for _, e := range entries { 212 | new[e.DocumentID] = e 213 | } 214 | wg, ctx := errgroup.WithContext(context.TODO()) 215 | wg.SetLimit(maxconcurrent) 216 | 217 | //current documents 
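// Note: every document whose hash differs from its entry in the fetched root index
// is re-mirrored in its own goroutine; the errgroup's SetLimit(maxconcurrent) above
// caps how many of these fetches run at once (the README documents RMAPI_CONCURRENT,
// default 20, as the user-facing knob that presumably feeds this limit).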
218 | for _, doc := range t.Docs { 219 | if entry, ok := new[doc.DocumentID]; ok { 220 | head = append(head, doc) 221 | current[doc.DocumentID] = doc 222 | 223 | if entry.Hash != doc.Hash { 224 | log.Info.Println("doc updated: ", doc.DocumentID) 225 | e := entry 226 | d := doc 227 | wg.Go(func() error { 228 | return d.Mirror(e, r) 229 | }) 230 | } 231 | } 232 | select { 233 | case <-ctx.Done(): 234 | goto EXIT 235 | default: 236 | } 237 | } 238 | 239 | //find new entries 240 | for k, newEntry := range new { 241 | if _, ok := current[k]; !ok { 242 | doc := &BlobDoc{} 243 | log.Trace.Println("doc new: ", k) 244 | head = append(head, doc) 245 | e := newEntry 246 | wg.Go(func() error { 247 | return doc.Mirror(e, r) 248 | }) 249 | } 250 | select { 251 | case <-ctx.Done(): 252 | goto EXIT 253 | default: 254 | } 255 | } 256 | EXIT: 257 | err = wg.Wait() 258 | if err != nil { 259 | return err 260 | } 261 | sort.Slice(head, func(i, j int) bool { return head[i].DocumentID < head[j].DocumentID }) 262 | t.Docs = head 263 | t.Generation = gen 264 | t.Hash = rootHash 265 | return nil 266 | } 267 | 268 | func BuildTree(provider RemoteStorage) (*HashTree, error) { 269 | tree := HashTree{} 270 | 271 | rootHash, gen, err := provider.GetRootIndex() 272 | 273 | if err != nil { 274 | return nil, err 275 | } 276 | tree.Hash = rootHash 277 | tree.Generation = gen 278 | 279 | rootIndex, err := provider.GetReader(rootHash) 280 | if err != nil { 281 | return nil, err 282 | } 283 | 284 | defer rootIndex.Close() 285 | entries, _ := parseIndex(rootIndex) 286 | 287 | for _, e := range entries { 288 | f, _ := provider.GetReader(e.Hash) 289 | defer f.Close() 290 | 291 | doc := &BlobDoc{} 292 | doc.Entry = *e 293 | 294 | items, _ := parseIndex(f) 295 | doc.Files = items 296 | for _, i := range items { 297 | doc.ReadMetadata(i, provider) 298 | } 299 | //don't include deleted items 300 | if doc.Metadata.Deleted { 301 | continue 302 | } 303 | 304 | tree.Docs = append(tree.Docs, doc) 305 | } 306 | 307 | return &tree, nil 308 | 309 | } 310 | -------------------------------------------------------------------------------- /archive/reader.go: -------------------------------------------------------------------------------- 1 | package archive 2 | 3 | import ( 4 | "archive/zip" 5 | "bufio" 6 | "encoding/json" 7 | "errors" 8 | "io" 9 | "io/ioutil" 10 | "path" 11 | "path/filepath" 12 | "strconv" 13 | "strings" 14 | 15 | "github.com/google/uuid" 16 | "github.com/juruen/rmapi/encoding/rm" 17 | "github.com/juruen/rmapi/log" 18 | "github.com/juruen/rmapi/util" 19 | ) 20 | 21 | // Read fills a Zip parsing a Remarkable archive file. 
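// It parses the .content file first, since that carries the page count and the page
// UUID mapping the later readers rely on; the payload (pdf/epub) is read next, and the
// per-page metadata, pagedata, .rm data and thumbnails are only read when the page
// count is positive.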
22 | func (z *Zip) Read(r io.ReaderAt, size int64) error { 23 | zr, err := zip.NewReader(r, size) 24 | if err != nil { 25 | return err 26 | } 27 | 28 | // reading content first because it contains the number of pages 29 | if err := z.readContent(zr); err != nil { 30 | return err 31 | } 32 | 33 | if err := z.readPayload(zr); err != nil { 34 | return err 35 | } 36 | 37 | //uploading and then downloading a file results in 0 pages 38 | if z.Content.PageCount <= 0 { 39 | log.Warning.Printf("PageCount is 0") 40 | return nil 41 | } 42 | 43 | if err := z.readMetadata(zr); err != nil { 44 | return err 45 | } 46 | 47 | if err := z.readPagedata(zr); err != nil { 48 | return err 49 | } 50 | 51 | if err := z.readData(zr); err != nil { 52 | return err 53 | } 54 | 55 | if err := z.readThumbnails(zr); err != nil { 56 | return err 57 | } 58 | 59 | return nil 60 | } 61 | 62 | // readContent reads the .content file contained in an archive and the UUID 63 | func (z *Zip) readContent(zr *zip.Reader) error { 64 | files, err := zipExtFinder(zr, ".content") 65 | if err != nil { 66 | return err 67 | } 68 | 69 | if len(files) != 1 { 70 | return errors.New("archive does not contain a unique content file") 71 | } 72 | 73 | contentFile := files[0] 74 | file, err := contentFile.Open() 75 | if err != nil { 76 | return err 77 | } 78 | defer file.Close() 79 | 80 | bytes, err := ioutil.ReadAll(file) 81 | if err != nil { 82 | return err 83 | } 84 | 85 | if err = json.Unmarshal(bytes, &z.Content); err != nil { 86 | return err 87 | } 88 | p := contentFile.FileInfo().Name() 89 | id, _ := util.DocPathToName(p) 90 | z.UUID = id 91 | 92 | redirectedCount := len(z.Content.RedirectionMap) 93 | pagesCount := len(z.Content.Pages) 94 | if redirectedCount > 0 { 95 | z.pageMap = make(map[string]int) 96 | z.Pages = make([]Page, redirectedCount) 97 | for index, docPage := range z.Content.RedirectionMap { 98 | if index > pagesCount { 99 | log.Warning.Print("redirection > pages") 100 | break 101 | } 102 | pageUUID := z.Content.Pages[index] 103 | z.pageMap[pageUUID] = index 104 | z.Pages[index].DocPage = docPage 105 | } 106 | 107 | } else if pagesCount > 0 { 108 | z.pageMap = make(map[string]int) 109 | z.Pages = make([]Page, pagesCount) 110 | for index, pageUUID := range z.Content.Pages { 111 | z.pageMap[pageUUID] = index 112 | z.Pages[index].DocPage = index 113 | } 114 | } else { 115 | // instantiate the slice of pages 116 | z.Pages = make([]Page, z.Content.PageCount) 117 | } 118 | return nil 119 | } 120 | 121 | // readPagedata reads the .pagedata file contained in an archive 122 | // and iterate to gather which template was used for each page. 123 | func (z *Zip) readPagedata(zr *zip.Reader) error { 124 | files, err := zipExtFinder(zr, ".pagedata") 125 | if err != nil { 126 | return err 127 | } 128 | 129 | if len(files) != 1 { 130 | return errors.New("archive does not contain a unique pagedata file") 131 | } 132 | 133 | file, err := files[0].Open() 134 | if err != nil { 135 | return err 136 | } 137 | defer file.Close() 138 | 139 | // iterate pagedata file lines 140 | sc := bufio.NewScanner(file) 141 | var i int = 0 142 | for sc.Scan() { 143 | line := sc.Text() 144 | z.Pages[i].Pagedata = line 145 | i++ 146 | } 147 | 148 | if err := sc.Err(); err != nil { 149 | return err 150 | } 151 | 152 | return nil 153 | } 154 | 155 | // readPayload tries to extract the payload from an archive if it exists. 
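// The lookup key is the FileType recorded in the .content file (e.g. "pdf" or "epub");
// if no single matching file is found, the archive is treated as a notebook-only
// document and no error is returned.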
156 | func (z *Zip) readPayload(zr *zip.Reader) error { 157 | ext := z.Content.FileType 158 | files, err := zipExtFinder(zr, "."+ext) 159 | if err != nil { 160 | return err 161 | } 162 | 163 | // return if not found 164 | if len(files) != 1 { 165 | return nil 166 | } 167 | 168 | file, err := files[0].Open() 169 | if err != nil { 170 | return err 171 | } 172 | defer file.Close() 173 | 174 | z.Payload, err = ioutil.ReadAll(file) 175 | if err != nil { 176 | return err 177 | } 178 | 179 | return nil 180 | } 181 | 182 | // readData extracts existing .rm files from an archive. 183 | func (z *Zip) readData(zr *zip.Reader) error { 184 | files, err := zipExtFinder(zr, ".rm") 185 | if err != nil { 186 | return err 187 | } 188 | 189 | for _, file := range files { 190 | name, _ := splitExt(file.FileInfo().Name()) 191 | 192 | idx, err := z.pageIndex(name) 193 | if err != nil { 194 | return err 195 | } 196 | 197 | if len(z.Pages) <= idx { 198 | return errors.New("page not found") 199 | } 200 | 201 | r, err := file.Open() 202 | if err != nil { 203 | return err 204 | } 205 | 206 | bytes, err := ioutil.ReadAll(r) 207 | if err != nil { 208 | return err 209 | } 210 | 211 | z.Pages[idx].Data = rm.New() 212 | err = z.Pages[idx].Data.UnmarshalBinary(bytes) 213 | if err != nil { 214 | return err 215 | } 216 | } 217 | 218 | return nil 219 | } 220 | 221 | // readThumbnails extracts existing thumbnails from an archive. 222 | func (z *Zip) readThumbnails(zr *zip.Reader) error { 223 | files, err := zipExtFinder(zr, ".jpg") 224 | if err != nil { 225 | return err 226 | } 227 | 228 | for _, file := range files { 229 | name, _ := splitExt(file.FileInfo().Name()) 230 | 231 | idx, err := strconv.Atoi(name) 232 | if err != nil { 233 | return errors.New("error in .jpg filename") 234 | } 235 | 236 | if len(z.Pages) <= idx { 237 | return errors.New("page not found") 238 | } 239 | 240 | r, err := file.Open() 241 | if err != nil { 242 | return err 243 | } 244 | 245 | z.Pages[idx].Thumbnail, err = ioutil.ReadAll(r) 246 | if err != nil { 247 | return err 248 | } 249 | } 250 | 251 | return nil 252 | } 253 | 254 | func (z *Zip) pageIndex(namePart string) (idx int, err error) { 255 | idx, err = strconv.Atoi(namePart) 256 | if err == nil { 257 | return idx, nil 258 | } 259 | _, err = uuid.Parse(namePart) 260 | if err != nil { 261 | return -1, errors.New("neither int nor uuid page") 262 | } 263 | 264 | if z.pageMap == nil { 265 | return -1, errors.New("no uuid pagemap") 266 | } 267 | var ok bool 268 | idx, ok = z.pageMap[namePart] 269 | if !ok { 270 | log.Warning.Println("Page not found in map: ", namePart) 271 | } 272 | 273 | return 274 | } 275 | 276 | // readMetadata extracts existing .json metadata files from an archive. 
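// Files named <index>-metadata.json or <uuid>-metadata.json are resolved to their
// page slot via pageIndex and unmarshaled into that page's Metadata field.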
277 | func (z *Zip) readMetadata(zr *zip.Reader) error { 278 | files, err := zipExtFinder(zr, ".json") 279 | if err != nil { 280 | return err 281 | } 282 | 283 | for _, file := range files { 284 | name, _ := splitExt(file.FileInfo().Name()) 285 | 286 | // name is 0-metadata.json or uuid-metadata 287 | namePart := strings.TrimSuffix(name, "-metadata") 288 | idx, err := z.pageIndex(namePart) 289 | if err != nil { 290 | return err 291 | } 292 | 293 | if len(z.Pages) <= idx { 294 | return errors.New("page not found") 295 | } 296 | 297 | r, err := file.Open() 298 | if err != nil { 299 | return err 300 | } 301 | 302 | bytes, err := ioutil.ReadAll(r) 303 | if err != nil { 304 | return err 305 | } 306 | 307 | err = json.Unmarshal(bytes, &z.Pages[idx].Metadata) 308 | if err != nil { 309 | return err 310 | } 311 | } 312 | 313 | return nil 314 | } 315 | 316 | // splitExt splits the extension from a filename 317 | func splitExt(name string) (string, string) { 318 | ext := filepath.Ext(name) 319 | return name[0 : len(name)-len(ext)], ext 320 | } 321 | 322 | // zipExtFinder searches for a file matching the substr pattern 323 | // in a zip file. 324 | func zipExtFinder(zr *zip.Reader, ext string) ([]*zip.File, error) { 325 | var files []*zip.File 326 | 327 | for _, file := range zr.File { 328 | parentFolderName := path.Dir(file.FileHeader.Name) 329 | if strings.HasSuffix(parentFolderName, ".highlights") { 330 | continue 331 | } 332 | filename := file.FileInfo().Name() 333 | if _, e := splitExt(filename); e == ext { 334 | files = append(files, file) 335 | } 336 | } 337 | 338 | return files, nil 339 | } 340 | -------------------------------------------------------------------------------- /api/sync10/api.go: -------------------------------------------------------------------------------- 1 | package sync10 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "io" 7 | "io/ioutil" 8 | "os" 9 | 10 | "github.com/juruen/rmapi/archive" 11 | "github.com/juruen/rmapi/config" 12 | "github.com/juruen/rmapi/filetree" 13 | "github.com/juruen/rmapi/log" 14 | "github.com/juruen/rmapi/model" 15 | "github.com/juruen/rmapi/transport" 16 | "github.com/juruen/rmapi/util" 17 | ) 18 | 19 | // An ApiCtx allows you interact with the remote reMarkable API 20 | type ApiCtx struct { 21 | Http *transport.HttpClientCtx 22 | ft *filetree.FileTreeCtx 23 | } 24 | 25 | func (ctx *ApiCtx) Filetree() *filetree.FileTreeCtx { 26 | return ctx.ft 27 | } 28 | 29 | func (ctx *ApiCtx) Refresh() (err error) { 30 | return errors.New("not implemented") 31 | } 32 | 33 | // Nuke removes all documents from the account 34 | func (ctx *ApiCtx) Nuke() error { 35 | documents := make([]model.Document, 0) 36 | 37 | if err := ctx.Http.Get(transport.UserBearer, config.ListDocs, nil, &documents); err != nil { 38 | return err 39 | } 40 | 41 | for _, d := range documents { 42 | log.Info.Println("Deleting: ", d.VissibleName) 43 | 44 | err := ctx.Http.Put(transport.UserBearer, config.DeleteEntry, util.InSlice(d), nil) 45 | if err != nil { 46 | log.Error.Println("failed to remove entry", err) 47 | return err 48 | } 49 | } 50 | 51 | return nil 52 | } 53 | 54 | // FetchDocument downloads a document given its ID and saves it locally into dstPath 55 | func (ctx *ApiCtx) FetchDocument(docId, dstPath string) error { 56 | documents := make([]model.Document, 0) 57 | 58 | url := fmt.Sprintf("%s?withBlob=true&doc=%s", config.ListDocs, docId) 59 | 60 | if err := ctx.Http.Get(transport.UserBearer, url, nil, &documents); err != nil { 61 | log.Error.Println("failed to fetch document 
BlobURLGet", err) 62 | return err 63 | } 64 | 65 | if len(documents) == 0 || documents[0].BlobURLGet == "" { 66 | log.Error.Println("BlobURLGet for document is empty") 67 | return errors.New("no BlobURLGet") 68 | } 69 | 70 | blobUrl := documents[0].BlobURLGet 71 | 72 | src, err := ctx.Http.GetStream(transport.UserBearer, blobUrl) 73 | 74 | if src != nil { 75 | defer src.Close() 76 | } 77 | 78 | if err != nil { 79 | log.Error.Println("Error fetching blob") 80 | return err 81 | } 82 | 83 | dst, err := ioutil.TempFile("", "rmapifile") 84 | 85 | if err != nil { 86 | log.Error.Println("failed to create temp fail to download blob") 87 | return err 88 | } 89 | 90 | tmpPath := dst.Name() 91 | defer dst.Close() 92 | defer os.Remove(tmpPath) 93 | 94 | _, err = io.Copy(dst, src) 95 | 96 | if err != nil { 97 | log.Error.Println("failed to download blob") 98 | return err 99 | } 100 | 101 | _, err = util.CopyFile(tmpPath, dstPath) 102 | 103 | if err != nil { 104 | log.Error.Printf("failed to copy %s to %s, er: %s\n", tmpPath, dstPath, err.Error()) 105 | return err 106 | } 107 | 108 | return nil 109 | } 110 | 111 | // CreateDir creates a remote directory with a given name under the parentId directory 112 | func (ctx *ApiCtx) CreateDir(parentId, name string, notify bool) (*model.Document, error) { 113 | uploadRsp, err := ctx.uploadRequest("", model.DirectoryType) 114 | 115 | if err != nil { 116 | return nil, err 117 | } 118 | 119 | if !uploadRsp.Success { 120 | return nil, errors.New("upload request returned success := false") 121 | } 122 | 123 | zippath, err := archive.CreateZipDirectory(uploadRsp.ID) 124 | 125 | if err != nil { 126 | log.Error.Println("failed to create zip directory", err) 127 | return nil, err 128 | } 129 | 130 | f, err := os.Open(zippath) 131 | 132 | if err != nil { 133 | log.Error.Println("failed to read zip file to upload", zippath, err) 134 | return nil, err 135 | } 136 | 137 | defer f.Close() 138 | 139 | err = ctx.Http.PutStream(transport.UserBearer, uploadRsp.BlobURLPut, f) 140 | 141 | if err != nil { 142 | log.Error.Println("failed to upload directory", err) 143 | return nil, err 144 | } 145 | 146 | metaDoc := model.CreateUploadDocumentMeta(uploadRsp.ID, model.DirectoryType, parentId, name) 147 | 148 | err = ctx.Http.Put(transport.UserBearer, config.UpdateStatus, util.InSlice(metaDoc), nil) 149 | 150 | if err != nil { 151 | log.Error.Println("failed to move entry", err) 152 | return nil, err 153 | } 154 | 155 | doc := metaDoc.ToDocument() 156 | 157 | return &doc, err 158 | 159 | } 160 | 161 | // DeleteEntry removes an entry: either an empty directory or a file 162 | func (ctx *ApiCtx) DeleteEntry(node *model.Node) error { 163 | if node.IsDirectory() && len(node.Children) > 0 { 164 | return errors.New("directory is not empty") 165 | } 166 | 167 | deleteDoc := node.Document.ToDeleteDocument() 168 | 169 | err := ctx.Http.Put(transport.UserBearer, config.DeleteEntry, util.InSlice(deleteDoc), nil) 170 | 171 | if err != nil { 172 | log.Error.Println("failed to remove entry", err) 173 | return err 174 | } 175 | 176 | return nil 177 | } 178 | 179 | // MoveEntry moves an entry (either a directory or a file) 180 | // - src is the source node to be moved 181 | // - dstDir is an existing destination directory 182 | // - name is the new name of the moved entry in the destination directory 183 | func (ctx *ApiCtx) MoveEntry(src, dstDir *model.Node, name string) (*model.Node, error) { 184 | if dstDir.IsFile() { 185 | return nil, errors.New("destination directory is a file") 186 | } 187 | 188 | 
metaDoc := src.Document.ToMetaDocument() 189 | metaDoc.Version = metaDoc.Version + 1 190 | metaDoc.VissibleName = name 191 | metaDoc.Parent = dstDir.Id() 192 | 193 | err := ctx.Http.Put(transport.UserBearer, config.UpdateStatus, util.InSlice(metaDoc), nil) 194 | 195 | if err != nil { 196 | log.Error.Println("failed to move entry", err) 197 | return nil, err 198 | } 199 | 200 | doc := metaDoc.ToDocument() 201 | 202 | return &model.Node{&doc, src.Children, dstDir}, nil 203 | } 204 | 205 | // UploadDocument uploads a local document given by sourceDocPath under the parentId directory 206 | func (ctx *ApiCtx) UploadDocument(parentId string, sourceDocPath string, notify bool) (*model.Document, error) { 207 | name, ext := util.DocPathToName(sourceDocPath) 208 | 209 | if name == "" { 210 | return nil, errors.New("file name is invalid") 211 | } 212 | 213 | if !util.IsFileTypeSupported(ext) { 214 | return nil, errors.New("unsupported file extension: " + ext) 215 | } 216 | 217 | id := "" 218 | var err error 219 | 220 | //restore document 221 | if ext == "zip" { 222 | id, err = archive.GetIdFromZip(sourceDocPath) 223 | if err != nil { 224 | return nil, err 225 | } 226 | if id == "" { 227 | return nil, errors.New("could not determine the Document UUID") 228 | } 229 | } 230 | 231 | uploadRsp, err := ctx.uploadRequest(id, model.DocumentType) 232 | 233 | if err != nil { 234 | return nil, err 235 | } 236 | 237 | if !uploadRsp.Success { 238 | return nil, errors.New("upload request returned success := false") 239 | } 240 | 241 | zipPath, err := archive.CreateZipDocument(uploadRsp.ID, sourceDocPath) 242 | 243 | if err != nil { 244 | log.Error.Println("failed to create zip doc", err) 245 | return nil, err 246 | } 247 | 248 | f, err := os.Open(zipPath) 249 | defer f.Close() 250 | 251 | if err != nil { 252 | log.Error.Println("failed to read zip file to upload", zipPath, err) 253 | return nil, err 254 | } 255 | 256 | err = ctx.Http.PutStream(transport.UserBearer, uploadRsp.BlobURLPut, f) 257 | 258 | if err != nil { 259 | log.Error.Println("failed to upload zip document", err) 260 | return nil, err 261 | } 262 | 263 | metaDoc := model.CreateUploadDocumentMeta(uploadRsp.ID, model.DocumentType, parentId, name) 264 | 265 | err = ctx.Http.Put(transport.UserBearer, config.UpdateStatus, util.InSlice(metaDoc), nil) 266 | 267 | if err != nil { 268 | log.Error.Println("failed to move entry", err) 269 | return nil, err 270 | } 271 | 272 | doc := metaDoc.ToDocument() 273 | 274 | return &doc, err 275 | } 276 | 277 | func (ctx *ApiCtx) uploadRequest(id string, entryType string) (model.UploadDocumentResponse, error) { 278 | uploadReq := model.CreateUploadDocumentRequest(id, entryType) 279 | uploadRsp := make([]model.UploadDocumentResponse, 0) 280 | 281 | err := ctx.Http.Put(transport.UserBearer, config.UploadRequest, util.InSlice(uploadReq), &uploadRsp) 282 | 283 | if err != nil { 284 | log.Error.Println("failed to to send upload request", err) 285 | return model.UploadDocumentResponse{}, err 286 | } 287 | 288 | return uploadRsp[0], nil 289 | } 290 | 291 | func CreateCtx(http *transport.HttpClientCtx) (*ApiCtx, error) { 292 | 293 | tree, err := DocumentsFileTree(http) 294 | if err != nil { 295 | return nil, fmt.Errorf("failed to fetch document tree %v", err) 296 | } 297 | return &ApiCtx{http, tree}, nil 298 | } 299 | 300 | // DocumentsFileTree reads your remote documents and builds a file tree 301 | // structure to represent them 302 | func DocumentsFileTree(http *transport.HttpClientCtx) (*filetree.FileTreeCtx, error) { 303 | 
documents := make([]*model.Document, 0) 304 | 305 | if err := http.Get(transport.UserBearer, config.ListDocs, nil, &documents); err != nil { 306 | return nil, err 307 | } 308 | 309 | fileTree := filetree.CreateFileTreeCtx() 310 | 311 | for _, d := range documents { 312 | fileTree.AddDocument(d) 313 | } 314 | 315 | for _, d := range fileTree.Root().Children { 316 | log.Trace.Println(d.Name(), d.IsFile()) 317 | } 318 | 319 | return &fileTree, nil 320 | } 321 | 322 | // SyncComplete does nothing for this version 323 | func (ctx *ApiCtx) SyncComplete() error { 324 | return nil 325 | } 326 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/abiosoft/ishell v2.0.0+incompatible h1:zpwIuEHc37EzrsIYah3cpevrIc8Oma7oZPxr03tlmmw= 2 | github.com/abiosoft/ishell v2.0.0+incompatible/go.mod h1:HQR9AqF2R3P4XXpMpI0NAzgHf/aS6+zVXRj14cVk9qg= 3 | github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db h1:CjPUSXOiYptLbTdr1RceuZgSFDQ7U15ITERUGrUORx8= 4 | github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db/go.mod h1:rB3B4rKii8V21ydCbIzH5hZiCQE7f5E9SzUb/ZZx530= 5 | github.com/adrg/strutil v0.1.0 h1:IOQnSOAjbE17+7l1lw4rXgX6JuSeJGdZa7BucTMV3Qg= 6 | github.com/adrg/strutil v0.1.0/go.mod h1:pXRr2+IyX5AEPAF5icj/EeTaiflPSD2hvGjnguilZgE= 7 | github.com/adrg/sysfont v0.1.0 h1:vOk13USVkciGOJj9sPT9Gl9zfHUT2HZgsBnwS1Je4Q8= 8 | github.com/adrg/sysfont v0.1.0/go.mod h1:DzISco90USPZJ+lmtpuz1SOTn1fih6YyB0KG2TEP/0U= 9 | github.com/adrg/xdg v0.2.1 h1:VSVdnH7cQ7V+B33qSJHTCRlNgra1607Q8PzEmnvb2Ic= 10 | github.com/adrg/xdg v0.2.1/go.mod h1:ZuOshBmzV4Ta+s23hdfFZnBsdzmoR3US0d7ErpqSbTQ= 11 | github.com/boombuler/barcode v1.0.0 h1:s1TvRnXwL2xJRaccrdcBQMZxq6X7DvsMogtmJeHDdrc= 12 | github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= 13 | github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= 14 | github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= 15 | github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= 16 | github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= 17 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 18 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 19 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 20 | github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= 21 | github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= 22 | github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= 23 | github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= 24 | github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= 25 | github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= 26 | github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= 27 | github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= 28 | github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= 29 | github.com/google/uuid 
v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 30 | github.com/gunnsth/pkcs7 v0.0.0-20181213175627-3cffc6fbfe83 h1:saj5dTV7eQ1wFg/gVZr1SfbkOmg8CYO9R8frHgQiyR4= 31 | github.com/gunnsth/pkcs7 v0.0.0-20181213175627-3cffc6fbfe83/go.mod h1:xaGEIRenAiJcGgd9p62zbiP4993KaV3PdjczwGnP50I= 32 | github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= 33 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 34 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 35 | github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= 36 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 37 | github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= 38 | github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= 39 | github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= 40 | github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= 41 | github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= 42 | github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= 43 | github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 44 | github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ= 45 | github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= 46 | github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= 47 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 48 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 49 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 50 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 51 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 52 | github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= 53 | github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 54 | github.com/unidoc/unipdf/v3 v3.6.1 h1:T9bb9NkFRuv3EKFranrgyCBS4sFV9LKc20UvuDJxhCU= 55 | github.com/unidoc/unipdf/v3 v3.6.1/go.mod h1:oB/vP2a5OJfA5Op0X26CFX1JC8yECO2w+f6pMO/zpoo= 56 | github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 57 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 58 | golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 59 | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= 60 | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 61 | golang.org/x/image v0.0.0-20181116024801-cd38e8056d9b/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= 62 | golang.org/x/image v0.5.0 h1:5JMiNunQeQw++mMOz48/ISeNu3Iweh/JaZU8ZLqHRrI= 63 | golang.org/x/image v0.5.0/go.mod h1:FVC7BI/5Ym8R25iw5OLsgshdUBbT1h5jZTpA+mvAdZ4= 64 | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= 65 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 66 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 67 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 68 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 69 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 70 | golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 71 | golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= 72 | golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 73 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 74 | golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 75 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 76 | golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 77 | golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 78 | golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 79 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 80 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 81 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 82 | golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= 83 | golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 84 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 85 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 86 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 87 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 88 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 89 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 90 | golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= 91 | golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 92 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 93 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 94 | golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 95 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 96 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 97 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= 98 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 99 | gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 100 | gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= 101 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 102 | --------------------------------------------------------------------------------