├── .gitignore ├── go.mod ├── go.sum ├── Makefile ├── test_tools ├── README.md └── test_real_api.go ├── LICENSE ├── README.md ├── utils_test.go ├── errors.go ├── hashcash.go ├── .github └── workflows │ └── build.yml ├── .golangci.yml ├── messages.go ├── utils.go ├── mega_test.go └── mega.go /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.log 3 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/t3rm1n4l/go-mega 2 | 3 | go 1.24.0 4 | 5 | require golang.org/x/crypto v0.45.0 6 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= 2 | golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= 3 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | build: 2 | go build 3 | 4 | test: 5 | go test -cpu 4 -v -race 6 | 7 | # Get the build dependencies 8 | build_dep: 9 | go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.5.0 10 | 11 | # Do source code quality checks 12 | check: 13 | golangci-lint run 14 | go vet ./... 
15 | -------------------------------------------------------------------------------- /test_tools/README.md: -------------------------------------------------------------------------------- 1 | # HOW TO TEST 2 | 3 | ## ENV VARIABLES 4 | 5 | - `X_MEGA_USER`: MEGA user email 6 | - `X_MEGA_PASSWORD`: MEGA user password 7 | - `X_MEGA_USER_AGENT`: HashcashDemo 8 | 9 | ## How to run tests 10 | 11 | ```bash 12 | export X_MEGA_USER= 13 | export X_MEGA_PASSWORD= 14 | export X_MEGA_USER_AGENT=HashcashDemo 15 | go build test_real_api.go 16 | ./test_real_api 17 | ``` -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Sarath Lakshman 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | go-mega 2 | ======= 3 | 4 | A client library in go for mega.co.nz storage service. 5 | 6 | An implementation of command-line utility can be found at [https://github.com/t3rm1n4l/megacmd](https://github.com/t3rm1n4l/megacmd) 7 | 8 | [![Build Status](https://secure.travis-ci.org/t3rm1n4l/go-mega.png?branch=master)](http://travis-ci.org/t3rm1n4l/go-mega) 9 | 10 | ### What can i do with this library? 11 | This is an API client library for MEGA storage service. Currently, the library supports the basic APIs and operations as follows: 12 | - User login 13 | - Fetch filesystem tree 14 | - Upload file 15 | - Download file 16 | - Create directory 17 | - Move file or directory 18 | - Rename file or directory 19 | - Delete file or directory 20 | - Parallel split download and upload 21 | - Filesystem events auto sync 22 | - Unit tests 23 | 24 | ### API methods 25 | 26 | Please find full doc at [https://pkg.go.dev/github.com/t3rm1n4l/go-mega](https://pkg.go.dev/github.com/t3rm1n4l/go-mega) 27 | 28 | ### Testing 29 | 30 | export X_MEGA_USER= 31 | export X_MEGA_PASSWORD= 32 | export X_MEGA_USER_AGENT=HashcashDemo 33 | $ make test 34 | go test -v 35 | === RUN TestLogin 36 | --- PASS: TestLogin (1.90 seconds) 37 | === RUN TestGetUser 38 | --- PASS: TestGetUser (1.65 seconds) 39 | === RUN TestUploadDownload 40 | --- PASS: TestUploadDownload (12.28 seconds) 41 | === RUN TestMove 42 | --- PASS: TestMove (9.31 seconds) 43 | === RUN TestRename 44 | --- PASS: TestRename (9.16 seconds) 45 | === RUN TestDelete 46 | --- PASS: TestDelete (3.87 seconds) 47 | === RUN TestCreateDir 48 | --- PASS: TestCreateDir (2.34 seconds) 49 | === RUN TestConfig 50 | --- PASS: TestConfig (0.01 seconds) 51 | === RUN TestPathLookup 52 | --- PASS: TestPathLookup (8.54 seconds) 53 | === RUN TestEventNotify 54 | --- PASS: 
TestEventNotify (19.65 seconds) 55 | PASS 56 | ok github.com/t3rm1n4l/go-mega68.745s 57 | 58 | ### TODO 59 | - Implement APIs for public download url generation 60 | - Implement download from public url 61 | - Add shared user content management APIs 62 | - Add contact list management APIs 63 | 64 | ### License 65 | 66 | MIT 67 | -------------------------------------------------------------------------------- /utils_test.go: -------------------------------------------------------------------------------- 1 | package mega 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | func TestGetChunkSizes(t *testing.T) { 9 | const k = 1024 10 | for _, test := range []struct { 11 | size int64 12 | want []chunkSize 13 | }{ 14 | { 15 | size: 0, 16 | want: []chunkSize(nil), 17 | }, 18 | { 19 | size: 1, 20 | want: []chunkSize{ 21 | {0, 1}, 22 | }, 23 | }, 24 | { 25 | size: 128*k - 1, 26 | want: []chunkSize{ 27 | {0, 128*k - 1}, 28 | }, 29 | }, 30 | { 31 | size: 128 * k, 32 | want: []chunkSize{ 33 | {0, 128 * k}, 34 | }, 35 | }, 36 | { 37 | size: 128*k + 1, 38 | want: []chunkSize{ 39 | {0, 128 * k}, 40 | {128 * k, 1}, 41 | }, 42 | }, 43 | { 44 | size: 384*k - 1, 45 | want: []chunkSize{ 46 | {0, 128 * k}, 47 | {128 * k, 256*k - 1}, 48 | }, 49 | }, 50 | { 51 | size: 384 * k, 52 | want: []chunkSize{ 53 | {0, 128 * k}, 54 | {128 * k, 256 * k}, 55 | }, 56 | }, 57 | { 58 | size: 384*k + 1, 59 | want: []chunkSize{ 60 | {0, 128 * k}, 61 | {128 * k, 256 * k}, 62 | {384 * k, 1}, 63 | }, 64 | }, 65 | { 66 | size: 5 * k * k, 67 | want: []chunkSize{ 68 | {0, 128 * k}, 69 | {128 * k, 256 * k}, 70 | {384 * k, 384 * k}, 71 | {768 * k, 512 * k}, 72 | {1280 * k, 640 * k}, 73 | {1920 * k, 768 * k}, 74 | {2688 * k, 896 * k}, 75 | {3584 * k, 1024 * k}, 76 | {4608 * k, 512 * k}, 77 | }, 78 | }, 79 | { 80 | size: 10 * k * k, 81 | want: []chunkSize{ 82 | {0, 128 * k}, 83 | {128 * k, 256 * k}, 84 | {384 * k, 384 * k}, 85 | {768 * k, 512 * k}, 86 | {1280 * k, 640 * k}, 87 | {1920 * k, 768 * k}, 88 | 
				{2688 * k, 896 * k},
				{3584 * k, 1024 * k},
				{4608 * k, 1024 * k},
				{5632 * k, 1024 * k},
				{6656 * k, 1024 * k},
				{7680 * k, 1024 * k},
				{8704 * k, 1024 * k},
				{9728 * k, 512 * k},
			},
		},
	} {
		got := getChunkSizes(test.size)
		if !reflect.DeepEqual(test.want, got) {
			t.Errorf("incorrect chunks for size %d: want %#v, got %#v", test.size, test.want, got)
		}
	}
}
--------------------------------------------------------------------------------
/errors.go:
--------------------------------------------------------------------------------
package mega

import (
	"errors"
	"fmt"
)

// Sentinel errors for MEGA API status codes (translated by parseError)
// plus a few errors raised locally by this package.
var (
	// General errors
	EINTERNAL  = errors.New("Internal error occurred") // API code -1
	EARGS      = errors.New("Invalid arguments")       // API code -2
	EAGAIN     = errors.New("Try again")               // API code -3
	ERATELIMIT = errors.New("Rate limit reached")      // API code -4
	EBADRESP   = errors.New("Bad response from server") // raised locally; no API code in parseError

	// Upload errors
	EFAILED  = errors.New("The upload failed. Please restart it from scratch")                                    // API code -5
	ETOOMANY = errors.New("Too many concurrent IP addresses are accessing this upload target URL")                // API code -6
	ERANGE   = errors.New("The upload file packet is out of range or not starting and ending on a chunk boundary") // API code -7
	EEXPIRED = errors.New("The upload target URL you are trying to access has expired. Please request a fresh one") // API code -8

	// Filesystem/Account errors
	ENOENT       = errors.New("Object (typically, node or user) not found") // API code -9
	ECIRCULAR    = errors.New("Circular linkage attempted")                 // API code -10
	EACCESS      = errors.New("Access violation")                           // API code -11
	EEXIST       = errors.New("Trying to create an object that already exists") // API code -12
	EINCOMPLETE  = errors.New("Trying to access an incomplete resource")    // API code -13
	EKEY         = errors.New("A decryption operation failed")              // API code -14
	ESID         = errors.New("Invalid or expired user session, please relogin") // API code -15
	EBLOCKED     = errors.New("User blocked")                               // API code -16
	EOVERQUOTA   = errors.New("Request over quota")                         // API code -17
	ETEMPUNAVAIL = errors.New("Resource temporarily not available, please try again later") // API code -18
	// EMACMISMATCH and EBADATTR are not mapped in parseError.
	// NOTE(review): presumably raised locally during MAC verification /
	// attribute decryption elsewhere in this package — confirm.
	EMACMISMATCH        = errors.New("MAC verification failed")
	EBADATTR            = errors.New("Bad node attribute")
	ETOOMANYCONNECTIONS = errors.New("Too many connections on this resource")                     // API code -19
	EWRITE              = errors.New("File could not be written to (or failed post-write integrity check)") // API code -20
	EREAD               = errors.New("File could not be read from (or changed unexpectedly during reading)") // API code -21
	EAPPKEY             = errors.New("Invalid or missing application key")                        // API code -22
	ESSL                = errors.New("SSL verification failed")                                   // API code -23
	EGOINGOVERQUOTA     = errors.New("Not enough quota")                                          // API code -24
	EMFAREQUIRED        = errors.New("Multi-factor authentication required")                      // API code -26

	// Config errors
	EWORKER_LIMIT_EXCEEDED = errors.New("Maximum worker limit exceeded") // raised locally; no API code
)

// ErrorMsg is the numeric status code returned by the MEGA API:
// 0 means success, negative values are errors.
type ErrorMsg int

// parseError translates a MEGA API status code into the matching
// sentinel error declared above. A zero code yields nil; a code with
// no known mapping yields a generic formatted error. Note that -25 has
// no assigned meaning upstream and is intentionally absent.
func parseError(errno ErrorMsg) error {
	switch errno {
	case 0:
		return nil
	case -1:
		return EINTERNAL
	case -2:
		return EARGS
	case -3:
		return EAGAIN
	case -4:
		return ERATELIMIT
	case -5:
		return EFAILED
	case -6:
		return ETOOMANY
	case -7:
		return ERANGE
	case -8:
		return EEXPIRED
	case -9:
		return ENOENT
	case -10:
		return ECIRCULAR
	case -11:
		return EACCESS
	case -12:
		return EEXIST
	case -13:
		return EINCOMPLETE
	case -14:
		return EKEY
	case -15:
		return ESID
	case -16:
		return EBLOCKED
	case -17:
		return EOVERQUOTA
	case -18:
		return ETEMPUNAVAIL
	case -19:
		return ETOOMANYCONNECTIONS
	case -20:
		return EWRITE
	case -21:
		return EREAD
	case -22:
		return EAPPKEY
	case -23:
		return ESSL
	case -24:
		return EGOINGOVERQUOTA
	case -26:
		return EMFAREQUIRED
	default:
		return fmt.Errorf("Unknown mega error %d", errno)
	}
}
--------------------------------------------------------------------------------
/test_tools/test_real_api.go:
--------------------------------------------------------------------------------
package main

import (
	"fmt"
	"log"
	"os"
	"time"

	mega "github.com/t3rm1n4l/go-mega"
)

// main drives a manual end-to-end exercise of the library against the
// real MEGA API, using credentials from X_MEGA_USER / X_MEGA_PASSWORD.
func main() {
	// Get credentials from environment variables or use defaults for testing
	email := os.Getenv("X_MEGA_USER")
	password := os.Getenv("X_MEGA_PASSWORD")
	fmt.Printf("Using email: %s\n", email)

	// Create a new Mega client with debugging enabled
	m := mega.New()

	// Enable debug logging to see the hashcash flow if it happens
	m.SetDebugger(func(format string, v ...any) {
		log.Printf("[DEBUG] "+format, v...)
	})

	m.SetLogger(func(format string, v ...any) {
		log.Printf("[INFO] "+format, v...)
28 | }) 29 | 30 | fmt.Println("Testing Mega API connection with hashcash support...") 31 | fmt.Println("This will attempt to login and perform some basic operations.") 32 | fmt.Println("If the server returns 402 Payment Required, the hashcash flow will be triggered.") 33 | 34 | // Set a breakpoint here in GoLand to start debugging 35 | fmt.Println("DEBUG: Starting API tests - set breakpoint here") 36 | 37 | // Test 1: Login 38 | fmt.Println("\n=== Test 1: Login ===") 39 | start := time.Now() 40 | 41 | // Set another breakpoint here to debug the login process 42 | fmt.Println("DEBUG: About to call Login - set breakpoint here") 43 | err := m.Login(email, password) 44 | if err != nil { 45 | log.Fatalf("Login failed: %v", err) 46 | } 47 | fmt.Printf("Login successful in %v\n", time.Since(start)) 48 | 49 | // Test 2: Get user info 50 | fmt.Println("\n=== Test 2: Get User Info ===") 51 | start = time.Now() 52 | 53 | // Set breakpoint here to debug user info retrieval 54 | fmt.Println("DEBUG: About to call GetUser - set breakpoint here") 55 | user, err := m.GetUser() 56 | if err != nil { 57 | log.Printf("GetUser failed: %v", err) 58 | } else { 59 | fmt.Printf("User info retrieved in %v: Email=%s\n", time.Since(start), user.Email) 60 | } 61 | 62 | // Test 3: Get quota info 63 | fmt.Println("\n=== Test 3: Get Quota Info ===") 64 | start = time.Now() 65 | 66 | // Set breakpoint here to debug quota info retrieval 67 | fmt.Println("DEBUG: About to call GetQuota - set breakpoint here") 68 | quota, err := m.GetQuota() 69 | if err != nil { 70 | log.Printf("GetQuota failed: %v", err) 71 | } else { 72 | fmt.Printf("Quota info retrieved in %v: Used=%d, Total=%d\n", 73 | time.Since(start), quota.Cstrg, quota.Mstrg) 74 | } 75 | 76 | // Test 4: List root directory 77 | fmt.Println("\n=== Test 4: List Root Directory ===") 78 | start = time.Now() 79 | root := m.FS.GetRoot() 80 | if root != nil { 81 | children, err := m.FS.GetChildren(root) 82 | if err != nil { 83 | 
log.Printf("GetChildren failed: %v", err) 84 | } else { 85 | fmt.Printf("Root directory listed in %v: %d items found\n", 86 | time.Since(start), len(children)) 87 | for i, child := range children { 88 | if i < 20 { // Show first 5 items 89 | fmt.Printf(" - %s (type: %d, size: %d)\n", 90 | child.GetName(), child.GetType(), child.GetSize()) 91 | } 92 | } 93 | if len(children) > 20 { 94 | fmt.Printf(" ... and %d more items\n", len(children)-5) 95 | } 96 | } 97 | } 98 | 99 | // Test 5: Re-create client from session 100 | fmt.Println("\n=== Test 5: Re-create client from session ===") 101 | m2 := mega.New() 102 | err = m2.LoginWithKeys(m.GetSessionID(), m.GetMasterKey()) 103 | if err != nil { 104 | log.Fatalf("LoginWithKeys failed: %v", err) 105 | } 106 | 107 | user, err = m2.GetUser() 108 | if err != nil { 109 | log.Printf("GetUser failed: %v", err) 110 | } else { 111 | fmt.Printf("User info retrieved in %v: Email=%s\n", time.Since(start), user.Email) 112 | } 113 | 114 | fmt.Println("\n=== All tests completed ===") 115 | fmt.Println("If you saw any [DEBUG] messages mentioning 'hashcash', the feature was triggered!") 116 | } 117 | -------------------------------------------------------------------------------- /hashcash.go: -------------------------------------------------------------------------------- 1 | package mega 2 | 3 | import ( 4 | "context" 5 | "crypto/sha256" 6 | "encoding/base64" 7 | "encoding/binary" 8 | "fmt" 9 | "strconv" 10 | "strings" 11 | "time" 12 | ) 13 | 14 | const numReplications = 262144 15 | const tokenSlotSize = 48 16 | const doneCtxCheckWhenNthIteration = 1000 17 | 18 | // Base64ToBytes decodes a base64url-encoded string to a byte slice 19 | func Base64ToBytes(s string) ([]byte, error) { 20 | if strings.ContainsAny(s, "+/=") { 21 | return nil, fmt.Errorf("invalid base64url format") 22 | } 23 | 24 | data, err := base64.RawURLEncoding.DecodeString(s) 25 | if err != nil { 26 | return nil, err 27 | } 28 | return data, nil 29 | } 30 | 31 | // 
PadToAESBlockSize ensures a byte slice is padded to AES block size (16 bytes) 32 | func PadToAESBlockSize(data []byte) []byte { 33 | if rem := len(data) % 16; rem != 0 { 34 | padding := make([]byte, 16-rem) 35 | return append(data, padding...) 36 | } 37 | return data 38 | } 39 | 40 | // parseHashcash parses the X-Hashcash header value and returns the components 41 | func parseHashcash(header string) (easiness int, token string, valid bool) { 42 | parts := strings.Split(header, ":") 43 | if len(parts) != 4 { 44 | return 0, "", false 45 | } 46 | 47 | v, err := strconv.Atoi(parts[0]) 48 | if err != nil || v != 1 { 49 | return 0, "", false 50 | } 51 | 52 | e, err := strconv.Atoi(parts[1]) 53 | if err != nil || e < 0 || e > 255 { 54 | return 0, "", false 55 | } 56 | 57 | return e, parts[3], true 58 | } 59 | 60 | // gencash generates a hashcash value based on the token and easiness 61 | func gencash(ctx context.Context, token string, easiness int) string { 62 | threshold := uint32((((easiness & 63) << 1) + 1) << ((easiness>>6)*7 + 3)) 63 | tokenBytes, err := Base64ToBytes(token) 64 | if err != nil { 65 | return "" 66 | } 67 | 68 | tokenBytes = PadToAESBlockSize(tokenBytes) 69 | buffer := make([]byte, 4+numReplications*tokenSlotSize) // 12 MB! 
70 | 71 | // Replicate token data across the buffer 72 | for i := 0; i < numReplications; i++ { 73 | copy(buffer[4+i*tokenSlotSize:], tokenBytes) 74 | } 75 | 76 | prefix := make([]byte, 4) 77 | 78 | // Try different prefixes until we find one that satisfies the threshold 79 | iterations := 0 80 | for { 81 | // Check context every doneCtxCheckWhenNthIteration iterations 82 | if iterations++; iterations%doneCtxCheckWhenNthIteration == 0 { 83 | select { 84 | case <-ctx.Done(): 85 | return "" 86 | default: 87 | } 88 | } 89 | 90 | // Increment prefix 91 | prefixSize := 4 92 | for j := 0; j < prefixSize; j++ { 93 | buffer[j]++ 94 | if buffer[j] != 0 { 95 | break 96 | } 97 | // last byte overflowed to zero 98 | if j == prefixSize-1 { 99 | return "" 100 | } 101 | } 102 | 103 | // Save prefix for later 104 | copy(prefix, buffer[:4]) 105 | 106 | hash := sha256.Sum256(buffer) 107 | hashValue := binary.BigEndian.Uint32(hash[:4]) 108 | if hashValue <= threshold { 109 | return base64.RawURLEncoding.EncodeToString(prefix) 110 | } 111 | } 112 | } 113 | 114 | func solveHashCashChallenge(token string, easiness int, timeout time.Duration, workers int) (string, error) { 115 | ctx, cancel := context.WithTimeout(context.Background(), timeout) 116 | defer cancel() 117 | 118 | resultChan := make(chan string, workers) 119 | 120 | workerFunc := func() { 121 | for { 122 | select { 123 | case <-ctx.Done(): 124 | return 125 | default: 126 | result := gencash(ctx, token, easiness) 127 | if result != "" { 128 | select { 129 | case resultChan <- result: 130 | return 131 | case <-ctx.Done(): 132 | return 133 | } 134 | } 135 | } 136 | } 137 | } 138 | 139 | for i := 0; i < workers; i++ { 140 | go workerFunc() 141 | } 142 | 143 | select { 144 | case result := <-resultChan: 145 | return result, nil 146 | case <-ctx.Done(): 147 | return "", ctx.Err() 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | # Github Actions build for go-mega 3 | # -*- compile-command: "yamllint -f parsable build.yml" -*- 4 | 5 | name: build 6 | 7 | # Trigger the workflow on push or pull request 8 | on: 9 | push: 10 | branches: 11 | - '**' 12 | tags: 13 | - '**' 14 | pull_request: 15 | workflow_dispatch: 16 | inputs: 17 | manual: 18 | description: Manual run (bypass default conditions) 19 | type: boolean 20 | default: true 21 | 22 | jobs: 23 | build: 24 | if: inputs.manual || (github.repository == 't3rm1n4l/go-mega' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) 25 | timeout-minutes: 60 26 | defaults: 27 | run: 28 | shell: bash 29 | strategy: 30 | fail-fast: false 31 | matrix: 32 | job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows', 'go1.24'] 33 | 34 | include: 35 | - job_name: linux 36 | os: ubuntu-latest 37 | go: '>=1.25.0-rc.1' 38 | gotags: cmount 39 | 40 | - job_name: mac_amd64 41 | os: macos-latest 42 | go: '>=1.25.0-rc.1' 43 | goarch: amd64 44 | 45 | - job_name: mac_arm64 46 | os: macos-latest 47 | go: '>=1.25.0-rc.1' 48 | goarch: arm64 49 | 50 | - job_name: windows 51 | os: windows-latest 52 | go: '>=1.25.0-rc.1' 53 | 54 | - job_name: go1.24 55 | os: ubuntu-latest 56 | go: '1.24' 57 | 58 | name: ${{ matrix.job_name }} 59 | 60 | runs-on: ${{ matrix.os }} 61 | 62 | steps: 63 | - name: Checkout 64 | uses: actions/checkout@v5 65 | with: 66 | fetch-depth: 0 67 | 68 | - name: Install Go 69 | uses: actions/setup-go@v6 70 | with: 71 | go-version: ${{ matrix.go }} 72 | check-latest: true 73 | 74 | - name: Set environment variables 75 | run: | 76 | if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi 77 | 78 | - name: Print Go version and environment 79 | run: | 80 | printf "Using go at: $(which go)\n" 81 | printf "Go version: $(go version)\n" 82 | printf 
"\n\nGo environment:\n\n" 83 | go env 84 | printf "\n\nSystem environment:\n\n" 85 | env 86 | 87 | - name: Build go-mega 88 | run: | 89 | make 90 | 91 | - name: Run tests 92 | run: | 93 | make test 94 | 95 | lint: 96 | if: inputs.manual || (github.repository == 't3rm1n4l/go-mega' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) 97 | timeout-minutes: 30 98 | name: "lint" 99 | runs-on: ubuntu-latest 100 | 101 | steps: 102 | - name: Get runner parameters 103 | id: get-runner-parameters 104 | run: | 105 | echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT 106 | echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT 107 | 108 | - name: Checkout 109 | uses: actions/checkout@v5 110 | with: 111 | fetch-depth: 0 112 | 113 | - name: Install Go 114 | id: setup-go 115 | uses: actions/setup-go@v6 116 | with: 117 | go-version: '>=1.24.0-rc.1' 118 | check-latest: true 119 | cache: false 120 | 121 | - name: Cache 122 | uses: actions/cache@v4 123 | with: 124 | path: | 125 | ~/go/pkg/mod 126 | ~/.cache/go-build 127 | ~/.cache/golangci-lint 128 | key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }} 129 | restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}- 130 | 131 | - name: Code quality test 132 | uses: golangci/golangci-lint-action@v8 133 | with: 134 | version: latest 135 | skip-cache: true 136 | 137 | - name: Install govulncheck 138 | run: go install golang.org/x/vuln/cmd/govulncheck@latest 139 | 140 | - name: Scan for vulnerabilities 141 | run: govulncheck ./... 
142 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | 3 | linters: 4 | # Configure the linter set. To avoid unexpected results the implicit default 5 | # set is ignored and all the ones to use are explicitly enabled. 6 | default: none 7 | enable: 8 | # Default 9 | - errcheck 10 | - govet 11 | - ineffassign 12 | - staticcheck 13 | - unused 14 | # Additional 15 | - gocritic 16 | - misspell 17 | #- prealloc 18 | #- revive 19 | - unconvert 20 | # Configure checks. Mostly using defaults but with some commented exceptions. 21 | settings: 22 | staticcheck: 23 | # With staticcheck there is only one setting, so to extend the implicit 24 | # default value it must be explicitly included. 25 | checks: 26 | # Default 27 | - all 28 | - -ST1000 29 | - -ST1003 30 | - -ST1005 31 | - -ST1012 32 | - -ST1016 33 | - -ST1020 34 | - -ST1021 35 | - -ST1022 36 | # Disable quickfix checks 37 | - -QF* 38 | gocritic: 39 | # With gocritic there are different settings, but since enabled-checks 40 | # and disabled-checks cannot both be set, for full customization the 41 | # alternative is to disable all defaults and explicitly enable the ones 42 | # to use. 
43 | disable-all: true 44 | enabled-checks: 45 | #- appendAssign # Skip default 46 | - argOrder 47 | - assignOp 48 | - badCall 49 | - badCond 50 | #- captLocal # Skip default 51 | - caseOrder 52 | - codegenComment 53 | #- commentFormatting # Skip default 54 | - defaultCaseOrder 55 | - deprecatedComment 56 | - dupArg 57 | - dupBranchBody 58 | - dupCase 59 | - dupSubExpr 60 | - elseif 61 | #- exitAfterDefer # Skip default 62 | - flagDeref 63 | - flagName 64 | #- ifElseChain # Skip default 65 | - mapKey 66 | - newDeref 67 | - offBy1 68 | - regexpMust 69 | - ruleguard # Enable additional check that are not enabled by default 70 | #- singleCaseSwitch # Skip default 71 | - sloppyLen 72 | - sloppyTypeAssert 73 | - switchTrue 74 | - typeSwitchVar 75 | - underef 76 | - unlambda 77 | - unslice 78 | - valSwap 79 | - wrapperFunc 80 | revive: 81 | # With revive there is in reality only one setting, and when at least one 82 | # rule are specified then only these rules will be considered, defaults 83 | # and all others are then implicitly disabled, so must explicitly enable 84 | # all rules to be used. 
85 | rules: 86 | - name: blank-imports 87 | disabled: false 88 | - name: context-as-argument 89 | disabled: false 90 | - name: context-keys-type 91 | disabled: false 92 | - name: dot-imports 93 | disabled: false 94 | #- name: empty-block # Skip default 95 | # disabled: true 96 | - name: error-naming 97 | disabled: false 98 | - name: error-return 99 | disabled: false 100 | - name: error-strings 101 | disabled: false 102 | - name: errorf 103 | disabled: false 104 | - name: exported 105 | disabled: false 106 | #- name: increment-decrement # Skip default 107 | # disabled: true 108 | - name: indent-error-flow 109 | disabled: false 110 | - name: package-comments 111 | disabled: false 112 | - name: range 113 | disabled: false 114 | - name: receiver-naming 115 | disabled: false 116 | #- name: redefines-builtin-id # Skip default 117 | # disabled: true 118 | #- name: superfluous-else # Skip default 119 | # disabled: true 120 | - name: time-naming 121 | disabled: false 122 | - name: unexported-return 123 | disabled: false 124 | #- name: unreachable-code # Skip default 125 | # disabled: true 126 | #- name: unused-parameter # Skip default 127 | # disabled: true 128 | - name: var-declaration 129 | disabled: false 130 | - name: var-naming 131 | disabled: false 132 | 133 | formatters: 134 | enable: 135 | - goimports 136 | 137 | issues: 138 | # Maximum issues count per one linter. Set to 0 to disable. Default is 50. 139 | max-issues-per-linter: 0 140 | # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. 141 | max-same-issues: 0 142 | 143 | run: 144 | # Timeout for total work, e.g. 30s, 5m, 5m30s. Default is 0 (disabled). 
  timeout: 10m
--------------------------------------------------------------------------------
/messages.go:
--------------------------------------------------------------------------------
package mega

import "encoding/json"

// PreloginMsg asks the API for the account's password-handling
// parameters before login (see PreloginResp).
type PreloginMsg struct {
	Cmd  string `json:"a"`
	User string `json:"user"`
}

// PreloginResp carries the password-scheme version and salt for a user.
type PreloginResp struct {
	Version int    `json:"v"`
	Salt    string `json:"s"`
}

// LoginMsg is the login request.
type LoginMsg struct {
	Cmd        string `json:"a"`
	User       string `json:"user"`
	Handle     string `json:"uh"`
	SessionKey string `json:"sek,omitempty"`
	Si         string `json:"si,omitempty"`
	Mfa        string `json:"mfa,omitempty"` // multi-factor authentication code
}

// LoginResp is the login response carrying the session and key material.
type LoginResp struct {
	Csid       string `json:"csid"`
	Privk      string `json:"privk"`
	Key        string `json:"k"`
	Ach        int    `json:"ach"`
	SessionKey string `json:"sek"`
	U          string `json:"u"`
}

// UserMsg requests details about the current user.
type UserMsg struct {
	Cmd string `json:"a"`
}

// UserResp describes the current user account.
type UserResp struct {
	U     string `json:"u"`
	S     int    `json:"s"`
	Email string `json:"email"`
	Name  string `json:"name"`
	Key   string `json:"k"`
	C     int    `json:"c"`
	Pubk  string `json:"pubk"`
	Privk string `json:"privk"`
	Terms string `json:"terms"`
	TS    string `json:"ts"`
}

// QuotaMsg requests account quota information.
type QuotaMsg struct {
	// Action, should be "uq" for quota request
	Cmd string `json:"a"`
	// xfer should be 1
	Xfer int `json:"xfer"`
	// Without strg=1 only reports total capacity for account
	Strg int `json:"strg,omitempty"`
}

// QuotaResp is the quota response.
type QuotaResp struct {
	// Mstrg is total capacity in bytes
	Mstrg uint64 `json:"mstrg"`
	// Cstrg is used capacity in bytes
	Cstrg uint64 `json:"cstrg"`
	// Per folder usage in bytes?
	Cstrgn map[string][]int64 `json:"cstrgn"`
}

// FilesMsg requests the filesystem tree.
type FilesMsg struct {
	Cmd string `json:"a"`
	C   int    `json:"c"`
}

// FSNode is a single filesystem node (file or folder) as returned by
// the API. Attr and Key are encrypted.
type FSNode struct {
	Hash   string `json:"h"`  // node handle
	Parent string `json:"p"`  // parent node handle
	User   string `json:"u"`  // owner handle
	T      int    `json:"t"`  // node type
	Attr   string `json:"a"`  // encrypted attributes
	Key    string `json:"k"`  // encrypted node key
	Ts     int64  `json:"ts"` // timestamp
	SUser  string `json:"su"` // sharing user (shared nodes)
	SKey   string `json:"sk"` // share key (shared nodes)
	Sz     int64  `json:"s"`  // size in bytes
}

// FilesResp is the filesystem tree response.
type FilesResp struct {
	F []FSNode `json:"f"`

	Ok []struct {
		Hash string `json:"h"`
		Key  string `json:"k"`
	} `json:"ok"`

	S []struct {
		Hash string `json:"h"`
		User string `json:"u"`
	} `json:"s"`
	User []struct {
		User  string `json:"u"`
		C     int    `json:"c"`
		Email string `json:"m"`
	} `json:"u"`
	Sn string `json:"sn"`
}

// FileAttr is the decrypted node attribute object.
type FileAttr struct {
	Name string `json:"n"`
}

// GetLinkMsg requests a public link for node N.
type GetLinkMsg struct {
	Cmd string `json:"a"`
	N   string `json:"n"`
}

// DownloadMsg requests a temporary download URL for a node.
type DownloadMsg struct {
	Cmd string `json:"a"`
	G   int    `json:"g"`
	P   string `json:"p,omitempty"`
	N   string `json:"n,omitempty"`
	SSL int    `json:"ssl,omitempty"`
}

// DownloadResp carries the download URL, size and encrypted attributes.
type DownloadResp struct {
	G    string   `json:"g"`
	Size uint64   `json:"s"`
	Attr string   `json:"at"`
	Err  ErrorMsg `json:"e"`
}

// UploadMsg requests an upload URL for S bytes.
type UploadMsg struct {
	Cmd string `json:"a"`
	S   int64  `json:"s"`
	SSL int    `json:"ssl,omitempty"`
}

// UploadResp carries the upload target URL.
type UploadResp struct {
	P string `json:"p"`
}

// UploadCompleteMsg finalizes an upload by attaching the new node.
type UploadCompleteMsg struct {
	Cmd string `json:"a"`
	T   string `json:"t"`
	N   [1]struct {
		H string `json:"h"`
		T int    `json:"t"`
		A string `json:"a"`
		K string `json:"k"`
	} `json:"n"`
	I string `json:"i,omitempty"`
}

// UploadCompleteResp returns the newly created node(s).
type UploadCompleteResp struct {
	F []FSNode `json:"f"`
}

// FileInfoMsg — request shape only.
// NOTE(review): semantics of F/P are not visible in this file; confirm
// against the caller in mega.go.
type FileInfoMsg struct {
	Cmd string `json:"a"`
	F   int    `json:"f"`
	P   string `json:"p"`
}

// MoveFileMsg moves node N under target node T.
type MoveFileMsg struct {
	Cmd string `json:"a"`
	N   string `json:"n"`
	T   string `json:"t"`
	I   string `json:"i"`
}

// FileAttrMsg updates the (encrypted) attributes of node N.
type FileAttrMsg struct {
	Cmd  string `json:"a"`
	Attr string `json:"attr"`
	Key  string `json:"key"`
	N    string `json:"n"`
	I    string `json:"i"`
}

// FileDeleteMsg deletes node N.
type FileDeleteMsg struct {
	Cmd string `json:"a"`
	N   string `json:"n"`
	I   string `json:"i"`
}

// GenericEvent is a generic event for parsing the Cmd type before
// decoding more specifically
type GenericEvent struct {
	Cmd string `json:"a"`
}

// FSEvent - event for various file system events
//
// Delete (a=d)
// Update attr (a=u)
// New nodes (a=t)
type FSEvent struct {
	Cmd string `json:"a"`

	T struct {
		Files []FSNode `json:"f"`
	} `json:"t"`
	Owner string `json:"ou"`

	N    string `json:"n"`
	User string `json:"u"`
	Attr string `json:"at"`
	Key  string `json:"k"`
	Ts   int64  `json:"ts"`
	I    string `json:"i"`
}

// Events is received from a poll of the server to read the events
//
// Each event can be an error message or a different field so we delay
// decoding
type Events struct {
	W  string            `json:"w"`
	Sn string            `json:"sn"`
	E  []json.RawMessage `json:"a"`
}
--------------------------------------------------------------------------------
/utils.go:
--------------------------------------------------------------------------------
package mega

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/base64"
	"encoding/binary"
	"encoding/json"
	"errors"
	"math/big"
	"net"
	"net/http"
	"regexp"
	"runtime"
	"strings"
18 | "time" 19 | ) 20 | 21 | func newHttpClient(timeout time.Duration) *http.Client { 22 | // TODO: Need to test this out 23 | // Doesn't seem to work as expected 24 | c := &http.Client{ 25 | Transport: &http.Transport{ 26 | Dial: func(netw, addr string) (net.Conn, error) { 27 | c, err := net.DialTimeout(netw, addr, timeout) 28 | if err != nil { 29 | return nil, err 30 | } 31 | return c, nil 32 | }, 33 | Proxy: http.ProxyFromEnvironment, 34 | }, 35 | } 36 | return c 37 | } 38 | 39 | // bytes_to_a32 converts the byte slice b to uint32 slice considering 40 | // the bytes to be in big endian order. 41 | func bytes_to_a32(b []byte) ([]uint32, error) { 42 | length := len(b) + 3 43 | a := make([]uint32, length/4) 44 | buf := bytes.NewBuffer(b) 45 | for i := range a { 46 | err := binary.Read(buf, binary.BigEndian, &a[i]) 47 | if err != nil { 48 | return nil, err 49 | } 50 | } 51 | 52 | return a, nil 53 | } 54 | 55 | // a32_to_bytes converts the uint32 slice a to byte slice where each 56 | // uint32 is decoded in big endian order. 57 | func a32_to_bytes(a []uint32) ([]byte, error) { 58 | buf := new(bytes.Buffer) 59 | buf.Grow(len(a) * 4) // To prevent reallocations in Write 60 | for _, v := range a { 61 | err := binary.Write(buf, binary.BigEndian, v) 62 | if err != nil { 63 | return nil, err 64 | } 65 | } 66 | 67 | return buf.Bytes(), nil 68 | } 69 | 70 | // base64urlencode encodes byte slice b using base64 url encoding 71 | // without `=` padding. 72 | func base64urlencode(b []byte) string { 73 | return base64.RawURLEncoding.EncodeToString(b) 74 | } 75 | 76 | // base64urldecode decodes the byte slice b using unpadded base64 url 77 | // decoding. It also allows the characters from standard base64 to be 78 | // compatible with the mega decoder. 
79 | func base64urldecode(s string) ([]byte, error) { 80 | enc := base64.RawURLEncoding 81 | // mega base64 decoder accepts the characters from both URLEncoding and StdEncoding 82 | // though nearly all strings are URL encoded 83 | s = strings.ReplaceAll(s, "+", "-") 84 | s = strings.ReplaceAll(s, "/", "_") 85 | return enc.DecodeString(s) 86 | } 87 | 88 | // a32_to_base64 converts uint32 slice to base64 encoded byte slice. 89 | func a32_to_base64(a []uint32) (string, error) { 90 | d, err := a32_to_bytes(a) 91 | if err != nil { 92 | return "", err 93 | } 94 | return base64urlencode(d), nil 95 | } 96 | 97 | // paddnull pads byte slice b such that the size of resulting byte 98 | // slice is a multiple of q. 99 | func paddnull(b []byte, q int) []byte { 100 | if rem := len(b) % q; rem != 0 { 101 | l := q - rem 102 | 103 | for i := 0; i < l; i++ { 104 | b = append(b, 0) 105 | } 106 | } 107 | 108 | return b 109 | } 110 | 111 | // password_key calculates password hash from the user password. 112 | func password_key(p string) ([]byte, error) { 113 | a, err := bytes_to_a32(paddnull([]byte(p), 4)) 114 | if err != nil { 115 | return nil, err 116 | } 117 | 118 | pkey, err := a32_to_bytes([]uint32{0x93C467E3, 0x7DB0C7A4, 0xD1BE3F81, 0x0152CB56}) 119 | if err != nil { 120 | return nil, err 121 | } 122 | 123 | n := (len(a) + 3) / 4 124 | 125 | ciphers := make([]cipher.Block, n) 126 | 127 | for j := 0; j < len(a); j += 4 { 128 | key := []uint32{0, 0, 0, 0} 129 | for k := 0; k < 4; k++ { 130 | if j+k < len(a) { 131 | key[k] = a[k+j] 132 | } 133 | } 134 | bkey, err := a32_to_bytes(key) 135 | if err != nil { 136 | return nil, err 137 | } 138 | ciphers[j/4], err = aes.NewCipher(bkey) // Uses AES in ECB mode 139 | if err != nil { 140 | return nil, err 141 | } 142 | } 143 | 144 | for i := 65536; i > 0; i-- { 145 | for j := 0; j < n; j++ { 146 | ciphers[j].Encrypt(pkey, pkey) 147 | } 148 | } 149 | 150 | return pkey, nil 151 | } 152 | 153 | // stringhash computes generic string hash. 
Uses k as the key for AES 154 | // cipher. 155 | func stringhash(s string, k []byte) (string, error) { 156 | a, err := bytes_to_a32(paddnull([]byte(s), 4)) 157 | if err != nil { 158 | return "", err 159 | } 160 | h := []uint32{0, 0, 0, 0} 161 | for i, v := range a { 162 | h[i&3] ^= v 163 | } 164 | 165 | hb, err := a32_to_bytes(h) 166 | if err != nil { 167 | return "", err 168 | } 169 | cipher, err := aes.NewCipher(k) 170 | if err != nil { 171 | return "", err 172 | } 173 | for i := 16384; i > 0; i-- { 174 | cipher.Encrypt(hb, hb) 175 | } 176 | ha, err := bytes_to_a32(paddnull(hb, 4)) 177 | if err != nil { 178 | return "", err 179 | } 180 | 181 | return a32_to_base64([]uint32{ha[0], ha[2]}) 182 | } 183 | 184 | // getMPI returns the length encoded Int and the next slice. 185 | func getMPI(b []byte) (*big.Int, []byte) { 186 | p := new(big.Int) 187 | plen := (uint64(b[0])*256 + uint64(b[1]) + 7) >> 3 188 | p.SetBytes(b[2 : plen+2]) 189 | b = b[plen+2:] 190 | return p, b 191 | } 192 | 193 | // getRSAKey decodes the RSA Key from the byte slice b. 194 | func getRSAKey(b []byte) (*big.Int, *big.Int, *big.Int) { 195 | p, b := getMPI(b) 196 | q, b := getMPI(b) 197 | d, _ := getMPI(b) 198 | 199 | return p, q, d 200 | } 201 | 202 | // decryptRSA decrypts message m using RSA private key (p,q,d) 203 | func decryptRSA(m, p, q, d *big.Int) []byte { 204 | n := new(big.Int) 205 | r := new(big.Int) 206 | n.Mul(p, q) 207 | r.Exp(m, d, n) 208 | 209 | return r.Bytes() 210 | } 211 | 212 | // blockDecrypt decrypts using the block cipher blk in ECB mode. 
213 | func blockDecrypt(blk cipher.Block, dst, src []byte) error { 214 | 215 | if len(src) > len(dst) || len(src)%blk.BlockSize() != 0 { 216 | return errors.New("Block decryption failed") 217 | } 218 | 219 | l := len(src) - blk.BlockSize() 220 | 221 | for i := 0; i <= l; i += blk.BlockSize() { 222 | blk.Decrypt(dst[i:], src[i:]) 223 | } 224 | 225 | return nil 226 | } 227 | 228 | // blockEncrypt encrypts using the block cipher blk in ECB mode. 229 | func blockEncrypt(blk cipher.Block, dst, src []byte) error { 230 | 231 | if len(src) > len(dst) || len(src)%blk.BlockSize() != 0 { 232 | return errors.New("Block encryption failed") 233 | } 234 | 235 | l := len(src) - blk.BlockSize() 236 | 237 | for i := 0; i <= l; i += blk.BlockSize() { 238 | blk.Encrypt(dst[i:], src[i:]) 239 | } 240 | 241 | return nil 242 | } 243 | 244 | // decryptSeessionId decrypts the session id using the given private 245 | // key. 246 | func decryptSessionId(privk string, csid string, mk []byte) (string, error) { 247 | 248 | block, err := aes.NewCipher(mk) 249 | if err != nil { 250 | return "", err 251 | } 252 | pk, err := base64urldecode(privk) 253 | if err != nil { 254 | return "", err 255 | } 256 | err = blockDecrypt(block, pk, pk) 257 | if err != nil { 258 | return "", err 259 | } 260 | 261 | c, err := base64urldecode(csid) 262 | if err != nil { 263 | return "", err 264 | } 265 | 266 | m, _ := getMPI(c) 267 | 268 | p, q, d := getRSAKey(pk) 269 | r := decryptRSA(m, p, q, d) 270 | 271 | return base64urlencode(r[:43]), nil 272 | 273 | } 274 | 275 | // chunkSize describes a size and position of chunk 276 | type chunkSize struct { 277 | position int64 278 | size int 279 | } 280 | 281 | func getChunkSizes(size int64) (chunks []chunkSize) { 282 | p := int64(0) 283 | for i := 1; size > 0; i++ { 284 | var chunk int 285 | if i <= 8 { 286 | chunk = i * 131072 287 | } else { 288 | chunk = 1048576 289 | } 290 | if size < int64(chunk) { 291 | chunk = int(size) 292 | } 293 | chunks = append(chunks, 
chunkSize{position: p, size: chunk}) 294 | p += int64(chunk) 295 | size -= int64(chunk) 296 | } 297 | return chunks 298 | } 299 | 300 | var attrMatch = regexp.MustCompile(`{".*"}`) 301 | 302 | func decryptAttr(key []byte, data string) (attr FileAttr, err error) { 303 | block, err := aes.NewCipher(key) 304 | if err != nil { 305 | return attr, err 306 | } 307 | iv, err := a32_to_bytes([]uint32{0, 0, 0, 0}) 308 | if err != nil { 309 | return attr, err 310 | } 311 | mode := cipher.NewCBCDecrypter(block, iv) 312 | buf := make([]byte, len(data)) 313 | ddata, err := base64urldecode(data) 314 | if err != nil { 315 | return attr, err 316 | } 317 | mode.CryptBlocks(buf, ddata) 318 | 319 | if string(buf[:4]) == "MEGA" { 320 | str := strings.TrimRight(string(buf[4:]), "\x00") 321 | trimmed := attrMatch.FindString(str) 322 | if trimmed != "" { 323 | str = trimmed 324 | } 325 | err = json.Unmarshal([]byte(str), &attr) 326 | } 327 | return attr, err 328 | } 329 | 330 | func encryptAttr(key []byte, attr FileAttr) (b string, err error) { 331 | block, err := aes.NewCipher(key) 332 | if err != nil { 333 | return "", err 334 | } 335 | data, err := json.Marshal(attr) 336 | if err != nil { 337 | return "", err 338 | } 339 | attrib := []byte("MEGA") 340 | attrib = append(attrib, data...) 
341 | attrib = paddnull(attrib, 16) 342 | 343 | iv, err := a32_to_bytes([]uint32{0, 0, 0, 0}) 344 | if err != nil { 345 | return "", err 346 | } 347 | mode := cipher.NewCBCEncrypter(block, iv) 348 | mode.CryptBlocks(attrib, attrib) 349 | 350 | b = base64urlencode(attrib) 351 | return b, nil 352 | } 353 | 354 | func randString(l int) (string, error) { 355 | encoding := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789/+" 356 | b := make([]byte, l) 357 | _, err := rand.Read(b) 358 | if err != nil { 359 | return "", err 360 | } 361 | enc := base64.NewEncoding(encoding) 362 | d := make([]byte, enc.EncodedLen(len(b))) 363 | enc.Encode(d, b) 364 | d = d[:l] 365 | return strings.NewReplacer("/", "A", "+", "B").Replace(string(d)), nil 366 | } 367 | 368 | // halfCPUCores returns half the number of logical CPU cores available. 369 | // The return value is always at least 1, even if the system reports fewer than 2 cores. 370 | func halfCPUCores() int { 371 | n := runtime.NumCPU() / 2 372 | if n < 1 { 373 | return 1 374 | } 375 | return n 376 | } 377 | -------------------------------------------------------------------------------- /mega_test.go: -------------------------------------------------------------------------------- 1 | package mega 2 | 3 | import ( 4 | "crypto/md5" 5 | "crypto/rand" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "os" 10 | "path/filepath" 11 | "sync" 12 | "testing" 13 | "time" 14 | ) 15 | 16 | var USER string = os.Getenv("X_MEGA_USER") 17 | var PASSWORD string = os.Getenv("X_MEGA_PASSWORD") 18 | 19 | // retry runs fn until it succeeds, using what to log and retrying on 20 | // EAGAIN. 
It uses exponential backoff 21 | func retry(t *testing.T, what string, fn func() error) { 22 | const maxTries = 10 23 | var err error 24 | sleep := 100 * time.Millisecond 25 | for i := 1; i <= maxTries; i++ { 26 | err = fn() 27 | if err == nil { 28 | return 29 | } 30 | if err != EAGAIN { 31 | break 32 | } 33 | t.Logf("%s failed %d/%d - retrying after %v sleep", what, i, maxTries, sleep) 34 | time.Sleep(sleep) 35 | sleep *= 2 36 | } 37 | t.Fatalf("%s failed: %v", what, err) 38 | } 39 | 40 | func skipIfNoCredentials(t *testing.T) { 41 | if USER == "" || PASSWORD == "" { 42 | t.Skip("X_MEGA_USER and X_MEGA_PASSWD not set - skipping integration tests") 43 | } 44 | } 45 | 46 | func initSession(t *testing.T) *Mega { 47 | skipIfNoCredentials(t) 48 | m := New() 49 | // m.SetDebugger(log.Printf) 50 | retry(t, "Login", func() error { 51 | return m.Login(USER, PASSWORD) 52 | }) 53 | return m 54 | } 55 | 56 | // createFile creates a temporary file of a given size along with its MD5SUM 57 | func createFile(t *testing.T, size int64) (string, string) { 58 | b := make([]byte, size) 59 | _, err := rand.Read(b) 60 | if err != nil { 61 | t.Fatalf("Error reading rand: %v", err) 62 | } 63 | file, err := os.CreateTemp(os.TempDir(), "gomega-") 64 | if err != nil { 65 | t.Fatalf("Error creating temp file: %v", err) 66 | } 67 | defer func() { 68 | err := file.Close() 69 | if err != nil { 70 | t.Fatalf("Error closing temp file: %v", err) 71 | } 72 | }() 73 | _, err = file.Write(b) 74 | if err != nil { 75 | t.Fatalf("Error writing temp file: %v", err) 76 | } 77 | h := md5.New() 78 | _, err = h.Write(b) 79 | if err != nil { 80 | t.Fatalf("Error on Write while writing temp file: %v", err) 81 | } 82 | return file.Name(), fmt.Sprintf("%x", h.Sum(nil)) 83 | } 84 | 85 | // uploadFile uploads a temporary file of a given size returning the 86 | // node, name and its MD5SUM 87 | func uploadFile(t *testing.T, session *Mega, size int64, parent *Node) (node *Node, name string, md5sum string) { 88 | 
name, md5sum = createFile(t, size) 89 | defer func() { 90 | _ = os.Remove(name) 91 | }() 92 | var err error 93 | retry(t, fmt.Sprintf("Upload %q", name), func() error { 94 | node, err = session.UploadFile(name, parent, "", nil) 95 | return err 96 | }) 97 | if node == nil { 98 | t.Fatalf("Failed to obtain node after upload for %q", name) 99 | } 100 | return node, name, md5sum 101 | } 102 | 103 | // createDir creates a directory under parent 104 | func createDir(t *testing.T, session *Mega, name string, parent *Node) (node *Node) { 105 | var err error 106 | retry(t, fmt.Sprintf("Create directory %q", name), func() error { 107 | node, err = session.CreateDir(name, parent) 108 | return err 109 | }) 110 | return node 111 | } 112 | 113 | func fileMD5(t *testing.T, name string) string { 114 | file, err := os.Open(name) 115 | if err != nil { 116 | t.Fatalf("Failed to open %q: %v", name, err) 117 | } 118 | defer func() { 119 | err := file.Close() 120 | if err != nil { 121 | t.Fatalf("Error closing temp file: %v", err) 122 | } 123 | }() 124 | b, err := io.ReadAll(file) 125 | if err != nil { 126 | t.Fatalf("Failed to read all %q: %v", name, err) 127 | } 128 | h := md5.New() 129 | _, err = h.Write(b) 130 | if err != nil { 131 | t.Fatalf("Error on hash in fileMD5: %v", err) 132 | } 133 | return fmt.Sprintf("%x", h.Sum(nil)) 134 | } 135 | 136 | func TestLogin(t *testing.T) { 137 | skipIfNoCredentials(t) 138 | 139 | m := New() 140 | retry(t, "Login", func() error { 141 | return m.Login(USER, PASSWORD) 142 | }) 143 | } 144 | 145 | func TestGetUser(t *testing.T) { 146 | session := initSession(t) 147 | _, err := session.GetUser() 148 | if err != nil { 149 | t.Fatal("GetUser failed", err) 150 | } 151 | } 152 | 153 | func TestUploadDownload(t *testing.T) { 154 | session := initSession(t) 155 | for i := range []int{0, 1} { 156 | if i == 0 { 157 | t.Log("HTTP Test") 158 | session.SetHTTPS(false) 159 | } else { 160 | t.Log("HTTPS Test") 161 | session.SetHTTPS(true) 162 | } 163 | 164 | 
node, name, h1 := uploadFile(t, session, 314573, session.FS.root) 165 | 166 | session.FS.mutex.Lock() 167 | phash := session.FS.root.hash 168 | n := session.FS.lookup[node.hash] 169 | if n.parent.hash != phash { 170 | t.Error("Parent of uploaded file mismatch") 171 | } 172 | session.FS.mutex.Unlock() 173 | 174 | err := session.DownloadFile(node, name, nil) 175 | if err != nil { 176 | t.Fatal("Download failed", err) 177 | } 178 | 179 | h2 := fileMD5(t, name) 180 | err = os.Remove(name) 181 | if err != nil { 182 | t.Error("Failed to remove file", err) 183 | } 184 | 185 | if h1 != h2 { 186 | t.Error("MD5 mismatch for downloaded file") 187 | } 188 | } 189 | session.SetHTTPS(false) 190 | } 191 | 192 | func TestMove(t *testing.T) { 193 | session := initSession(t) 194 | node, _, _ := uploadFile(t, session, 31, session.FS.root) 195 | 196 | hash := node.hash 197 | phash := session.FS.trash.hash 198 | err := session.Move(node, session.FS.trash) 199 | if err != nil { 200 | t.Fatal("Move failed", err) 201 | } 202 | 203 | session.FS.mutex.Lock() 204 | n := session.FS.lookup[hash] 205 | if n.parent.hash != phash { 206 | t.Error("Move happened to wrong parent", phash, n.parent.hash) 207 | } 208 | session.FS.mutex.Unlock() 209 | } 210 | 211 | func TestRename(t *testing.T) { 212 | session := initSession(t) 213 | node, _, _ := uploadFile(t, session, 31, session.FS.root) 214 | 215 | err := session.Rename(node, "newname.txt") 216 | if err != nil { 217 | t.Fatal("Rename failed", err) 218 | } 219 | 220 | session.FS.mutex.Lock() 221 | newname := session.FS.lookup[node.hash].name 222 | if newname != "newname.txt" { 223 | t.Error("Renamed to wrong name", newname) 224 | } 225 | session.FS.mutex.Unlock() 226 | } 227 | 228 | func TestDelete(t *testing.T) { 229 | session := initSession(t) 230 | node, _, _ := uploadFile(t, session, 31, session.FS.root) 231 | 232 | retry(t, "Soft delete", func() error { 233 | return session.Delete(node, false) 234 | }) 235 | 236 | session.FS.mutex.Lock() 237 | 
node = session.FS.lookup[node.hash] 238 | if node.parent != session.FS.trash { 239 | t.Error("Expects file to be moved to trash") 240 | } 241 | session.FS.mutex.Unlock() 242 | 243 | retry(t, "Hard delete", func() error { 244 | return session.Delete(node, true) 245 | }) 246 | 247 | time.Sleep(5 * time.Second) // wait for the event 248 | 249 | session.FS.mutex.Lock() 250 | if _, ok := session.FS.lookup[node.hash]; ok { 251 | t.Error("Expects file to be disappeared") 252 | } 253 | session.FS.mutex.Unlock() 254 | } 255 | 256 | func TestCreateDir(t *testing.T) { 257 | session := initSession(t) 258 | node := createDir(t, session, "testdir1", session.FS.root) 259 | node2 := createDir(t, session, "testdir2", node) 260 | 261 | session.FS.mutex.Lock() 262 | nnode2 := session.FS.lookup[node2.hash] 263 | if nnode2.parent.hash != node.hash { 264 | t.Error("Wrong directory parent") 265 | } 266 | session.FS.mutex.Unlock() 267 | } 268 | 269 | func TestConfig(t *testing.T) { 270 | skipIfNoCredentials(t) 271 | 272 | m := New() 273 | m.SetAPIUrl("http://invalid.domain") 274 | err := m.Login(USER, PASSWORD) 275 | if err == nil { 276 | t.Error("API Url: Expected failure") 277 | } 278 | 279 | err = m.SetDownloadWorkers(100) 280 | if err != EWORKER_LIMIT_EXCEEDED { 281 | t.Error("Download: Expected EWORKER_LIMIT_EXCEEDED error") 282 | } 283 | 284 | err = m.SetUploadWorkers(100) 285 | if err != EWORKER_LIMIT_EXCEEDED { 286 | t.Error("Upload: Expected EWORKER_LIMIT_EXCEEDED error") 287 | } 288 | 289 | // TODO: Add timeout test cases 290 | 291 | } 292 | 293 | func TestPathLookup(t *testing.T) { 294 | session := initSession(t) 295 | 296 | rs, err := randString(5) 297 | if err != nil { 298 | t.Fatalf("failed to make random string: %v", err) 299 | } 300 | node1 := createDir(t, session, "dir-1-"+rs, session.FS.root) 301 | node21 := createDir(t, session, "dir-2-1-"+rs, node1) 302 | node22 := createDir(t, session, "dir-2-2-"+rs, node1) 303 | node31 := createDir(t, session, "dir-3-1-"+rs, node21) 
304 | node32 := createDir(t, session, "dir-3-2-"+rs, node22) 305 | _ = node32 306 | 307 | _, name1, _ := uploadFile(t, session, 31, node31) 308 | _, _, _ = uploadFile(t, session, 31, node31) 309 | _, name3, _ := uploadFile(t, session, 31, node22) 310 | 311 | testpaths := [][]string{ 312 | {"dir-1-" + rs, "dir-2-2-" + rs, filepath.Base(name3)}, 313 | {"dir-1-" + rs, "dir-2-1-" + rs, "dir-3-1-" + rs}, 314 | {"dir-1-" + rs, "dir-2-1-" + rs, "dir-3-1-" + rs, filepath.Base(name1)}, 315 | {"dir-1-" + rs, "dir-2-1-" + rs, "none"}, 316 | } 317 | 318 | results := []error{nil, nil, nil, ENOENT} 319 | 320 | //time.Sleep(5 * time.Second) // wait for the events to be processed 321 | 322 | for i, tst := range testpaths { 323 | //t.Logf("Test %d: Lookup %v", i, tst) 324 | ns, e := session.FS.PathLookup(session.FS.root, tst) 325 | switch { 326 | case !errors.Is(e, results[i]): 327 | t.Errorf("Test %d failed: wrong result, %v", i, tst) 328 | default: 329 | if results[i] == nil && len(tst) != len(ns) { 330 | t.Errorf("Test %d failed: result array len (%d) mismatch", i, len(ns)) 331 | } 332 | 333 | arr := []string{} 334 | for n := range ns { 335 | if tst[n] != ns[n].name { 336 | t.Errorf("Test %d failed: result node mismatches (%v) and (%v)", i, tst, arr) 337 | break 338 | } 339 | arr = append(arr, tst[n]) 340 | } 341 | } 342 | } 343 | } 344 | 345 | func TestEventNotify(t *testing.T) { 346 | session1 := initSession(t) 347 | session2 := initSession(t) 348 | 349 | node, _, _ := uploadFile(t, session1, 31, session1.FS.root) 350 | nodeHash := node.GetHash() // Store the hash before the loop 351 | 352 | for i := 0; i < 60; i++ { 353 | time.Sleep(time.Second * 2) 354 | node = session2.FS.HashLookup(nodeHash) // Use the stored hash 355 | if node != nil { 356 | break 357 | } 358 | } 359 | 360 | if node == nil { 361 | t.Fatal("Expects file to found in second client's FS") 362 | } 363 | 364 | retry(t, "Delete", func() error { 365 | return session2.Delete(node, true) 366 | }) 367 | 368 | for i 
:= 0; i < 20; i++ { 369 | node = session1.FS.HashLookup(node.GetHash()) 370 | if node == nil { 371 | break 372 | } 373 | time.Sleep(time.Second * 5) 374 | } 375 | if node != nil { 376 | t.Fatal("Expects file to not-found in first client's FS") 377 | } 378 | } 379 | 380 | func TestExportLink(t *testing.T) { 381 | session := initSession(t) 382 | node, _, _ := uploadFile(t, session, 31, session.FS.root) 383 | 384 | // Don't include decryption key 385 | retry(t, "Failed to export link (key not included)", func() error { 386 | _, err := session.Link(node, false) 387 | return err 388 | }) 389 | 390 | // Do include decryption key 391 | retry(t, "Failed to export link (key included)", func() error { 392 | _, err := session.Link(node, true) 393 | return err 394 | }) 395 | } 396 | 397 | func TestWaitEvents(t *testing.T) { 398 | m := &Mega{} 399 | m.SetLogger(t.Logf) 400 | m.SetDebugger(t.Logf) 401 | var wg sync.WaitGroup 402 | // in the background fire the event timer after 100mS 403 | wg.Add(1) 404 | go func() { 405 | time.Sleep(100 * time.Millisecond) 406 | m.waitEventsFire() 407 | wg.Done() 408 | }() 409 | wait := func(d time.Duration, pb *bool) { 410 | e := m.WaitEventsStart() 411 | *pb = m.WaitEvents(e, d) 412 | wg.Done() 413 | } 414 | // wait for each event in a separate goroutine 415 | var b1, b2, b3 bool 416 | wg.Add(3) 417 | go wait(10*time.Second, &b1) 418 | go wait(2*time.Second, &b2) 419 | go wait(1*time.Millisecond, &b3) 420 | wg.Wait() 421 | if b1 != false { 422 | t.Errorf("Unexpected timeout for b1") 423 | } 424 | if b2 != false { 425 | t.Errorf("Unexpected timeout for b2") 426 | } 427 | if b3 != true { 428 | t.Errorf("Unexpected event for b3") 429 | } 430 | if m.waitEvents != nil { 431 | t.Errorf("Expecting waitEvents to be empty") 432 | } 433 | // Check nothing happens if we fire the event with no listeners 434 | m.waitEventsFire() 435 | } 436 | -------------------------------------------------------------------------------- /mega.go: 
-------------------------------------------------------------------------------- 1 | package mega 2 | 3 | import ( 4 | "bytes" 5 | "crypto/aes" 6 | "crypto/cipher" 7 | "crypto/rand" 8 | "crypto/sha512" 9 | "encoding/json" 10 | "errors" 11 | "fmt" 12 | "io" 13 | "log" 14 | "math/big" 15 | mrand "math/rand" 16 | "net/http" 17 | "os" 18 | "path/filepath" 19 | "strings" 20 | "sync" 21 | "time" 22 | 23 | "golang.org/x/crypto/pbkdf2" 24 | ) 25 | 26 | // Default settings 27 | const ( 28 | API_URL = "https://g.api.mega.co.nz" 29 | BASE_DOWNLOAD_URL = "https://mega.co.nz" 30 | RETRIES = 10 31 | DOWNLOAD_WORKERS = 3 32 | MAX_DOWNLOAD_WORKERS = 30 33 | UPLOAD_WORKERS = 1 34 | MAX_UPLOAD_WORKERS = 30 35 | TIMEOUT = time.Second * 10 36 | HTTPSONLY = false 37 | minSleepTime = 10 * time.Millisecond // for retries 38 | maxSleepTime = 5 * time.Second // for retries 39 | X_MEGA_USER_AGENT = "" // custom user agent string. Not set if empty 40 | HASHCASH_CHALLENGE_TIMEOUT = time.Minute * 5 // time limit to solve hashcash challenge 41 | ) 42 | 43 | type config struct { 44 | baseurl string 45 | retries int 46 | dl_workers int 47 | ul_workers int 48 | timeout time.Duration 49 | https bool 50 | } 51 | 52 | func newConfig() config { 53 | return config{ 54 | baseurl: getAPIBaseURL(), 55 | retries: RETRIES, 56 | dl_workers: DOWNLOAD_WORKERS, 57 | ul_workers: UPLOAD_WORKERS, 58 | timeout: TIMEOUT, 59 | https: HTTPSONLY, 60 | } 61 | } 62 | 63 | // Set mega service base url 64 | func (c *config) SetAPIUrl(u string) { 65 | if strings.HasSuffix(u, "/") { 66 | u = strings.TrimRight(u, "/") 67 | } 68 | c.baseurl = u 69 | } 70 | 71 | // Set number of retries for api calls 72 | func (c *config) SetRetries(r int) { 73 | c.retries = r 74 | } 75 | 76 | // Set concurrent download workers 77 | func (c *config) SetDownloadWorkers(w int) error { 78 | if w <= MAX_DOWNLOAD_WORKERS { 79 | c.dl_workers = w 80 | return nil 81 | } 82 | 83 | return EWORKER_LIMIT_EXCEEDED 84 | } 85 | 86 | // Set connection timeout 
87 | func (c *config) SetTimeOut(t time.Duration) { 88 | c.timeout = t 89 | } 90 | 91 | // Set concurrent upload workers 92 | func (c *config) SetUploadWorkers(w int) error { 93 | if w <= MAX_UPLOAD_WORKERS { 94 | c.ul_workers = w 95 | return nil 96 | } 97 | 98 | return EWORKER_LIMIT_EXCEEDED 99 | } 100 | 101 | // Set use https for transfers 102 | func (c *config) SetHTTPS(e bool) { 103 | c.https = e 104 | } 105 | 106 | type Mega struct { 107 | config 108 | // Version of the account 109 | accountVersion int 110 | // Salt for the account if accountVersion > 1 111 | accountSalt []byte 112 | // Sequence number 113 | sn int64 114 | // Server state sn 115 | ssn string 116 | // Session ID 117 | sid string 118 | // Master key 119 | k []byte 120 | // User handle 121 | uh []byte 122 | // Filesystem object 123 | FS *MegaFS 124 | // HTTP Client 125 | client *http.Client 126 | // Loggers 127 | logf func(format string, v ...any) 128 | debugf func(format string, v ...any) 129 | // serialize the API requests 130 | apiMu sync.Mutex 131 | // mutex to protext waitEvents 132 | waitEventsMu sync.Mutex 133 | // Outstanding channels to close to indicate events all received 134 | waitEvents []chan struct{} 135 | } 136 | 137 | // Filesystem node types 138 | const ( 139 | FILE = 0 140 | FOLDER = 1 141 | ROOT = 2 142 | INBOX = 3 143 | TRASH = 4 144 | ) 145 | 146 | // Filesystem node 147 | type Node struct { 148 | fs *MegaFS 149 | name string 150 | hash string 151 | parent *Node 152 | children []*Node 153 | ntype int 154 | size int64 155 | ts time.Time 156 | meta NodeMeta 157 | } 158 | 159 | func (n *Node) removeChild(c *Node) bool { 160 | index := -1 161 | for i, v := range n.children { 162 | if v.hash == c.hash { 163 | index = i 164 | break 165 | } 166 | } 167 | 168 | if index >= 0 { 169 | n.children[index] = n.children[len(n.children)-1] 170 | n.children = n.children[:len(n.children)-1] 171 | return true 172 | } 173 | 174 | return false 175 | } 176 | 177 | func (n *Node) addChild(c *Node) 
{ 178 | if n != nil { 179 | n.children = append(n.children, c) 180 | } 181 | } 182 | 183 | func (n *Node) getChildren() []*Node { 184 | return n.children 185 | } 186 | 187 | func (n *Node) GetType() int { 188 | n.fs.mutex.Lock() 189 | defer n.fs.mutex.Unlock() 190 | return n.ntype 191 | } 192 | 193 | func (n *Node) GetSize() int64 { 194 | n.fs.mutex.Lock() 195 | defer n.fs.mutex.Unlock() 196 | return n.size 197 | } 198 | 199 | func (n *Node) GetTimeStamp() time.Time { 200 | n.fs.mutex.Lock() 201 | defer n.fs.mutex.Unlock() 202 | return n.ts 203 | } 204 | 205 | func (n *Node) GetName() string { 206 | n.fs.mutex.Lock() 207 | defer n.fs.mutex.Unlock() 208 | return n.name 209 | } 210 | 211 | func (n *Node) GetHash() string { 212 | n.fs.mutex.Lock() 213 | defer n.fs.mutex.Unlock() 214 | return n.hash 215 | } 216 | 217 | type NodeMeta struct { 218 | key []byte 219 | compkey []byte 220 | iv []byte 221 | mac []byte 222 | } 223 | 224 | // Mega filesystem object 225 | type MegaFS struct { 226 | root *Node 227 | trash *Node 228 | inbox *Node 229 | sroots []*Node 230 | lookup map[string]*Node 231 | skmap map[string]string 232 | mutex sync.Mutex 233 | } 234 | 235 | // Get filesystem root node 236 | func (fs *MegaFS) GetRoot() *Node { 237 | fs.mutex.Lock() 238 | defer fs.mutex.Unlock() 239 | return fs.root 240 | } 241 | 242 | // Get filesystem trash node 243 | func (fs *MegaFS) GetTrash() *Node { 244 | fs.mutex.Lock() 245 | defer fs.mutex.Unlock() 246 | return fs.trash 247 | } 248 | 249 | // Get inbox node 250 | func (fs *MegaFS) GetInbox() *Node { 251 | fs.mutex.Lock() 252 | defer fs.mutex.Unlock() 253 | return fs.inbox 254 | } 255 | 256 | // Get a node pointer from its hash 257 | func (fs *MegaFS) HashLookup(h string) *Node { 258 | fs.mutex.Lock() 259 | defer fs.mutex.Unlock() 260 | 261 | return fs.hashLookup(h) 262 | } 263 | 264 | func (fs *MegaFS) hashLookup(h string) *Node { 265 | if node, ok := fs.lookup[h]; ok { 266 | return node 267 | } 268 | 269 | return nil 270 | } 271 
| 272 | // Get the list of child nodes for a given node 273 | func (fs *MegaFS) GetChildren(n *Node) ([]*Node, error) { 274 | fs.mutex.Lock() 275 | defer fs.mutex.Unlock() 276 | 277 | var empty []*Node 278 | 279 | if n == nil { 280 | return empty, EARGS 281 | } 282 | 283 | node := fs.hashLookup(n.hash) 284 | if node == nil { 285 | return empty, ENOENT 286 | } 287 | 288 | return node.getChildren(), nil 289 | } 290 | 291 | // Retrieve all the nodes in the given node tree path by name 292 | // This method returns array of nodes upto the matched subpath 293 | // (in same order as input names array) even if the target node is not located. 294 | func (fs *MegaFS) PathLookup(root *Node, ns []string) ([]*Node, error) { 295 | fs.mutex.Lock() 296 | defer fs.mutex.Unlock() 297 | 298 | if root == nil { 299 | return nil, EARGS 300 | } 301 | 302 | var err error 303 | var found = true 304 | 305 | nodepath := []*Node{} 306 | 307 | children := root.children 308 | for _, name := range ns { 309 | found = false 310 | for _, n := range children { 311 | if n.name == name { 312 | nodepath = append(nodepath, n) 313 | children = n.children 314 | found = true 315 | break 316 | } 317 | } 318 | 319 | if !found { 320 | break 321 | } 322 | } 323 | 324 | if !found { 325 | err = ENOENT 326 | } 327 | 328 | return nodepath, err 329 | } 330 | 331 | // Get top level directory nodes shared by other users 332 | func (fs *MegaFS) GetSharedRoots() []*Node { 333 | fs.mutex.Lock() 334 | defer fs.mutex.Unlock() 335 | return fs.sroots 336 | } 337 | 338 | func newMegaFS() *MegaFS { 339 | fs := &MegaFS{ 340 | lookup: make(map[string]*Node), 341 | skmap: make(map[string]string), 342 | } 343 | return fs 344 | } 345 | 346 | func New() *Mega { 347 | max := big.NewInt(0x100000000) 348 | bigx, err := rand.Int(rand.Reader, max) 349 | if err != nil { 350 | panic(err) // this should be returned, but this is a public interface 351 | } 352 | cfg := newConfig() 353 | mgfs := newMegaFS() 354 | m := &Mega{ 355 | config: 
cfg,
		sn:     bigx.Int64(),
		FS:     mgfs,
		client: newHttpClient(cfg.timeout),
	}
	m.SetLogger(log.Printf)
	m.SetDebugger(nil)
	return m
}

// SetClient sets the HTTP client in use
func (m *Mega) SetClient(client *http.Client) *Mega {
	m.client = client
	return m
}

// discardLogf is a logger that drops all messages.
func discardLogf(format string, v ...any) {
}

// GetSessionID returns an opaque string representing the session.
func (m *Mega) GetSessionID() string {
	return m.sid
}

// GetMasterKey returns the account master key.
func (m *Mega) GetMasterKey() []byte {
	return m.k
}

// LoginWithKeys "logs in" using the session ID (for API auth) and
// master key (for decryption), as an alternative to username/password.
// This can be used to import back a session exported with
// GetSessionID and GetMasterKey without requiring the password again.
func (m *Mega) LoginWithKeys(sessionId string, masterKey []byte) error {
	m.sid = sessionId
	m.k = masterKey
	return m.postAuthInit()
}

// SetLogger sets the logger for important messages. By default this
// is log.Printf. Use nil to discard the messages.
func (m *Mega) SetLogger(logf func(format string, v ...any)) *Mega {
	if logf == nil {
		logf = discardLogf
	}
	m.logf = logf
	return m
}

// SetDebugger sets the logger for debug messages. By default these
// messages are not output.
func (m *Mega) SetDebugger(debugf func(format string, v ...any)) *Mega {
	if debugf == nil {
		debugf = discardLogf
	}
	m.debugf = debugf
	return m
}

// backOffSleep sleeps for the time pointed to then adjusts it by
// doubling it up to a maximum of maxSleepTime.
414 | // 415 | // This produces a truncated exponential backoff sleep 416 | func backOffSleep(pt *time.Duration) { 417 | time.Sleep(*pt) 418 | *pt *= 2 419 | if *pt > maxSleepTime { 420 | *pt = maxSleepTime 421 | } 422 | } 423 | 424 | // API request method 425 | func (m *Mega) api_request(r []byte) (buf []byte, err error) { 426 | var resp *http.Response 427 | // serialize the API requests 428 | m.apiMu.Lock() 429 | defer func() { 430 | m.sn++ 431 | m.apiMu.Unlock() 432 | }() 433 | 434 | url := fmt.Sprintf("%s/cs?id=%d", m.baseurl, m.sn) 435 | 436 | if m.sid != "" { 437 | url = fmt.Sprintf("%s&sid=%s", url, m.sid) 438 | } 439 | 440 | sleepTime := minSleepTime // initial backoff time 441 | for i := 0; i < m.retries+1; i++ { 442 | if i != 0 { 443 | m.debugf("Retry API request %d/%d: %v", i, m.retries, err) 444 | backOffSleep(&sleepTime) 445 | } 446 | 447 | // Create request 448 | req, err := http.NewRequest("POST", url, bytes.NewBuffer(r)) 449 | if err != nil { 450 | continue 451 | } 452 | addRequestHeaders(req) 453 | 454 | // Send request 455 | resp, err = m.client.Do(req) 456 | if err != nil { 457 | continue 458 | } 459 | 460 | // Handle 402 Payment Required status with hashcash challenge 461 | if resp.StatusCode == 402 { 462 | sleepTime = minSleepTime // reset exp backoff time 463 | hashCashHeader := resp.Header.Get("X-Hashcash") 464 | if hashCashHeader == "" { 465 | _ = resp.Body.Close() 466 | continue 467 | } 468 | 469 | // Parse hashcash header 470 | easiness, token, valid := parseHashcash(hashCashHeader) 471 | if !valid { 472 | _ = resp.Body.Close() 473 | continue 474 | } 475 | 476 | // Close the current response before making a new request 477 | _ = resp.Body.Close() 478 | 479 | // Generate hashcash response 480 | cashValue, err := solveHashCashChallenge(token, easiness, HASHCASH_CHALLENGE_TIMEOUT, halfCPUCores()) 481 | if err != nil { 482 | m.debugf("Failed to solve hashcash challenge: %v", err) 483 | continue 484 | } 485 | if cashValue == "" { 486 | 
m.debugf("Failed to solve hashcash challenge: empty cash value") 487 | continue 488 | } 489 | 490 | // Create a new request with the hashcash header 491 | req, err = http.NewRequest("POST", url, bytes.NewBuffer(r)) 492 | if err != nil { 493 | continue 494 | } 495 | addHashCashRequestHeaders(req, token, cashValue) 496 | // Send the new request 497 | resp, err = m.client.Do(req) 498 | if err != nil { 499 | continue 500 | } 501 | 502 | // If still getting 402, give up this attempt and retry 503 | if resp.StatusCode == 402 { 504 | _ = resp.Body.Close() 505 | continue 506 | } 507 | } 508 | 509 | if resp.StatusCode != 200 { 510 | // err must be not-nil on a continue 511 | _ = resp.Body.Close() 512 | continue 513 | } 514 | 515 | buf, err = io.ReadAll(resp.Body) 516 | if err != nil { 517 | _ = resp.Body.Close() 518 | continue 519 | } 520 | err = resp.Body.Close() 521 | if err != nil { 522 | continue 523 | } 524 | 525 | // at this point the body is read and closed 526 | 527 | if !bytes.HasPrefix(buf, []byte("[")) && !bytes.HasPrefix(buf, []byte("-")) { 528 | return nil, EBADRESP 529 | } 530 | 531 | if len(buf) < 6 { 532 | var emsg [1]ErrorMsg 533 | err = json.Unmarshal(buf, &emsg) 534 | if err != nil { 535 | err = json.Unmarshal(buf, &emsg[0]) 536 | } 537 | if err != nil { 538 | return buf, EBADRESP 539 | } 540 | err = parseError(emsg[0]) 541 | if err == EAGAIN { 542 | continue 543 | } 544 | return buf, err 545 | } 546 | 547 | if err == nil { 548 | return buf, nil 549 | } 550 | } 551 | 552 | return nil, err 553 | } 554 | 555 | // prelogin call 556 | func (m *Mega) prelogin(email string) error { 557 | var msg [1]PreloginMsg 558 | var res [1]PreloginResp 559 | 560 | email = strings.ToLower(email) // mega uses lowercased emails for login purposes - FIXME is this true for prelogin? 
561 | 562 | msg[0].Cmd = "us0" 563 | msg[0].User = email 564 | 565 | req, err := json.Marshal(msg) 566 | if err != nil { 567 | return err 568 | } 569 | result, err := m.api_request(req) 570 | if err != nil { 571 | return err 572 | } 573 | 574 | err = json.Unmarshal(result, &res) 575 | if err != nil { 576 | return err 577 | } 578 | 579 | if res[0].Version == 0 { 580 | return errors.New("prelogin: no version returned") 581 | } else if res[0].Version > 2 { 582 | return fmt.Errorf("prelogin: version %d account not supported", res[0].Version) 583 | } else if res[0].Version == 2 { 584 | if len(res[0].Salt) == 0 { 585 | return errors.New("prelogin: no salt returned") 586 | } 587 | m.accountSalt, err = base64urldecode(res[0].Salt) 588 | if err != nil { 589 | return err 590 | } 591 | } 592 | m.accountVersion = res[0].Version 593 | 594 | return nil 595 | } 596 | 597 | // Authenticate and start a session 598 | func (m *Mega) login(email string, passwd string, multiFactor string) error { 599 | var msg [1]LoginMsg 600 | var res [1]LoginResp 601 | var err error 602 | var result []byte 603 | 604 | email = strings.ToLower(email) // mega uses lowercased emails for login purposes 605 | 606 | passkey, err := password_key(passwd) 607 | if err != nil { 608 | return err 609 | } 610 | uhandle, err := stringhash(email, passkey) 611 | if err != nil { 612 | return err 613 | } 614 | m.uh = make([]byte, len(uhandle)) 615 | copy(m.uh, uhandle) 616 | 617 | msg[0].Cmd = "us" 618 | msg[0].User = email 619 | msg[0].Mfa = multiFactor 620 | 621 | if m.accountVersion == 1 { 622 | msg[0].Handle = uhandle 623 | } else { 624 | const derivedKeyLength = 2 * aes.BlockSize 625 | derivedKey := pbkdf2.Key([]byte(passwd), m.accountSalt, 100000, derivedKeyLength, sha512.New) 626 | authKey := derivedKey[aes.BlockSize:] 627 | passkey = derivedKey[:aes.BlockSize] 628 | 629 | sessionKey := make([]byte, aes.BlockSize) 630 | _, err = rand.Read(sessionKey) 631 | if err != nil { 632 | return err 633 | } 634 | 
msg[0].Handle = base64urlencode(authKey) 635 | msg[0].SessionKey = base64urlencode(sessionKey) 636 | } 637 | 638 | req, err := json.Marshal(msg) 639 | if err != nil { 640 | return err 641 | } 642 | result, err = m.api_request(req) 643 | if err != nil { 644 | return err 645 | } 646 | 647 | err = json.Unmarshal(result, &res) 648 | if err != nil { 649 | return err 650 | } 651 | 652 | m.k, err = base64urldecode(res[0].Key) 653 | if err != nil { 654 | return err 655 | } 656 | cipher, err := aes.NewCipher(passkey) 657 | if err != nil { 658 | return err 659 | } 660 | cipher.Decrypt(m.k, m.k) 661 | m.sid, err = decryptSessionId(res[0].Privk, res[0].Csid, m.k) 662 | if err != nil { 663 | return err 664 | } 665 | return nil 666 | } 667 | 668 | // Authenticate and start a session 669 | func (m *Mega) Login(email string, passwd string) error { 670 | return m.MultiFactorLogin(email, passwd, "") 671 | } 672 | 673 | // MultiFactorLogin - Authenticate and start a session with 2FA 674 | func (m *Mega) MultiFactorLogin(email, passwd, multiFactor string) error { 675 | err := m.prelogin(email) 676 | if err != nil { 677 | return err 678 | } 679 | 680 | err = m.login(email, passwd, multiFactor) 681 | if err != nil { 682 | return err 683 | } 684 | 685 | return m.postAuthInit() 686 | } 687 | 688 | // Finish initializing the Mega client after Login*() 689 | func (m *Mega) postAuthInit() error { 690 | 691 | waitEvent := m.WaitEventsStart() 692 | 693 | err := m.getFileSystem() 694 | if err != nil { 695 | return err 696 | } 697 | 698 | // Wait until the all the pending events have been received 699 | m.WaitEvents(waitEvent, 5*time.Second) 700 | 701 | return nil 702 | } 703 | 704 | // WaitEventsStart - call this before you do the action which might 705 | // generate events then use the returned channel as a parameter to 706 | // WaitEvents to wait for the event(s) to be received. 
func (m *Mega) WaitEventsStart() <-chan struct{} {
	// The channel is closed (never sent on) by waitEventsFire.
	ch := make(chan struct{})
	m.waitEventsMu.Lock()
	m.waitEvents = append(m.waitEvents, ch)
	m.waitEventsMu.Unlock()
	return ch
}

// WaitEvents waits for all outstanding events to be received for a
// maximum of duration. eventChan should be a channel as returned
// from WaitEventStart.
//
// If the timeout elapsed then it returns true otherwise false.
func (m *Mega) WaitEvents(eventChan <-chan struct{}, duration time.Duration) (timedout bool) {
	m.debugf("Waiting for events to be finished for %v", duration)
	timer := time.NewTimer(duration)
	select {
	case <-eventChan:
		m.debugf("Events received")
		timedout = false
	case <-timer.C:
		m.debugf("Timeout waiting for events")
		timedout = true
	}
	timer.Stop()
	return timedout
}

// waitEventsFire - fire the wait event
//
// Closes every registered wait channel, releasing all WaitEvents
// callers, and clears the list.
func (m *Mega) waitEventsFire() {
	m.waitEventsMu.Lock()
	if len(m.waitEvents) > 0 {
		m.debugf("Signalling events received")
		for _, ch := range m.waitEvents {
			close(ch)
		}
		m.waitEvents = nil
	}
	m.waitEventsMu.Unlock()
}

// Get user information
//
// Issues the "ug" API command and returns the decoded response.
func (m *Mega) GetUser() (UserResp, error) {
	var msg [1]UserMsg
	var res [1]UserResp

	msg[0].Cmd = "ug"

	req, err := json.Marshal(msg)
	if err != nil {
		return res[0], err
	}
	result, err := m.api_request(req)
	if err != nil {
		return res[0], err
	}

	err = json.Unmarshal(result, &res)
	return res[0], err
}

// Get quota information
//
// Issues the "uq" API command asking for both transfer (Xfer) and
// storage (Strg) figures.
func (m *Mega) GetQuota() (QuotaResp, error) {
	var msg [1]QuotaMsg
	var res [1]QuotaResp

	msg[0].Cmd = "uq"
	msg[0].Xfer = 1
	msg[0].Strg = 1

	req, err := json.Marshal(msg)
	if err != nil {
		return res[0], err
	}
	result, err := m.api_request(req)
	if err != nil {
		return res[0], err
	}

	err = json.Unmarshal(result, &res)
	return res[0], err
}

// Add a node into filesystem
//
// Decrypts the item key (owned, shared-folder or shared-file cases),
// decodes the attributes, and links the node into the in-memory tree,
// creating a placeholder parent if the parent hasn't been seen yet.
// Callers are expected to hold m.FS.mutex (getFileSystem and
// Upload.Finish both do) - TODO confirm for any other callers.
func (m *Mega) addFSNode(itm FSNode) (*Node, error) {
	var compkey, key []uint32
	var attr FileAttr
	var node, parent *Node
	var err error

	master_aes, err := aes.NewCipher(m.k)
	if err != nil {
		return nil, err
	}

	switch {
	case itm.T == FOLDER || itm.T == FILE:
		// Key field has the form "<user-or-share-handle>:<encrypted key>"
		args := strings.Split(itm.Key, ":")
		if len(args) < 2 {
			return nil, fmt.Errorf("not enough : in item.Key: %q", itm.Key)
		}
		itemUser, itemKey := args[0], args[1]
		itemKeyParts := strings.Split(itemKey, "/")
		if len(itemKeyParts) >= 2 {
			itemKey = itemKeyParts[0]
			// the other part is maybe a share key handle?
		}

		switch {
		// File or folder owned by current user
		case itemUser == itm.User:
			// Item key is encrypted with the master key
			buf, err := base64urldecode(itemKey)
			if err != nil {
				return nil, err
			}
			err = blockDecrypt(master_aes, buf, buf)
			if err != nil {
				return nil, err
			}
			compkey, err = bytes_to_a32(buf)
			if err != nil {
				return nil, err
			}
		// Shared folder
		case itm.SUser != "" && itm.SKey != "":
			// Share key is itself encrypted with the master key;
			// the item key is then encrypted with the share key.
			sk, err := base64urldecode(itm.SKey)
			if err != nil {
				return nil, err
			}
			err = blockDecrypt(master_aes, sk, sk)
			if err != nil {
				return nil, err
			}
			sk_aes, err := aes.NewCipher(sk)
			if err != nil {
				return nil, err
			}

			// Remember the (encrypted) share key for items inside this share
			m.FS.skmap[itm.Hash] = itm.SKey
			buf, err := base64urldecode(itemKey)
			if err != nil {
				return nil, err
			}
			err = blockDecrypt(sk_aes, buf, buf)
			if err != nil {
				return nil, err
			}
			compkey, err = bytes_to_a32(buf)
			if err != nil {
				return nil, err
			}
		// Shared file
		default:
			// Look up the share key recorded earlier by its handle
			k, ok := m.FS.skmap[itemUser]
			if !ok {
				return nil, errors.New("couldn't find decryption key for shared file")
			}
			b, err := base64urldecode(k)
			if err != nil {
				return nil, err
			}
			err = blockDecrypt(master_aes, b, b)
			if err != nil {
				return nil, err
			}
			block, err := aes.NewCipher(b)
			if err != nil {
				return nil, err
			}
			buf, err := base64urldecode(itemKey)
			if err != nil {
				return nil, err
			}
			err = blockDecrypt(block, buf, buf)
			if err != nil {
				return nil, err
			}
			compkey, err = bytes_to_a32(buf)
			if err != nil {
				return nil, err
			}
		}

		switch {
		case itm.T == FILE:
			// File compound key is 8 words: the real AES key is the
			// first half XORed with the second half (which holds IV+MAC).
			if len(compkey) < 8 {
				m.logf("ignoring item: compkey too short (%d): %#v", len(compkey), itm)
				return nil, nil
			}
			key = []uint32{compkey[0] ^ compkey[4], compkey[1] ^ compkey[5], compkey[2] ^ compkey[6], compkey[3] ^ compkey[7]}
		default:
			key = compkey
		}

		bkey, err := a32_to_bytes(key)
		if err != nil {
			// FIXME:
			attr.Name = "BAD ATTRIBUTE"
		} else {
			attr, err = decryptAttr(bkey, itm.Attr)
			// FIXME:
			if err != nil {
				attr.Name = "BAD ATTRIBUTE"
			}
		}
	}

	// Reuse an existing node for this hash, or create one
	n, ok := m.FS.lookup[itm.Hash]
	switch {
	case ok:
		node = n
	default:
		node = &Node{
			fs:    m.FS,
			ntype: itm.T,
			size:  itm.Sz,
			ts:    time.Unix(itm.Ts, 0),
		}

		m.FS.lookup[itm.Hash] = node
	}

	// Attach to the parent; if the parent hasn't been seen yet create
	// a placeholder FOLDER node for it (filled in when it arrives)
	n, ok = m.FS.lookup[itm.Parent]
	switch {
	case ok:
		parent = n
		// remove-then-add makes re-adding an already-known node idempotent
		parent.removeChild(node)
		parent.addChild(node)
	default:
		parent = nil
		if itm.Parent != "" {
			parent = &Node{
				fs:       m.FS,
				children: []*Node{node},
				ntype:    FOLDER,
			}
			m.FS.lookup[itm.Parent] = parent
		}
	}

	// Fill in per-type metadata / well-known names
	switch {
	case itm.T == FILE:
		var meta NodeMeta
		meta.key, err = a32_to_bytes(key)
		if err != nil {
			return nil, err
		}
		meta.iv, err = a32_to_bytes([]uint32{compkey[4], compkey[5], 0, 0})
		if err != nil {
			return nil, err
		}
		meta.mac, err = a32_to_bytes([]uint32{compkey[6], compkey[7]})
		if err != nil {
			return nil, err
		}
		meta.compkey, err = a32_to_bytes(compkey)
		if err != nil {
			return nil, err
		}
		node.meta = meta
	case itm.T == FOLDER:
		var meta NodeMeta
		meta.key, err = a32_to_bytes(key)
		if err != nil {
			return nil, err
		}
		meta.compkey, err = a32_to_bytes(compkey)
		if err != nil {
			return nil, err
		}
		node.meta = meta
	case itm.T == ROOT:
		attr.Name = "Cloud Drive"
		m.FS.root = node
	case itm.T == INBOX:
		attr.Name = "InBox"
		m.FS.inbox = node
	case itm.T == TRASH:
		attr.Name = "Trash"
		m.FS.trash = node
	}

	// Shared directories
	if itm.SUser != "" && itm.SKey != "" {
		m.FS.sroots = append(m.FS.sroots, node)
	}

	node.name = attr.Name
	node.hash = itm.Hash
	node.parent = parent
	node.ntype = itm.T

	return node, nil
}

// Get all nodes from filesystem
//
// Issues the "f" command, records share keys, builds the node tree via
// addFSNode (decode failures are logged and skipped, not fatal), then
// starts the event polling goroutine.
func (m *Mega) getFileSystem() error {
	m.FS.mutex.Lock()
	defer m.FS.mutex.Unlock()

	var msg [1]FilesMsg
	var res [1]FilesResp

	msg[0].Cmd = "f"
	msg[0].C = 1

	req, err := json.Marshal(msg)
	if err != nil {
		return err
	}
	result, err := m.api_request(req)
	if err != nil {
		return err
	}

	err = json.Unmarshal(result, &res)
	if err != nil {
		return err
	}

	for _, sk := range res[0].Ok {
		m.FS.skmap[sk.Hash] = sk.Key
	}

	for _, itm := range res[0].F {
		_, err = m.addFSNode(itm)
		if err != nil {
			m.debugf("couldn't decode FSNode %#v: %v ", itm, err)
			continue
		}
	}

	// Remember the server sequence number for event polling
	m.ssn = res[0].Sn

	go m.pollEvents()

	return nil
}

// Download contains the internal state of a download
type Download struct {
	m           *Mega
	src         *Node
	resourceUrl string
	aes_block   cipher.Block
	iv          []byte
	mac_enc     cipher.BlockMode
	mutex       sync.Mutex // to protect the following
	chunks      []chunkSize
	chunk_macs  [][]byte
}

// an all nil IV for mac calculations
var zero_iv = make([]byte, 16)

// Create a new Download from the src Node
//
// Call Chunks to find out how many chunks there are, then for id =
// 0..chunks-1 call DownloadChunk. Finally call Finish() to receive
// the error status.
func (m *Mega) NewDownload(src *Node) (*Download, error) {
	if src == nil {
		return nil, EARGS
	}

	var msg [1]DownloadMsg
	var res [1]DownloadResp

	// Hold FS lock only while reading node state
	m.FS.mutex.Lock()
	msg[0].Cmd = "g"
	msg[0].G = 1
	msg[0].N = src.hash
	if m.config.https {
		msg[0].SSL = 2
	}
	key := src.meta.key
	m.FS.mutex.Unlock()

	request, err := json.Marshal(msg)
	if err != nil {
		return nil, err
	}
	result, err := m.api_request(request)
	if err != nil {
		return nil, err
	}

	err = json.Unmarshal(result, &res)
	if err != nil {
		return nil, err
	}

	// DownloadResp has an embedded error in it for some reason
	if res[0].Err != 0 {
		return nil, parseError(res[0].Err)
	}

	// Attribute decrypt doubles as a key sanity check
	_, err = decryptAttr(key, res[0].Attr)
	if err != nil {
		return nil, err
	}

	chunks := getChunkSizes(int64(res[0].Size))

	aes_block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}

	mac_enc := cipher.NewCBCEncrypter(aes_block, zero_iv)
	m.FS.mutex.Lock()
	t, err := bytes_to_a32(src.meta.iv)
	m.FS.mutex.Unlock()
	if err != nil {
		return nil, err
	}
	// MAC IV is the 8-byte nonce repeated twice
	iv, err := a32_to_bytes([]uint32{t[0], t[1], t[0], t[1]})
	if err != nil {
		return nil, err
	}

	downloadUrl := res[0].G
	if m.config.https && strings.HasPrefix(downloadUrl, "http://") {
		downloadUrl = "https://" + strings.TrimPrefix(downloadUrl, "http://")
	}

	d := &Download{
		m:           m,
		src:         src,
		resourceUrl: downloadUrl,
		aes_block:   aes_block,
		iv:          iv,
		mac_enc:     mac_enc,
		chunks:      chunks,
		chunk_macs:  make([][]byte, len(chunks)),
	}
	return d, nil
}

// Chunks returns The number of chunks in the download.
func (d *Download) Chunks() int {
	return len(d.chunks)
}

// ChunkLocation returns the position in the file and the size of the chunk
func (d *Download) ChunkLocation(id int) (position int64, size int, err error) {
	if id < 0 || id >= len(d.chunks) {
		return 0, 0, EARGS
	}
	d.mutex.Lock()
	defer d.mutex.Unlock()
	return d.chunks[id].position, d.chunks[id].size, nil
}

// DownloadChunk gets a chunk with the given number and update the
// mac, returning the position in the file of the chunk
func (d *Download) DownloadChunk(id int) (chunk []byte, err error) {
	if id < 0 || id >= len(d.chunks) {
		return nil, EARGS
	}

	chk_start, chk_size, err := d.ChunkLocation(id)
	if err != nil {
		return nil, err
	}

	var resp *http.Response
	// Byte-range of this chunk is encoded in the URL path
	chunk_url := fmt.Sprintf("%s/%d-%d", d.resourceUrl, chk_start, chk_start+int64(chk_size)-1)
	sleepTime := minSleepTime // initial backoff time
	for retry := 0; retry < d.m.retries+1; retry++ {
		resp, err = d.m.client.Get(chunk_url)
		if err == nil {
			if resp.StatusCode == 200 {
				break
			}
			err = errors.New("Http Status: " + resp.Status)
			_ = resp.Body.Close()
		}
		d.m.debugf("%s: Retry download chunk %d/%d: %v", d.src.name, retry, d.m.retries, err)
		backOffSleep(&sleepTime)
	}
	if err != nil {
		return nil, err
	}
	if resp == nil {
		return nil, errors.New("retries exceeded")
	}

	chunk, err = io.ReadAll(resp.Body)
	if err != nil {
		_ = resp.Body.Close()
		return nil, err
	}

	err = resp.Body.Close()
	if err != nil {
		return nil, err
	}

	// body is read and closed here

	if len(chunk) != chk_size {
		return nil, errors.New("wrong size for downloaded chunk")
	}

	// Decrypt the block
	// CTR counter = 128-bit block index of chk_start: words 2/3 carry
	// the high/low 32 bits of chk_start/16 (0x1000000000 == 2^36)
	ctr_iv, err := bytes_to_a32(d.src.meta.iv)
	if err != nil {
		return nil, err
	}
	ctr_iv[2] = uint32(uint64(chk_start) / 0x1000000000)
	ctr_iv[3] = uint32(chk_start / 0x10)
	bctr_iv, err := a32_to_bytes(ctr_iv)
	if err != nil {
		return nil, err
	}
	ctr_aes := cipher.NewCTR(d.aes_block, bctr_iv)
	ctr_aes.XORKeyStream(chunk, chunk)

	// Update the chunk_macs
	// CBC-MAC of the null-padded plaintext chunk; only the final
	// 16-byte block is kept as this chunk's MAC
	enc := cipher.NewCBCEncrypter(d.aes_block, d.iv)
	i := 0
	block := make([]byte, 16)
	paddedChunk := paddnull(chunk, 16)
	for i = 0; i < len(paddedChunk); i += 16 {
		enc.CryptBlocks(block, paddedChunk[i:i+16])
	}

	d.mutex.Lock()
	if len(d.chunk_macs) > 0 {
		d.chunk_macs[id] = make([]byte, 16)
		copy(d.chunk_macs[id], block)
	}
	d.mutex.Unlock()

	return chunk, nil
}

// Finish checks the accumulated MAC for each block.
1246 | // 1247 | // If all the chunks weren't downloaded then it will just return nil 1248 | func (d *Download) Finish() (err error) { 1249 | // Can't check a 0 sized file 1250 | if len(d.chunk_macs) == 0 { 1251 | return nil 1252 | } 1253 | mac_data := make([]byte, 16) 1254 | for _, v := range d.chunk_macs { 1255 | // If a chunk_macs hasn't been set then the whole file 1256 | // wasn't downloaded and we can't check it 1257 | if v == nil { 1258 | return nil 1259 | } 1260 | d.mac_enc.CryptBlocks(mac_data, v) 1261 | } 1262 | 1263 | tmac, err := bytes_to_a32(mac_data) 1264 | if err != nil { 1265 | return err 1266 | } 1267 | btmac, err := a32_to_bytes([]uint32{tmac[0] ^ tmac[1], tmac[2] ^ tmac[3]}) 1268 | if err != nil { 1269 | return err 1270 | } 1271 | if !bytes.Equal(btmac, d.src.meta.mac) { 1272 | return EMACMISMATCH 1273 | } 1274 | 1275 | return nil 1276 | } 1277 | 1278 | // Download file from filesystem reporting progress if not nil 1279 | func (m *Mega) DownloadFile(src *Node, dstpath string, progress *chan int) error { 1280 | defer func() { 1281 | if progress != nil { 1282 | close(*progress) 1283 | } 1284 | }() 1285 | 1286 | d, err := m.NewDownload(src) 1287 | if err != nil { 1288 | return err 1289 | } 1290 | 1291 | _, err = os.Stat(dstpath) 1292 | if os.IsExist(err) { 1293 | err = os.Remove(dstpath) 1294 | if err != nil { 1295 | return err 1296 | } 1297 | } 1298 | 1299 | outfile, err := os.OpenFile(dstpath, os.O_RDWR|os.O_CREATE, 0600) 1300 | if err != nil { 1301 | return err 1302 | } 1303 | 1304 | workch := make(chan int) 1305 | errch := make(chan error, m.dl_workers) 1306 | wg := sync.WaitGroup{} 1307 | 1308 | // Fire chunk download workers 1309 | for w := 0; w < m.dl_workers; w++ { 1310 | wg.Add(1) 1311 | 1312 | go func() { 1313 | defer wg.Done() 1314 | 1315 | // Wait for work blocked on channel 1316 | for id := range workch { 1317 | chunk, err := d.DownloadChunk(id) 1318 | if err != nil { 1319 | errch <- err 1320 | return 1321 | } 1322 | 1323 | chk_start, 
_, err := d.ChunkLocation(id) 1324 | if err != nil { 1325 | errch <- err 1326 | return 1327 | } 1328 | 1329 | _, err = outfile.WriteAt(chunk, chk_start) 1330 | if err != nil { 1331 | errch <- err 1332 | return 1333 | } 1334 | 1335 | if progress != nil { 1336 | *progress <- len(chunk) 1337 | } 1338 | } 1339 | }() 1340 | } 1341 | 1342 | // Place chunk download jobs to chan 1343 | err = nil 1344 | for id := 0; id < d.Chunks() && err == nil; { 1345 | select { 1346 | case workch <- id: 1347 | id++ 1348 | case err = <-errch: 1349 | } 1350 | } 1351 | close(workch) 1352 | 1353 | wg.Wait() 1354 | 1355 | closeErr := outfile.Close() 1356 | if err != nil { 1357 | _ = os.Remove(dstpath) 1358 | return err 1359 | } 1360 | if closeErr != nil { 1361 | return closeErr 1362 | } 1363 | 1364 | return d.Finish() 1365 | } 1366 | 1367 | // Upload contains the internal state of a upload 1368 | type Upload struct { 1369 | m *Mega 1370 | parenthash string 1371 | name string 1372 | uploadUrl string 1373 | aes_block cipher.Block 1374 | iv []byte 1375 | kiv []byte 1376 | mac_enc cipher.BlockMode 1377 | kbytes []byte 1378 | ukey []uint32 1379 | mutex sync.Mutex // to protect the following 1380 | chunks []chunkSize 1381 | chunk_macs [][]byte 1382 | completion_handle []byte 1383 | } 1384 | 1385 | // Create a new Upload of name into parent of fileSize 1386 | // 1387 | // Call Chunks to find out how many chunks there are, then for id = 1388 | // 0..chunks-1 Call ChunkLocation then UploadChunk. Finally call 1389 | // Finish() to receive the error status and the *Node. 
1390 | func (m *Mega) NewUpload(parent *Node, name string, fileSize int64) (*Upload, error) { 1391 | if parent == nil { 1392 | return nil, EARGS 1393 | } 1394 | 1395 | var msg [1]UploadMsg 1396 | var res [1]UploadResp 1397 | parenthash := parent.GetHash() 1398 | 1399 | msg[0].Cmd = "u" 1400 | msg[0].S = fileSize 1401 | if m.config.https { 1402 | msg[0].SSL = 2 1403 | } 1404 | 1405 | request, err := json.Marshal(msg) 1406 | if err != nil { 1407 | return nil, err 1408 | } 1409 | result, err := m.api_request(request) 1410 | if err != nil { 1411 | return nil, err 1412 | } 1413 | 1414 | err = json.Unmarshal(result, &res) 1415 | if err != nil { 1416 | return nil, err 1417 | } 1418 | 1419 | ukey := []uint32{0, 0, 0, 0, 0, 0} 1420 | for i := range ukey { 1421 | ukey[i] = uint32(mrand.Int31()) 1422 | 1423 | } 1424 | 1425 | kbytes, err := a32_to_bytes(ukey[:4]) 1426 | if err != nil { 1427 | return nil, err 1428 | } 1429 | kiv, err := a32_to_bytes([]uint32{ukey[4], ukey[5], 0, 0}) 1430 | if err != nil { 1431 | return nil, err 1432 | } 1433 | aes_block, err := aes.NewCipher(kbytes) 1434 | if err != nil { 1435 | return nil, err 1436 | } 1437 | 1438 | mac_enc := cipher.NewCBCEncrypter(aes_block, zero_iv) 1439 | iv, err := a32_to_bytes([]uint32{ukey[4], ukey[5], ukey[4], ukey[5]}) 1440 | if err != nil { 1441 | return nil, err 1442 | } 1443 | 1444 | chunks := getChunkSizes(fileSize) 1445 | 1446 | // File size is zero 1447 | // Do one empty request to get the completion handle 1448 | if len(chunks) == 0 { 1449 | chunks = append(chunks, chunkSize{position: 0, size: 0}) 1450 | } 1451 | 1452 | uploadUrl := res[0].P 1453 | if m.config.https && strings.HasPrefix(uploadUrl, "http://") { 1454 | uploadUrl = "https://" + strings.TrimPrefix(uploadUrl, "http://") 1455 | } 1456 | 1457 | u := &Upload{ 1458 | m: m, 1459 | parenthash: parenthash, 1460 | name: name, 1461 | uploadUrl: uploadUrl, 1462 | aes_block: aes_block, 1463 | iv: iv, 1464 | kiv: kiv, 1465 | mac_enc: mac_enc, 1466 | kbytes: 
kbytes, 1467 | ukey: ukey, 1468 | chunks: chunks, 1469 | chunk_macs: make([][]byte, len(chunks)), 1470 | completion_handle: []byte{}, 1471 | } 1472 | return u, nil 1473 | } 1474 | 1475 | // Chunks returns The number of chunks in the upload. 1476 | func (u *Upload) Chunks() int { 1477 | return len(u.chunks) 1478 | } 1479 | 1480 | // ChunkLocation returns the position in the file and the size of the chunk 1481 | func (u *Upload) ChunkLocation(id int) (position int64, size int, err error) { 1482 | if id < 0 || id >= len(u.chunks) { 1483 | return 0, 0, EARGS 1484 | } 1485 | return u.chunks[id].position, u.chunks[id].size, nil 1486 | } 1487 | 1488 | // UploadChunk uploads the chunk of id 1489 | func (u *Upload) UploadChunk(id int, chunk []byte) (err error) { 1490 | chk_start, chk_size, err := u.ChunkLocation(id) 1491 | if err != nil { 1492 | return err 1493 | } 1494 | if len(chunk) != chk_size { 1495 | return errors.New("upload chunk is wrong size") 1496 | } 1497 | ctr_iv, err := bytes_to_a32(u.kiv) 1498 | if err != nil { 1499 | return err 1500 | } 1501 | ctr_iv[2] = uint32(uint64(chk_start) / 0x1000000000) 1502 | ctr_iv[3] = uint32(chk_start / 0x10) 1503 | bctr_iv, err := a32_to_bytes(ctr_iv) 1504 | if err != nil { 1505 | return err 1506 | } 1507 | ctr_aes := cipher.NewCTR(u.aes_block, bctr_iv) 1508 | 1509 | enc := cipher.NewCBCEncrypter(u.aes_block, u.iv) 1510 | 1511 | i := 0 1512 | block := make([]byte, 16) 1513 | paddedchunk := paddnull(chunk, 16) 1514 | for i = 0; i < len(paddedchunk); i += 16 { 1515 | copy(block[0:16], paddedchunk[i:i+16]) 1516 | enc.CryptBlocks(block, block) 1517 | } 1518 | 1519 | var rsp *http.Response 1520 | var req *http.Request 1521 | ctr_aes.XORKeyStream(chunk, chunk) 1522 | chk_url := fmt.Sprintf("%s/%d", u.uploadUrl, chk_start) 1523 | 1524 | sleepTime := minSleepTime // initial backoff time 1525 | for retry := 0; retry < u.m.retries+1; retry++ { 1526 | reader := bytes.NewBuffer(chunk) 1527 | req, err = http.NewRequest("POST", chk_url, 
reader)
		if err != nil {
			return err
		}
		rsp, err = u.m.client.Do(req)
		if err == nil {
			if rsp.StatusCode == 200 {
				break
			}
			err = errors.New("Http Status: " + rsp.Status)
			_ = rsp.Body.Close()
		}
		u.m.debugf("%s: Retry upload chunk %d/%d: %v", u.name, retry, u.m.retries, err)
		backOffSleep(&sleepTime)
	}
	if err != nil {
		return err
	}
	if rsp == nil {
		return errors.New("retries exceeded")
	}

	chunk_resp, err := io.ReadAll(rsp.Body)
	if err != nil {
		_ = rsp.Body.Close()
		return err
	}

	err = rsp.Body.Close()
	if err != nil {
		return err
	}

	// A non-empty body is the upload completion handle, returned by the
	// server only for the final chunk.
	if !bytes.Equal(chunk_resp, nil) {
		u.mutex.Lock()
		u.completion_handle = chunk_resp
		u.mutex.Unlock()
	}

	// Update chunk MACs on success only
	u.mutex.Lock()
	if len(u.chunk_macs) > 0 {
		u.chunk_macs[id] = make([]byte, 16)
		copy(u.chunk_macs[id], block)
	}
	u.mutex.Unlock()

	return nil
}

// Finish completes the upload: it condenses the per-chunk MACs into the
// file's meta-MAC, encrypts the node key with the master key and issues the
// "p" (put node) command. It returns the newly created node.
func (u *Upload) Finish() (node *Node, err error) {
	// CBC-MAC over the per-chunk MACs yields the 16-byte file MAC.
	mac_data := make([]byte, 16)
	for _, v := range u.chunk_macs {
		u.mac_enc.CryptBlocks(mac_data, v)
	}

	t, err := bytes_to_a32(mac_data)
	if err != nil {
		return nil, err
	}
	// Fold the 128-bit MAC down to the 64-bit meta-MAC stored in the key.
	meta_mac := []uint32{t[0] ^ t[1], t[2] ^ t[3]}

	attr := FileAttr{u.name}

	attr_data, err := encryptAttr(u.kbytes, attr)
	if err != nil {
		return nil, err
	}

	// Compressed node key: first half is the AES key XORed with the
	// IV/meta-MAC words, second half carries IV and meta-MAC directly.
	key := []uint32{u.ukey[0] ^ u.ukey[4], u.ukey[1] ^ u.ukey[5],
		u.ukey[2] ^ meta_mac[0], u.ukey[3] ^ meta_mac[1],
		u.ukey[4], u.ukey[5], meta_mac[0], meta_mac[1]}

	buf, err := a32_to_bytes(key)
	if err != nil {
		return nil, err
	}

	master_aes, err := aes.NewCipher(u.m.k)
	if err != nil {
		return nil, err
	}
	// Each 16-byte half is encrypted independently with a zero IV
	// (equivalent to single-block ECB with the master key).
	enc := cipher.NewCBCEncrypter(master_aes, zero_iv)
	enc.CryptBlocks(buf[:16], buf[:16])
	enc = cipher.NewCBCEncrypter(master_aes, zero_iv)
	enc.CryptBlocks(buf[16:], buf[16:])

	var cmsg [1]UploadCompleteMsg
	var cres [1]UploadCompleteResp

	cmsg[0].Cmd = "p"
	cmsg[0].T = u.parenthash
	cmsg[0].N[0].H = string(u.completion_handle)
	cmsg[0].N[0].T = FILE
	cmsg[0].N[0].A = attr_data
	cmsg[0].N[0].K = base64urlencode(buf)

	request, err := json.Marshal(cmsg)
	if err != nil {
		return nil, err
	}
	result, err := u.m.api_request(request)
	if err != nil {
		return nil, err
	}

	err = json.Unmarshal(result, &cres)
	if err != nil {
		return nil, err
	}

	u.m.FS.mutex.Lock()
	defer u.m.FS.mutex.Unlock()
	return u.m.addFSNode(cres[0].F[0])
}

// Upload a file to the filesystem under parent. If name is empty, the
// basename of srcpath is used. If progress is non-nil, the size of each
// completed chunk is sent on it; the channel is closed when UploadFile
// returns (successfully or not).
func (m *Mega) UploadFile(srcpath string, parent *Node, name string, progress *chan int) (node *Node, err error) {
	defer func() {
		if progress != nil {
			close(*progress)
		}
	}()

	var infile *os.File
	var fileSize int64

	// A Stat failure is deliberately tolerated here (fileSize stays 0);
	// OpenFile below reports the real error for a missing file.
	info, err := os.Stat(srcpath)
	if err == nil {
		fileSize = info.Size()
	}

	infile, err = os.OpenFile(srcpath, os.O_RDONLY, 0666)
	if err != nil {
		return nil, err
	}
	defer func() {
		e := infile.Close()
		if err == nil {
			err = e
		}
	}()

	if name == "" {
		name = filepath.Base(srcpath)
	}

	u, err := m.NewUpload(parent, name, fileSize)
	if err != nil {
		return nil, err
	}

	workch := make(chan int)
	errch := make(chan error, m.ul_workers)
	wg := sync.WaitGroup{}

	// Fire chunk upload workers
	for w := 0; w < m.ul_workers; w++ {
		wg.Add(1)

		go func() {
			defer wg.Done()

			for id := range workch {
				chk_start, chk_size, err := u.ChunkLocation(id)
				if err != nil {
					errch <- err
					return
				}
				chunk := make([]byte, chk_size)
				n, err := infile.ReadAt(chunk, chk_start)
				if err != nil && err != io.EOF {
					errch <- err
					return
				}
				if n != len(chunk) {
					errch <- errors.New("chunk too short")
					return
				}

				err = u.UploadChunk(id, chunk)
				if err != nil {
					errch <- err
					return
				}

				if progress != nil {
					*progress <- chk_size
				}
			}
		}()
	}

	// Place chunk upload jobs on the channel, bailing out as soon as any
	// worker reports an error.
	err = nil
	for id := 0; id < u.Chunks() && err == nil; {
		select {
		case workch <- id:
			id++
		case err = <-errch:
		}
	}

	close(workch)

	wg.Wait()

	// A worker may have failed after the feeder loop above had already
	// queued its last chunk; such errors sit in the buffered errch and
	// were previously ignored, allowing a corrupt upload to be finalized.
	if err == nil {
		select {
		case err = <-errch:
		default:
		}
	}

	if err != nil {
		return nil, err
	}

	return u.Finish()
}

// Move a file from one location to another
func (m *Mega) Move(src *Node, parent *Node) error {
	m.FS.mutex.Lock()
	defer m.FS.mutex.Unlock()

	if src == nil || parent == nil {
		return EARGS
	}
	var msg [1]MoveFileMsg
	var err error

	msg[0].Cmd = "m"
	msg[0].N = src.hash
	msg[0].T = parent.hash
	msg[0].I, err = randString(10)
	if err != nil {
		return err
	}

	request, err := json.Marshal(msg)
	if err != nil {
		return err
	}
	_, err = m.api_request(request)
	if err != nil {
		return err
	}

	// Re-link the node in the in-memory tree to mirror the server move.
	if src.parent != nil {
		src.parent.removeChild(src)
	}

	parent.addChild(src)
	src.parent = parent
| 1776 | return nil 1777 | } 1778 | 1779 | // Rename a file or folder 1780 | func (m *Mega) Rename(src *Node, name string) error { 1781 | m.FS.mutex.Lock() 1782 | defer m.FS.mutex.Unlock() 1783 | 1784 | if src == nil { 1785 | return EARGS 1786 | } 1787 | var msg [1]FileAttrMsg 1788 | 1789 | master_aes, err := aes.NewCipher(m.k) 1790 | if err != nil { 1791 | return err 1792 | } 1793 | attr := FileAttr{name} 1794 | attr_data, err := encryptAttr(src.meta.key, attr) 1795 | if err != nil { 1796 | return err 1797 | } 1798 | key := make([]byte, len(src.meta.compkey)) 1799 | err = blockEncrypt(master_aes, key, src.meta.compkey) 1800 | if err != nil { 1801 | return err 1802 | } 1803 | 1804 | msg[0].Cmd = "a" 1805 | msg[0].Attr = attr_data 1806 | msg[0].Key = base64urlencode(key) 1807 | msg[0].N = src.hash 1808 | msg[0].I, err = randString(10) 1809 | if err != nil { 1810 | return err 1811 | } 1812 | 1813 | req, err := json.Marshal(msg) 1814 | if err != nil { 1815 | return err 1816 | } 1817 | _, err = m.api_request(req) 1818 | if err != nil { 1819 | return err 1820 | } 1821 | 1822 | src.name = name 1823 | 1824 | return nil 1825 | } 1826 | 1827 | // Create a directory in the filesystem 1828 | func (m *Mega) CreateDir(name string, parent *Node) (*Node, error) { 1829 | m.FS.mutex.Lock() 1830 | defer m.FS.mutex.Unlock() 1831 | 1832 | if parent == nil { 1833 | return nil, EARGS 1834 | } 1835 | var msg [1]UploadCompleteMsg 1836 | var res [1]UploadCompleteResp 1837 | 1838 | compkey := []uint32{0, 0, 0, 0, 0, 0} 1839 | for i := range compkey { 1840 | compkey[i] = uint32(mrand.Int31()) 1841 | } 1842 | 1843 | master_aes, err := aes.NewCipher(m.k) 1844 | if err != nil { 1845 | return nil, err 1846 | } 1847 | attr := FileAttr{name} 1848 | ukey, err := a32_to_bytes(compkey[:4]) 1849 | if err != nil { 1850 | return nil, err 1851 | } 1852 | attr_data, err := encryptAttr(ukey, attr) 1853 | if err != nil { 1854 | return nil, err 1855 | } 1856 | key := make([]byte, len(ukey)) 1857 | err = 
blockEncrypt(master_aes, key, ukey) 1858 | if err != nil { 1859 | return nil, err 1860 | } 1861 | 1862 | msg[0].Cmd = "p" 1863 | msg[0].T = parent.hash 1864 | msg[0].N[0].H = "xxxxxxxx" 1865 | msg[0].N[0].T = FOLDER 1866 | msg[0].N[0].A = attr_data 1867 | msg[0].N[0].K = base64urlencode(key) 1868 | msg[0].I, err = randString(10) 1869 | if err != nil { 1870 | return nil, err 1871 | } 1872 | 1873 | req, err := json.Marshal(msg) 1874 | if err != nil { 1875 | return nil, err 1876 | } 1877 | result, err := m.api_request(req) 1878 | if err != nil { 1879 | return nil, err 1880 | } 1881 | 1882 | err = json.Unmarshal(result, &res) 1883 | if err != nil { 1884 | return nil, err 1885 | } 1886 | node, err := m.addFSNode(res[0].F[0]) 1887 | 1888 | return node, err 1889 | } 1890 | 1891 | // Delete a file or directory from filesystem 1892 | func (m *Mega) Delete(node *Node, destroy bool) error { 1893 | if node == nil { 1894 | return EARGS 1895 | } 1896 | if !destroy { 1897 | return m.Move(node, m.FS.trash) 1898 | } 1899 | 1900 | m.FS.mutex.Lock() 1901 | defer m.FS.mutex.Unlock() 1902 | 1903 | var msg [1]FileDeleteMsg 1904 | var err error 1905 | msg[0].Cmd = "d" 1906 | msg[0].N = node.hash 1907 | msg[0].I, err = randString(10) 1908 | if err != nil { 1909 | return err 1910 | } 1911 | 1912 | req, err := json.Marshal(msg) 1913 | if err != nil { 1914 | return err 1915 | } 1916 | _, err = m.api_request(req) 1917 | if err != nil { 1918 | return err 1919 | } 1920 | 1921 | parent := m.FS.lookup[node.hash] 1922 | parent.removeChild(node) 1923 | delete(m.FS.lookup, node.hash) 1924 | 1925 | return nil 1926 | } 1927 | 1928 | // process an add node event 1929 | func (m *Mega) processAddNode(evRaw []byte) error { 1930 | m.FS.mutex.Lock() 1931 | defer m.FS.mutex.Unlock() 1932 | 1933 | var ev FSEvent 1934 | err := json.Unmarshal(evRaw, &ev) 1935 | if err != nil { 1936 | return err 1937 | } 1938 | 1939 | for _, itm := range ev.T.Files { 1940 | _, err = m.addFSNode(itm) 1941 | if err != nil { 1942 | 
return err 1943 | } 1944 | } 1945 | return nil 1946 | } 1947 | 1948 | // process an update node event 1949 | func (m *Mega) processUpdateNode(evRaw []byte) error { 1950 | m.FS.mutex.Lock() 1951 | defer m.FS.mutex.Unlock() 1952 | 1953 | var ev FSEvent 1954 | err := json.Unmarshal(evRaw, &ev) 1955 | if err != nil { 1956 | return err 1957 | } 1958 | 1959 | node := m.FS.hashLookup(ev.N) 1960 | if node == nil { 1961 | return ENOENT 1962 | } 1963 | attr, err := decryptAttr(node.meta.key, ev.Attr) 1964 | if err == nil { 1965 | node.name = attr.Name 1966 | } else { 1967 | node.name = "BAD ATTRIBUTE" 1968 | } 1969 | 1970 | node.ts = time.Unix(ev.Ts, 0) 1971 | return nil 1972 | } 1973 | 1974 | // process a delete node event 1975 | func (m *Mega) processDeleteNode(evRaw []byte) error { 1976 | m.FS.mutex.Lock() 1977 | defer m.FS.mutex.Unlock() 1978 | 1979 | var ev FSEvent 1980 | err := json.Unmarshal(evRaw, &ev) 1981 | if err != nil { 1982 | return err 1983 | } 1984 | 1985 | node := m.FS.hashLookup(ev.N) 1986 | if node != nil && node.parent != nil { 1987 | node.parent.removeChild(node) 1988 | delete(m.FS.lookup, node.hash) 1989 | } 1990 | return nil 1991 | } 1992 | 1993 | // Listen for server event notifications and play actions 1994 | func (m *Mega) pollEvents() { 1995 | var err error 1996 | var resp *http.Response 1997 | sleepTime := minSleepTime // initial backoff time 1998 | for { 1999 | if err != nil { 2000 | m.debugf("pollEvents: error from server", err) 2001 | backOffSleep(&sleepTime) 2002 | } else { 2003 | // reset sleep time to minimum on success 2004 | sleepTime = minSleepTime 2005 | } 2006 | 2007 | url := fmt.Sprintf("%s/sc?sn=%s&sid=%s", m.baseurl, m.ssn, m.sid) 2008 | resp, err = m.client.Post(url, "application/xml", nil) 2009 | if err != nil { 2010 | m.logf("pollEvents: Error fetching status: %s", err) 2011 | continue 2012 | } 2013 | 2014 | if resp.StatusCode != 200 { 2015 | m.logf("pollEvents: Error from server: %s", resp.Status) 2016 | _ = resp.Body.Close() 
2017 | continue 2018 | } 2019 | 2020 | buf, err := io.ReadAll(resp.Body) 2021 | if err != nil { 2022 | m.logf("pollEvents: Error reading body: %v", err) 2023 | _ = resp.Body.Close() 2024 | continue 2025 | } 2026 | err = resp.Body.Close() 2027 | if err != nil { 2028 | m.logf("pollEvents: Error closing body: %v", err) 2029 | continue 2030 | } 2031 | 2032 | // body is read and closed here 2033 | 2034 | // First attempt to parse an array 2035 | var events Events 2036 | err = json.Unmarshal(buf, &events) 2037 | if err != nil { 2038 | // Try parsing as a lone error message 2039 | var emsg ErrorMsg 2040 | err = json.Unmarshal(buf, &emsg) 2041 | if err != nil { 2042 | m.logf("pollEvents: Bad response received from server: %s", buf) 2043 | } else { 2044 | err = parseError(emsg) 2045 | if err == EAGAIN { 2046 | } else if err != nil { 2047 | m.logf("pollEvents: Error received from server: %v", err) 2048 | } 2049 | } 2050 | continue 2051 | } 2052 | 2053 | // if wait URL is set, then fetch it and continue - we 2054 | // don't expect anything else if we have a wait URL. 
2055 | if events.W != "" { 2056 | m.waitEventsFire() 2057 | if len(events.E) > 0 { 2058 | m.logf("pollEvents: Unexpected event with w set: %s", buf) 2059 | } 2060 | resp, err = m.client.Get(events.W) 2061 | if err == nil { 2062 | _ = resp.Body.Close() 2063 | } 2064 | continue 2065 | } 2066 | m.ssn = events.Sn 2067 | 2068 | // For each event in the array, parse it 2069 | for _, evRaw := range events.E { 2070 | // First attempt to unmarshal as an error message 2071 | var emsg ErrorMsg 2072 | err = json.Unmarshal(evRaw, &emsg) 2073 | if err == nil { 2074 | m.logf("pollEvents: Error message received %s", evRaw) 2075 | err = parseError(emsg) 2076 | if err != nil { 2077 | m.logf("pollEvents: Event from server was error: %v", err) 2078 | } 2079 | continue 2080 | } 2081 | 2082 | // Now unmarshal as a generic event 2083 | var gev GenericEvent 2084 | err = json.Unmarshal(evRaw, &gev) 2085 | if err != nil { 2086 | m.logf("pollEvents: Couldn't parse event from server: %v: %s", err, evRaw) 2087 | continue 2088 | } 2089 | m.debugf("pollEvents: Parsing event %q: %s", gev.Cmd, evRaw) 2090 | 2091 | // Work out what to do with the event 2092 | var process func([]byte) error 2093 | switch gev.Cmd { 2094 | case "t": // node addition 2095 | process = m.processAddNode 2096 | case "u": // node update 2097 | process = m.processUpdateNode 2098 | case "d": // node deletion 2099 | process = m.processDeleteNode 2100 | case "s", "s2": // share addition/update/revocation 2101 | case "c": // contact addition/update 2102 | case "k": // crypto key request 2103 | case "fa": // file attribute update 2104 | case "ua": // user attribute update 2105 | case "psts": // account updated 2106 | case "ipc": // incoming pending contact request (to us) 2107 | case "opc": // outgoing pending contact request (from us) 2108 | case "upci": // incoming pending contact request update (accept/deny/ignore) 2109 | case "upco": // outgoing pending contact request update (from them, accept/deny/ignore) 2110 | case "ph": 
// public links handles 2111 | case "se": // set email 2112 | case "mcc": // chat creation / peer's invitation / peer's removal 2113 | case "mcna": // granted / revoked access to a node 2114 | case "uac": // user access control 2115 | default: 2116 | m.debugf("pollEvents: Unknown message %q received: %s", gev.Cmd, evRaw) 2117 | } 2118 | 2119 | // process the event if we can 2120 | if process != nil { 2121 | err := process(evRaw) 2122 | if err != nil { 2123 | m.logf("pollEvents: Error processing event %q '%s': %v", gev.Cmd, evRaw, err) 2124 | } 2125 | } 2126 | } 2127 | } 2128 | } 2129 | 2130 | func (m *Mega) getLink(n *Node) (string, error) { 2131 | var msg [1]GetLinkMsg 2132 | var res [1]string 2133 | 2134 | msg[0].Cmd = "l" 2135 | msg[0].N = n.GetHash() 2136 | 2137 | req, err := json.Marshal(msg) 2138 | if err != nil { 2139 | return "", err 2140 | } 2141 | result, err := m.api_request(req) 2142 | if err != nil { 2143 | return "", err 2144 | } 2145 | err = json.Unmarshal(result, &res) 2146 | if err != nil { 2147 | return "", err 2148 | } 2149 | return res[0], nil 2150 | } 2151 | 2152 | // Exports public link for node, with or without decryption key included 2153 | func (m *Mega) Link(n *Node, includeKey bool) (string, error) { 2154 | id, err := m.getLink(n) 2155 | if err != nil { 2156 | return "", err 2157 | } 2158 | if includeKey { 2159 | m.FS.mutex.Lock() 2160 | key := base64urlencode(n.meta.compkey) 2161 | m.FS.mutex.Unlock() 2162 | return fmt.Sprintf("%v/#!%v!%v", BASE_DOWNLOAD_URL, id, key), nil 2163 | } else { 2164 | return fmt.Sprintf("%v/#!%v", BASE_DOWNLOAD_URL, id), nil 2165 | } 2166 | } 2167 | 2168 | // addRequestHeaders adds standard headers to a request 2169 | func addRequestHeaders(req *http.Request) { 2170 | userAgent := os.Getenv("X_MEGA_USER_AGENT") 2171 | if userAgent != "" || X_MEGA_USER_AGENT != "" { 2172 | req.Header.Set("User-Agent", userAgent) 2173 | } 2174 | req.Header.Set("Content-Type", "application/json") 2175 | } 2176 | 2177 | // 
addHashCashRequestHeaders adds standard headers and hashcash headers to a request 2178 | func addHashCashRequestHeaders(req *http.Request, token string, cashValue string) { 2179 | addRequestHeaders(req) 2180 | if token != "" && cashValue != "" { 2181 | req.Header.Set("X-Hashcash", fmt.Sprintf("1:%s:%s", token, cashValue)) 2182 | } 2183 | } 2184 | 2185 | // getAPIBaseURL returns the base URL for API requests 2186 | func getAPIBaseURL() string { 2187 | url := os.Getenv("X_MEGA_API_URL") 2188 | if url == "" { 2189 | return API_URL 2190 | } 2191 | return url 2192 | } 2193 | --------------------------------------------------------------------------------