├── README.md
├── google
│   ├── gcscat
│   │   └── main.go
│   ├── metaflag
│   │   └── main.go
│   └── gcs
│       └── gcs.go
├── LICENSE
├── diskcache
│   ├── cache_test.go
│   └── cache.go
└── http.go
/README.md:
--------------------------------------------------------------------------------
 1 | go get [-u] rsc.io/cloud
 2 | https://godoc.org/rsc.io/cloud
 3 | 
--------------------------------------------------------------------------------
/google/gcscat/main.go:
--------------------------------------------------------------------------------
 1 | // Copyright 2015 The Go Authors. All rights reserved.
 2 | // Use of this source code is governed by a BSD-style
 3 | // license that can be found in the LICENSE file.
 4 | 
 5 | package main
 6 | 
 7 | import (
 8 | 	"flag"
 9 | 	"fmt"
10 | 	"io"
11 | 	"log"
12 | 	"os"
13 | 
14 | 	"rsc.io/cloud/diskcache"
15 | 	"rsc.io/cloud/google/gcs"
16 | )
17 | 
18 | var (
19 | 	cache      *diskcache.Cache
20 | 	exitStatus int
21 | )
22 | 
23 | var (
24 | 	flagExpire   = flag.Duration("expire", 0, "expiration `interval`")
25 | 	flagCacheDir = flag.String("cache", "/tmp/gcscache", "store cache in `dir`")
26 | )
27 | 
28 | func usage() {
29 | 	fmt.Fprintf(os.Stderr, "usage: gcscat bucket/path ...\n")
30 | 	os.Exit(2)
31 | }
32 | 
33 | func main() {
34 | 	log.SetFlags(0)
35 | 	log.SetPrefix("gcscat: ")
36 | 	flag.Usage = usage
37 | 	flag.Parse()
38 | 	if flag.NArg() == 0 {
39 | 		usage()
40 | 	}
41 | 
42 | 	loader, err := gcs.NewLoader("/")
43 | 	if err != nil {
44 | 		log.Fatal(err)
45 | 	}
46 | 	cache, err = diskcache.New(*flagCacheDir, loader)
47 | 	if err != nil {
48 | 		log.Fatal(err)
49 | 	}
50 | 	if *flagExpire != 0 {
51 | 		cache.SetExpiration(*flagExpire)
52 | 	}
53 | 
54 | 	for _, arg := range flag.Args() {
55 | 		cat(arg)
56 | 	}
57 | 	os.Exit(exitStatus)
58 | }
59 | 
60 | func cat(arg string) {
61 | 	f, err := cache.Open(arg)
62 | 	if err != nil {
63 | 		log.Print(err)
64 | 		exitStatus = 1
65 | 		return
66 | 	}
67 | 	if _, err := io.Copy(os.Stdout, f); err != nil {
68 | 		log.Print(err)
69 | 		exitStatus = 1
70 | 	}
71 | 	f.Close()
72 | }
73 | 
--------------------------------------------------------------------------------
/google/metaflag/main.go:
--------------------------------------------------------------------------------
 1 | // Copyright 2015 The Go Authors. All rights reserved.
 2 | // Use of this source code is governed by a BSD-style
 3 | // license that can be found in the LICENSE file.
 4 | 
 5 | // Package metaflag sets command-line flags according to the
 6 | // metadata specified in a Google Compute Engine instance creation request.
 7 | package metaflag
 8 | 
 9 | import (
10 | 	"flag"
11 | 	"log"
12 | 
13 | 	"google.golang.org/cloud/compute/metadata"
14 | )
15 | 
16 | // Init looks up each registered command-line flag in the
17 | // Google Compute Engine instance metadata server to see
18 | // if a value was specified in the instance creation request.
19 | // If so, Init sets the flag to that value.
20 | //
21 | // For example, if the instance request said --metadata x=y and there is
22 | // a flag named x, metaflag.Init causes the flag x to be set to y.
23 | //
24 | // The expected use is:
25 | //
26 | //	metaflag.Init()
27 | //	flag.Parse()
28 | //
29 | // to allow the instance creation request to set default flag values but still allow
30 | // the command line to override those.
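//
// A minimal sketch of that pattern (the addr flag here is purely illustrative):
//
//	var addr = flag.String("addr", ":8080", "listen address")
//
//	func main() {
//		metaflag.Init() // apply --metadata addr=... from the instance request, if any
//		flag.Parse()    // an explicit -addr on the command line still overrides it
//		log.Println("listening on", *addr)
//	}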
31 | func Init() { 32 | println("METAFLAG") 33 | flag.VisitAll(func(f *flag.Flag) { 34 | val, err := metadata.InstanceAttributeValue(f.Name) 35 | if err != nil { 36 | println("GET", f.Name, "=>", err.Error()) 37 | } 38 | if err == nil { 39 | println("GET", f.Name, "=>", val) 40 | if err := flag.Set(f.Name, val); err != nil { 41 | log.Fatal(err) 42 | } 43 | } 44 | }) 45 | } 46 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2009 The Go Authors. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are 5 | met: 6 | 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above 10 | copyright notice, this list of conditions and the following disclaimer 11 | in the documentation and/or other materials provided with the 12 | distribution. 13 | * Neither the name of Google Inc. nor the names of its 14 | contributors may be used to endorse or promote products derived from 15 | this software without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /diskcache/cache_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 The Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 
4 | 5 | package diskcache 6 | 7 | import ( 8 | "fmt" 9 | "io/ioutil" 10 | "os" 11 | "strconv" 12 | "testing" 13 | "time" 14 | ) 15 | 16 | func newCache(t *testing.T, loader Loader) (c *Cache, cleanup func()) { 17 | dir, err := ioutil.TempDir("", "diskcache-test-") 18 | if err != nil { 19 | t.Fatal(err) 20 | } 21 | cleanup = func() { os.RemoveAll(dir) } 22 | c, err = New(dir+"/cache", loader) 23 | if err != nil { 24 | cleanup() 25 | t.Fatal(err) 26 | } 27 | return c, cleanup 28 | } 29 | 30 | func readFile(t *testing.T, c *Cache, name string) []byte { 31 | f, err := c.Open(name) 32 | if err != nil { 33 | t.Fatalf("readFile %s: %v", name, err) 34 | } 35 | data, err := ioutil.ReadAll(f) 36 | if err != nil { 37 | t.Fatalf("readFile %s: %v", name, err) 38 | } 39 | return data 40 | } 41 | 42 | type loaderFunc func(string, *os.File, []byte) (bool, []byte, error) 43 | 44 | func (f loaderFunc) Load(path string, target *os.File, meta []byte) (cacheValid bool, newMeta []byte, err error) { 45 | return f(path, target, meta) 46 | } 47 | 48 | func loadHello(path string, target *os.File, meta []byte) (bool, []byte, error) { 49 | n, _ := strconv.Atoi(string(meta)) 50 | n++ 51 | 52 | fmt.Fprintf(target, "hello, %s #%d\n", path, n) 53 | return false, []byte(fmt.Sprint(n)), nil 54 | } 55 | 56 | func TestBasic(t *testing.T) { 57 | c, cleanup := newCache(t, loaderFunc(loadHello)) 58 | defer cleanup() 59 | 60 | const first = "hello, /file #1\n" 61 | if data := readFile(t, c, "file"); string(data) != first { 62 | t.Fatalf("original read file = %q, want %q", data, first) 63 | } 64 | if data2 := readFile(t, c, "file"); string(data2) != first { 65 | t.Fatalf("cached read file = %q, want %q", data2, first) 66 | } 67 | 68 | const second = "hello, /file #2\n" 69 | c.SetExpiration(1 * time.Nanosecond) 70 | if data3 := readFile(t, c, "file"); string(data3) != second { 71 | t.Fatalf("expired read file = %q, want %q", data3, second) 72 | } 73 | const third = "hello, /file #3\n" 74 | if data4 := readFile(t, c, "file"); string(data4) != third { 75 | t.Fatalf("recached read file = %q, want %q", data4, third) 76 | } 77 | c.SetExpiration(0) 78 | if data5 := readFile(t, c, "file"); string(data5) != third { 79 | t.Fatalf("recached read file = %q, want %q", data5, third) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /google/gcs/gcs.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 The Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | // Package gcs implements diskcache.Loader using Google Cloud Storage. 
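//
// A minimal sketch of typical use, assuming default Google credentials are
// available and a bucket named my-bucket exists (both are illustrative):
//
//	loader, err := gcs.NewLoader("/my-bucket")
//	if err != nil {
//		log.Fatal(err)
//	}
//	cache, err := diskcache.New("/tmp/gcscache", loader)
//	if err != nil {
//		log.Fatal(err)
//	}
//	data, err := cache.ReadFile("/index.html") // reads gs://my-bucket/index.html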
  6 | package gcs
  7 | 
  8 | import (
  9 | 	"fmt"
 10 | 	"io"
 11 | 	"net/http"
 12 | 	"os"
 13 | 	pathpkg "path"
 14 | 	"strings"
 15 | 
 16 | 	"golang.org/x/oauth2"
 17 | 	"golang.org/x/oauth2/google"
 18 | 	"rsc.io/cloud/diskcache"
 19 | )
 20 | 
 21 | const scopeReadOnly = "https://www.googleapis.com/auth/devstorage.read_only"
 22 | 
 23 | // NewLoader returns a diskcache.Loader that reads objects from
 24 | // Google Cloud Storage using the default Google credentials.
 25 | // The root is joined to each path passed to Load, and the first
 26 | // element of the joined path names the bucket.
 27 | func NewLoader(root string) (diskcache.Loader, error) {
 28 | 	client, err := google.DefaultClient(oauth2.NoContext, scopeReadOnly)
 29 | 	if err != nil {
 30 | 		return nil, err
 31 | 	}
 32 | 	return NewLoaderWithClient(client, root), nil
 33 | }
 34 | 
 35 | // NewLoaderWithClient is like NewLoader but uses the provided HTTP client,
 36 | // which should be authorized to read the relevant buckets.
 37 | func NewLoaderWithClient(client *http.Client, root string) diskcache.Loader {
 38 | 	l := &loader{
 39 | 		client: client,
 40 | 		root:   root,
 41 | 	}
 42 | 	return l
 43 | }
 44 | 
 45 | type loader struct {
 46 | 	client *http.Client
 47 | 	root   string
 48 | }
 49 | 
 50 | func (l *loader) Load(path string, target *os.File, meta []byte) (cacheValid bool, newMeta []byte, err error) {
 51 | 	path = pathpkg.Join("/", l.root, path)[1:]
 52 | 	println("LOAD", path)
 53 | 	defer func() {
 54 | 		if err != nil {
 55 | 			println("LOAD ERROR", err.Error())
 56 | 		}
 57 | 	}()
 58 | 	i := strings.Index(path, "/")
 59 | 	if i < 0 {
 60 | 		return false, nil, fmt.Errorf("path too short")
 61 | 	}
 62 | 
 63 | 	// NOTE(rsc): It's tempting to use the JSON API v1 instead of the XML API,
 64 | 	// just on general principle, but the URL encoding is different.
 65 | 	// For gs://swtch/web/index.html, the XML API URL is
 66 | 	//	https://storage.googleapis.com/swtch/web/index.html
 67 | 	// but the JSON API v1 URL for the data is
 68 | 	//	https://www.googleapis.com/storage/v1/b/swtch/o/web%2Findex.html?alt=media
 69 | 	// There's no body here, just the raw content, so XML vs JSON doesn't actually matter to us.
 70 | 	// We use the one with the simpler URL.
 71 | 	// Also I seem to get 404s when I try even the correct JSON URL here,
 72 | 	// although somehow not from curl. This is clearly a giant mess.
 73 | 	// There may be an escaping problem lurking here even with the XML API. Not clear.
 74 | 
 75 | 	url := "https://storage.googleapis.com/" + path
 76 | 	println("URL", url)
 77 | 	req, err := http.NewRequest("GET", url, nil)
 78 | 	if err != nil {
 79 | 		return false, nil, err
 80 | 	}
 81 | 	if len(meta) > 0 {
 82 | 		req.Header.Set("If-None-Match", string(meta))
 83 | 	}
 84 | 	resp, err := l.client.Do(req)
 85 | 	if err != nil {
 86 | 		return false, nil, err
 87 | 	}
 88 | 	defer resp.Body.Close()
 89 | 	if resp.StatusCode == 304 {
 90 | 		return true, meta, nil
 91 | 	}
 92 | 	if resp.StatusCode != 200 {
 93 | 		if resp.StatusCode == 404 {
 94 | 			return false, nil, &os.PathError{Path: path, Op: "read", Err: os.ErrNotExist}
 95 | 		}
 96 | 		return false, nil, &os.PathError{Path: path, Op: "read", Err: fmt.Errorf("%s", resp.Status)}
 97 | 	}
 98 | 
 99 | 	// TODO(rsc): Maybe work harder with range requests to restart interrupted transfers.
100 | 	_, err = io.Copy(target, resp.Body)
101 | 	if err != nil {
102 | 		return false, nil, err
103 | 	}
104 | 
105 | 	meta = []byte(resp.Header.Get("Etag"))
106 | 	return false, meta, nil
107 | }
108 | 
--------------------------------------------------------------------------------
/http.go:
--------------------------------------------------------------------------------
 1 | // Package cloud provides utilities for writing simple cloud-based web servers.
 2 | //
 3 | // Most of the heavy lifting is done by packages in subdirectories.
 4 | // Package cloud itself mainly implements connections to the
 5 | // standard library and other interfaces.
 6 | //
 7 | // This entire repo is but the draft of a draft. It exists to support the swtch.com web server.
 8 | // It may mature into something more general, or it may not.
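//
// A rough sketch of serving a cached tree over plain HTTP, where cache is a
// *diskcache.Cache built elsewhere (for example on the gcs loader) and the
// directory names are illustrative:
//
//	http.Handle("/", http.FileServer(cloud.Dir(cache, "/www")))
//	log.Fatal(cloud.ServeHTTP(":80", nil))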
9 | // 10 | package cloud 11 | 12 | import ( 13 | "crypto/tls" 14 | "io" 15 | "log" 16 | "net" 17 | "net/http" 18 | "os" 19 | "strings" 20 | "time" 21 | 22 | "rsc.io/cloud/diskcache" 23 | ) 24 | 25 | // Dir returns an http.FileSystem corresponding to the cached file subtree rooted at dir. 26 | // 27 | // A typical use of Dir is to pass to http.FileServer to create an HTTP handler 28 | // serving files from a cached subtree: 29 | // 30 | // http.Handle("/static/", http.StripPrefix("/static", http.FileServer(cloud.Dir(cache, "/myfiles")))) 31 | // 32 | func Dir(cache *diskcache.Cache, dir string) http.FileSystem { 33 | return &fileSystem{cache, dir} 34 | } 35 | 36 | type fileSystem struct { 37 | c *diskcache.Cache 38 | root string 39 | } 40 | 41 | func (fs *fileSystem) Open(path string) (http.File, error) { 42 | if strings.Contains(path, "/cgi-bin/") || strings.Contains(path, "/.") { 43 | return nil, &os.PathError{Path: path, Op: "open", Err: os.ErrNotExist} 44 | } 45 | f, err := fs.c.Open(fs.root + "/" + path) 46 | if err != nil { 47 | // File doesn't exist, but might be a directory. 48 | // If index.html exists, return an empty directory. 49 | // That's enough for the http server to try to open index.html. 50 | if f, err1 := fs.c.Open(fs.root + "/" + path + "/index.html"); err1 == nil { 51 | f.Close() 52 | return &emptyDir{}, nil 53 | } 54 | log.Printf("cloud.Dir: open %s: %v", path, err) 55 | return nil, err 56 | } 57 | return f, nil 58 | } 59 | 60 | type emptyDir struct{} 61 | 62 | func (*emptyDir) Close() error { return nil } 63 | func (*emptyDir) Read([]byte) (int, error) { return 0, io.EOF } 64 | func (*emptyDir) Readdir(count int) ([]os.FileInfo, error) { return nil, io.EOF } 65 | func (*emptyDir) Seek(offset int64, whence int) (int64, error) { return 0, nil } 66 | func (*emptyDir) Stat() (os.FileInfo, error) { return &dirInfo{}, nil } 67 | 68 | type dirInfo struct{} 69 | 70 | func (*dirInfo) Name() string { return "/" } 71 | func (*dirInfo) Size() int64 { return 0 } 72 | func (*dirInfo) Mode() os.FileMode { return os.ModeDir | 0555 } 73 | func (*dirInfo) ModTime() time.Time { return time.Now() } 74 | func (*dirInfo) IsDir() bool { return true } 75 | func (*dirInfo) Sys() interface{} { return nil } 76 | 77 | // LoadX509KeyPair is crypto/tls's LoadX509KeyPair, but it reads the key pair from cache instead of local disk. 78 | func LoadX509KeyPair(cache *diskcache.Cache, certFile, keyFile string) (tls.Certificate, error) { 79 | certPEMBlock, err := cache.ReadFile(certFile) 80 | if err != nil { 81 | return tls.Certificate{}, err 82 | } 83 | keyPEMBlock, err := cache.ReadFile(keyFile) 84 | if err != nil { 85 | return tls.Certificate{}, err 86 | } 87 | return tls.X509KeyPair(certPEMBlock, keyPEMBlock) 88 | } 89 | 90 | // ServeHTTPS is net/http's ListenAndServeTLS, but it reads the key pair from cache instead of local disk. 
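//
// For example, with certificate and key stored in the same cached tree
// (the paths below are illustrative):
//
//	err := cloud.ServeHTTPS(":https", cache, "/secrets/cert.pem", "/secrets/key.pem", nil)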
91 | func ServeHTTPS(addr string, cache *diskcache.Cache, certFile, keyFile string, handler http.Handler) error { 92 | srv := &http.Server{Addr: addr, Handler: handler} 93 | addr = srv.Addr 94 | if addr == "" { 95 | addr = ":https" 96 | } 97 | config := &tls.Config{} 98 | if srv.TLSConfig != nil { 99 | *config = *srv.TLSConfig 100 | } 101 | if config.NextProtos == nil { 102 | config.NextProtos = []string{"http/1.1"} 103 | } 104 | 105 | var err error 106 | config.Certificates = make([]tls.Certificate, 1) 107 | config.Certificates[0], err = LoadX509KeyPair(cache, certFile, keyFile) 108 | if err != nil { 109 | return err 110 | } 111 | 112 | ln, err := net.Listen("tcp", addr) 113 | if err != nil { 114 | return err 115 | } 116 | 117 | tlsListener := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, config) 118 | return srv.Serve(tlsListener) 119 | } 120 | 121 | // ServeHTTP invokes net/http's ListenAndServe; 122 | // it is here only for symmetry with ServeHTTPS. 123 | func ServeHTTP(addr string, handler http.Handler) error { 124 | return http.ListenAndServe(addr, handler) 125 | } 126 | 127 | // tcpKeepAliveListener sets TCP keep-alive timeouts on accepted 128 | // connections. It's used by ListenAndServe and ListenAndServeTLS so 129 | // dead TCP connections (e.g. closing laptop mid-download) eventually 130 | // go away. 131 | // 132 | // Copied from net/http. 133 | type tcpKeepAliveListener struct { 134 | *net.TCPListener 135 | } 136 | 137 | func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { 138 | tc, err := ln.AcceptTCP() 139 | if err != nil { 140 | return 141 | } 142 | tc.SetKeepAlive(true) 143 | tc.SetKeepAlivePeriod(3 * time.Minute) 144 | return tc, nil 145 | } 146 | -------------------------------------------------------------------------------- /diskcache/cache.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 The Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | // Package diskcache implements an on-disk cache of a remote file tree. 6 | // 7 | // A cache, of type *Cache, manages a local directory holding cached copies 8 | // of remote files obtained from a loader, of type Loader. The loader handles 9 | // reading remote files and checking whether a cached copy is still valid. 10 | // 11 | // To open a file, the cache first searches the local directory for a cached copy. 12 | // After being revalidated or fetched by the loader, a copy is considered 13 | // to be valid for a fixed duration, after which it is kept but considered expired. 14 | // If a copy exists and has not expired, the cache uses it. 15 | // If no copy exists or the copy has expired, the cache invokes the 16 | // loader to refresh the copy or fetch a new copy. 17 | // 18 | // A cached file is checked for expiration only during open, not during 19 | // individual reads. 20 | // 21 | // A cache may be used by multiple goroutines simultaneously. 22 | // Furthermore, multiple caches, even in separate processes, 23 | // may share a single directory, provided the caches use the same loader. 24 | // In this way, a cache can be reused across multiple program executions 25 | // and also be shared by concurrently executing programs. 26 | // If a cache directory is to be shared by concurrent programs, 27 | // it should usually be on local disk, because Unix file locking over 28 | // network file systems is almost always broken. 
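//
// A minimal sketch of a Loader and a cache built on it (constLoader and the
// paths below are purely illustrative):
//
//	type constLoader struct{}
//
//	func (constLoader) Load(path string, target *os.File, meta []byte) (bool, []byte, error) {
//		_, err := io.WriteString(target, "hello from "+path+"\n")
//		return false, nil, err // always refetch; no revalidation metadata
//	}
//
//	c, err := diskcache.New("/var/cache/remote", constLoader{})
//	if err != nil {
//		log.Fatal(err)
//	}
//	c.SetExpiration(5 * time.Minute) // revalidate cached copies after five minutes
//	data, err := c.ReadFile("/greeting")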
 29 | //
 30 | // There is no cache for file load errors.
 31 | //
 32 | // On-Disk Format
 33 | //
 34 | // Each cached file is stored on disk under a name derived from the
 35 | // SHA1 hash of the file name. The first three hex digits name a
 36 | // subdirectory of the cache root directory, and the remaining
 37 | // thirty-seven digits are used as the base name of a group of files
 38 | // within that subdirectory:
 39 | //
 40 | //	123/4567890123456789012345678901234567890.data
 41 | //	123/4567890123456789012345678901234567890.meta
 42 | //	123/4567890123456789012345678901234567890.used
 43 | //	123/4567890123456789012345678901234567890.next
 44 | //
 45 | // The .data file is the cached file content. If it exists, it is a complete copy,
 46 | // never a partial one.
 47 | //
 48 | // The .meta file is the metadata associated with the .data file.
 49 | // It contains the JSON encoding of a metadata struct.
 50 | // The modification time of the .meta file is the time that the .data file
 51 | // was last downloaded or revalidated. The .data file is considered to
 52 | // be valid until that time plus the expiration period.
 53 | // As a special case, if the .meta file has a modification time of
 54 | // January 1, 1970 00:00:00 UTC (Unix time 0), the .data file is
 55 | // considered expired, even if there is no expiration period.
 56 | //
 57 | // The .used file holds a single \n byte. It is rewritten each time
 58 | // the .data file is opened to satisfy a file open operation.
 59 | // The modification time of the .used file is therefore the time of the
 60 | // last use of the file.
 61 | //
 62 | // The .next file holds the next version of the cached file, while it is
 63 | // being downloaded. Once the download has completed, the cache
 64 | // renames the .next file onto the .data file. This sequence avoids
 65 | // overwriting the content of the .data file, which other clients
 66 | // might still be reading.
 67 | //
 68 | // To allow multiple instances of a cache to manage a shared directory,
 69 | // if a cache is doing the initial download of a file or revalidating
 70 | // an expired copy or redownloading a new copy, it must hold an
 71 | // exclusive BSD file lock on the .meta file (using flock(2)).
 72 | //
 73 | // After downloading a new file and installing it as a .data file, the
 74 | // cache must check that it has not exceeded the on-disk size limit.
 75 | // It checks by reading the sizes of all the .data files in the directory
 76 | // tree and the modification times of the .used files.
 77 | // It then removes the oldest cached files (.data, .meta, and .used)
 78 | // until the data files again fit within the limit. To remove a file,
 79 | // the cache must hold the .meta file lock.
 80 | //
 81 | // Warning Warning Warning
 82 | //
 83 | // This package is unfinished. In particular, DeleteAll and ExpireAll are unimplemented,
 84 | // as is the code to delete files to stay within the maximum data size limit.
 85 | //
 86 | package diskcache
 87 | 
 88 | import (
 89 | 	"crypto/sha1"
 90 | 	"encoding/json"
 91 | 	"fmt"
 92 | 	"io/ioutil"
 93 | 	"os"
 94 | 	pathpkg "path"
 95 | 	"path/filepath"
 96 | 	"sync/atomic"
 97 | 	"syscall"
 98 | 	"time"
 99 | )
100 | 
101 | // A Cache provides read-only access to a remote file tree,
102 | // caching opened files on local disk.
103 | type Cache struct {
104 | 	dir    string
105 | 	loader Loader
106 | 
107 | 	atomicExpiration int64
108 | 	atomicMaxData    int64
109 | }
110 | 
111 | // Loader is the interface Cache uses to load remote file content.
112 | //
113 | // The Load method fetches path from the remote location, writing it to target.
114 | // If the cache already has a (possibly expired) copy of the file, meta will be
115 | // the metadata returned by a previous call to Load. Otherwise meta is nil.
116 | // If the cached copy is still valid, Load should return cacheValid==true,
117 | // newMeta==meta (or an updated version), and err==nil.
118 | // Otherwise, Load should fetch the data, write it to target, and return
119 | // cacheValid==false, a new metadata block in newMeta, and err==nil.
120 | //
121 | // The elements in a file path are separated by slash ('/', U+002F)
122 | // characters, regardless of host operating system convention.
123 | type Loader interface {
124 | 	Load(path string, target *os.File, meta []byte) (cacheValid bool, newMeta []byte, err error)
125 | }
126 | 
127 | // metaDisk is the on-disk metadata storage format.
128 | type metaDisk struct {
129 | 	Path string
130 | 	CreateTime time.Time
131 | 	RefreshTime time.Time
132 | 	Load []byte
133 | }
134 | 
135 | // New returns a new Cache that reads files from loader,
136 | // storing cached copies in the directory dir.
137 | // If dir does not exist, New will attempt to create it.
138 | func New(dir string, loader Loader) (*Cache, error) {
139 | 	// Create dir if necessary.
140 | 	fi, err := os.Stat(dir)
141 | 	if err != nil || !fi.IsDir() {
142 | 		if err := os.Mkdir(dir, 0777); err != nil {
143 | 			return nil, err
144 | 		}
145 | 	}
146 | 
147 | 	c := &Cache{
148 | 		dir:    dir,
149 | 		loader: loader,
150 | 	}
151 | 	return c, nil
152 | }
153 | 
154 | // SetExpiration sets the duration after which a cached copy is
155 | // considered to have expired.
156 | // If the duration d is zero (the default), cached copies never expire.
157 | func (c *Cache) SetExpiration(d time.Duration) {
158 | 	atomic.StoreInt64(&c.atomicExpiration, int64(d))
159 | }
160 | 
161 | func (c *Cache) expiration() time.Duration {
162 | 	return time.Duration(atomic.LoadInt64(&c.atomicExpiration))
163 | }
164 | 
165 | // SetMaxData sets the maximum bytes of data to hold in cached copies.
166 | // The limit is imposed in a best effort fashion.
167 | // In particular, it does not apply to old copies that have not yet been closed,
168 | // nor to new copies that have not finished downloading,
169 | // nor to cache metadata.
170 | func (c *Cache) SetMaxData(max int64) {
171 | 	atomic.StoreInt64(&c.atomicMaxData, max)
172 | }
173 | 
174 | func (c *Cache) maxData() int64 {
175 | 	return atomic.LoadInt64(&c.atomicMaxData)
176 | }
177 | 
178 | func (c *Cache) locate(path string) (cleaned, prefix string) {
179 | 	cleaned = pathpkg.Clean("/" + path)
180 | 	sum := sha1.Sum([]byte(cleaned))
181 | 	h := fmt.Sprintf("%x", sum[:])
182 | 	parent := filepath.Join(c.dir, h[0:3])
183 | 	os.Mkdir(parent, 0777)
184 | 	return cleaned, filepath.Join(c.dir, h[0:3], h[3:])
185 | }
186 | 
187 | func (c *Cache) metaLock(prefix string) (*os.File, error) {
188 | 	name := prefix + ".meta"
189 | 	f, err := os.OpenFile(name, os.O_RDWR, 0666)
190 | 	if err != nil {
191 | 		return nil, err
192 | 	}
193 | 	if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil {
194 | 		f.Close()
195 | 		return nil, err
196 | 	}
197 | 	return f, nil
198 | }
199 | 
200 | // Open opens the file with the given path.
201 | // The caller is responsible for closing the returned file when finished with it.
202 | // The elements in a file path are separated by slash ('/', U+002F)
203 | // characters, regardless of host operating system convention.
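//
// A small sketch of reading a file through an existing cache c
// (the path is illustrative):
//
//	f, err := c.Open("/docs/readme.txt")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//	_, err = io.Copy(os.Stdout, f)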
204 | func (c *Cache) Open(path string) (*os.File, error) { 205 | path, prefix := c.locate(path) 206 | 207 | // Fast path: if not expired and data file exists, done. 208 | fi, err := os.Stat(prefix + ".meta") 209 | d := c.expiration() 210 | if err == nil && (d == 0 || time.Now().Before(fi.ModTime().Add(d))) { 211 | if data, err := os.Open(prefix + ".data"); err == nil { 212 | return data, nil 213 | } 214 | } 215 | 216 | // Otherwise lock .meta file, creating it if necessary. 217 | metaFile, err := c.metaLock(prefix) 218 | if err != nil { 219 | f, errCreate := os.OpenFile(prefix+".meta", os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) 220 | if errCreate == nil { 221 | f.Close() 222 | } 223 | metaFile, err = c.metaLock(prefix) 224 | if err != nil { 225 | if errCreate != nil { 226 | return nil, fmt.Errorf("creating metadata file: %v", errCreate) 227 | } 228 | return nil, err 229 | } 230 | } 231 | defer metaFile.Close() 232 | 233 | // Double-check expiration. 234 | // We hold the meta lock, so nothing should change underfoot. 235 | fi, err = metaFile.Stat() 236 | if err != nil { 237 | metaFile.Close() 238 | return nil, fmt.Errorf("stat'ing metadata file: %v", err) 239 | } 240 | data, errData := os.Open(prefix + ".data") 241 | if (d == 0 || time.Now().Before(fi.ModTime().Add(d))) && errData == nil { 242 | return data, nil 243 | } 244 | if errData == nil { 245 | data.Close() 246 | } 247 | defer metaFile.Close() 248 | 249 | // Read metadata. 250 | js, err := ioutil.ReadAll(metaFile) 251 | if err != nil { 252 | // TODO(rsc): Delete? 253 | return nil, fmt.Errorf("reading metadata file: %v", err) 254 | } 255 | var meta metaDisk 256 | if len(js) > 0 { 257 | if err := json.Unmarshal(js, &meta); err != nil { 258 | // TODO(rsc): Delete? 259 | return nil, fmt.Errorf("reading metadata file: %v", err) 260 | } 261 | } 262 | 263 | if errData != nil { 264 | os.Remove(prefix + ".data") 265 | meta.Load = nil 266 | } 267 | 268 | next, err := os.OpenFile(prefix+".next", os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) 269 | if err != nil { 270 | // Shouldn't happen, but maybe there is a stale .next file. Remove and try again. 271 | os.Remove(prefix + ".next") 272 | next, err = os.OpenFile(prefix+".next", os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) 273 | if err != nil { 274 | return nil, fmt.Errorf("creating cached file: %v", err) 275 | } 276 | } 277 | 278 | cacheValid, metaLoad, err := c.loader.Load(path, next, meta.Load) 279 | if err != nil { 280 | next.Close() 281 | return nil, err 282 | } 283 | 284 | meta.RefreshTime = time.Now() 285 | var nextSize int64 286 | if cacheValid { 287 | next.Close() 288 | os.Remove(prefix + ".next") 289 | } else { 290 | meta.CreateTime = meta.RefreshTime 291 | fi, err := next.Stat() 292 | if err != nil { 293 | return nil, fmt.Errorf("writing cached file: %v", err) 294 | } 295 | nextSize = fi.Size() 296 | if err := next.Close(); err != nil { 297 | return nil, fmt.Errorf("writing cached file: %v", err) 298 | } 299 | if err := os.Rename(prefix+".next", prefix+".data"); err != nil { 300 | // Shouldn't happen, but we did get the file. Use it. 301 | return nil, fmt.Errorf("installing cached file: %v", err) 302 | } 303 | } 304 | 305 | meta.Load = metaLoad 306 | meta.Path = path 307 | js, err = json.Marshal(&meta) 308 | if err != nil { 309 | return nil, fmt.Errorf("preparing meta file: %v", err) 310 | } 311 | 312 | // Use WriteFile instead of metaFile.Write in order to force 313 | // truncation of the meta file when the new JSON is less than the old JSON. 
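	// (ioutil.WriteFile opens the file with O_TRUNC, so a shorter new JSON
	// encoding cannot leave stale bytes from a longer old one behind.)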
314 | if err := ioutil.WriteFile(prefix+".meta", []byte(js), 0666); err != nil { 315 | // Unclear what state we are in now. 316 | // The write succeeded but close failed. 317 | // Cache is supposed to be on local disk, 318 | // so this should not be possible. 319 | // Hope for the best. 320 | _ = err 321 | } 322 | 323 | // We'd prefer to return the file named .data, not .next. Try. 324 | data, err = os.Open(prefix + ".data") 325 | if err != nil { 326 | return nil, err 327 | } 328 | metaFile.Close() 329 | 330 | if nextSize > 0 { 331 | c.checkDataLimit(nextSize) 332 | } 333 | 334 | return data, nil 335 | } 336 | 337 | func (c *Cache) ReadFile(path string) ([]byte, error) { 338 | f, err := c.Open(path) 339 | if err != nil { 340 | return nil, err 341 | } 342 | defer f.Close() 343 | return ioutil.ReadAll(f) 344 | } 345 | 346 | func (c *Cache) checkDataLimit(newSize int64) { 347 | } 348 | 349 | // Delete deletes the cache entry for the file with the given path. 350 | func (c *Cache) Delete(path string) error { 351 | path, prefix := c.locate(path) 352 | metaFile, err := c.metaLock(prefix) 353 | if err != nil { 354 | if os.IsNotExist(err) { 355 | return nil 356 | } 357 | return err 358 | } 359 | os.Remove(prefix + ".data") 360 | os.Remove(prefix + ".next") 361 | os.Remove(prefix + ".used") 362 | err = os.Remove(prefix + ".meta") 363 | metaFile.Close() 364 | if err != nil && !os.IsNotExist(err) { 365 | return err 366 | } 367 | return nil 368 | } 369 | 370 | // DeleteAll deletes all the cache entries. 371 | func (c *Cache) DeleteAll() error { 372 | panic("not implemented") 373 | } 374 | 375 | // Expire marks the cache entry for the file with the given path as expired. 376 | // The cache will have to revalidate the local copy, if any, before using it again. 377 | func (c *Cache) Expire(path string) error { 378 | path, prefix := c.locate(path) 379 | t := time.Unix(0, 0) 380 | err := os.Chtimes(prefix+".meta", t, t) 381 | if err != nil && !os.IsNotExist(err) { 382 | return err 383 | } 384 | return nil 385 | } 386 | 387 | // ExpireAll marks all cache entries as expired. 388 | func (c *Cache) ExpireAll() error { 389 | panic("not implemented") 390 | } 391 | --------------------------------------------------------------------------------