├── LICENSE ├── config.go ├── minihttp.go ├── log.go ├── site.go ├── README.md └── sitelist.go /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Kenny Levinsen 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 23 | -------------------------------------------------------------------------------- /config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "io/ioutil" 5 | "time" 6 | ) 7 | import "github.com/influxdata/toml" 8 | 9 | type Config struct { 10 | Root string 11 | 12 | DefaultHost string 13 | LogFile string 14 | LogLines int 15 | Development bool 16 | 17 | HTTP ConfigHTTP 18 | HTTPS ConfigHTTPS 19 | Command ConfigCommand 20 | } 21 | 22 | type ConfigHTTP struct { 23 | Address string 24 | } 25 | 26 | type ConfigHTTPS struct { 27 | Address string 28 | Cert string 29 | Key string 30 | } 31 | 32 | type ConfigCommand struct { 33 | Address string 34 | } 35 | 36 | type SiteConfig struct { 37 | General *SiteConfigGeneral 38 | Cache *SiteConfigCache 39 | Compression *SiteConfigCompression 40 | } 41 | 42 | type SiteConfigGeneral struct { 43 | NoDefaultFile bool 44 | DefaultFile string 45 | FancyFolder string 46 | } 47 | 48 | type SiteConfigCache struct { 49 | NoCacheFromMem bool 50 | NoCacheFromDisk bool 51 | CacheTimes map[string]Duration 52 | DefaultCacheTime Duration 53 | } 54 | 55 | type SiteConfigCompression struct { 56 | NoCompressFromMem bool 57 | NoCompressFromDisk bool 58 | MinSize int 59 | Blacklist []string 60 | } 61 | 62 | type Duration struct { 63 | time.Duration 64 | } 65 | 66 | func (d *Duration) UnmarshalTOML(text []byte) error { 67 | var err error 68 | d.Duration, err = time.ParseDuration(string(text[1 : len(text)-1])) 69 | return err 70 | } 71 | 72 | var ( 73 | threeMonths = Duration{Duration: 90 * 24 * time.Hour} 74 | oneWeek = Duration{Duration: 7 * 24 * time.Hour} 75 | DefaultCacheTimes = map[string]Duration{ 76 | ".woff": threeMonths, 77 | ".woff2": threeMonths, 78 | ".ttf": threeMonths, 79 | ".eot": threeMonths, 80 | ".otf": threeMonths, 81 | ".jpg": threeMonths, 82 | ".png": threeMonths, 83 | ".css": oneWeek, 84 | } 85 | DefaultSiteConfig = SiteConfig{ 86 | General: 
&SiteConfigGeneral{ 87 | DefaultFile: "index.html", 88 | FancyFolder: "/f/", 89 | }, 90 | Cache: &SiteConfigCache{ 91 | CacheTimes: DefaultCacheTimes, 92 | DefaultCacheTime: Duration{Duration: time.Hour}, 93 | }, 94 | Compression: &SiteConfigCompression{ 95 | Blacklist: []string{ 96 | ".jpg", 97 | ".zip", 98 | ".gz", 99 | ".tgz", 100 | }, 101 | MinSize: 256, 102 | }, 103 | } 104 | DefaultConfig = Config{ 105 | Root: "/srv/web", 106 | HTTP: ConfigHTTP{ 107 | Address: ":80", 108 | }, 109 | Command: ConfigCommand{ 110 | Address: ":65001", 111 | }, 112 | } 113 | ) 114 | 115 | func readSiteConf(p string) (*SiteConfig, error) { 116 | var ( 117 | b []byte 118 | err error 119 | conf SiteConfig 120 | ) 121 | 122 | if b, err = ioutil.ReadFile(p); err != nil { 123 | return &DefaultSiteConfig, err 124 | } 125 | 126 | if err := toml.Unmarshal(b, &conf); err != nil { 127 | return nil, err 128 | } 129 | 130 | if conf.General == nil { 131 | conf.General = DefaultSiteConfig.General 132 | } 133 | if conf.Cache == nil { 134 | conf.Cache = DefaultSiteConfig.Cache 135 | } 136 | if conf.Compression == nil { 137 | conf.Compression = DefaultSiteConfig.Compression 138 | } 139 | 140 | return &conf, nil 141 | } 142 | 143 | func readServerConf(p string) (*Config, error) { 144 | var ( 145 | b []byte 146 | err error 147 | conf Config 148 | ) 149 | 150 | if b, err = ioutil.ReadFile(p); err != nil { 151 | return &DefaultConfig, err 152 | } 153 | 154 | if err := toml.Unmarshal(b, &conf); err != nil { 155 | return nil, err 156 | } 157 | 158 | return &conf, nil 159 | } 160 | -------------------------------------------------------------------------------- /minihttp.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "net/http" 7 | "os" 8 | "sync" 9 | "time" 10 | ) 11 | 12 | var ( 13 | configFile = flag.String("config", "minihttp.toml", "the config file to use") 14 | rootdir = flag.String("rootdir", "", "the 
dir to serve from") 15 | address = flag.String("address", "", "address to listen on") 16 | tlsAddress = flag.String("tlsAddress", "", "address to listen on for TLS") 17 | tlsCert = flag.String("tlsCert", "", "certificate for TLS") 18 | tlsKey = flag.String("tlsKey", "", "key for TLS") 19 | logFile = flag.String("logFile", "", "file to use for logging (overwites config)") 20 | command = flag.String("command", "", "address to use for command server") 21 | development = flag.Bool("dev", false, "reload on every request (if no config set)") 22 | quiet = flag.Bool("quiet", false, "disable logging") 23 | ) 24 | 25 | func main() { 26 | var err error 27 | flag.Parse() 28 | 29 | conf, err := readServerConf(*configFile) 30 | if err != nil { 31 | if conf == nil { 32 | fmt.Printf("Cannot read configuration for server: %v\n", err) 33 | return 34 | } 35 | fmt.Printf("Cannot read configuration for server, using default: %v\n", err) 36 | } 37 | 38 | // Apply command-line arguments over the provided configuration 39 | if *rootdir != "" { 40 | conf.Root = *rootdir 41 | } 42 | if *address != "" { 43 | conf.HTTP.Address = *address 44 | } 45 | if *tlsAddress != "" { 46 | conf.HTTPS.Address = *tlsAddress 47 | } 48 | if *tlsCert != "" { 49 | conf.HTTPS.Cert = *tlsCert 50 | } 51 | if *tlsKey != "" { 52 | conf.HTTPS.Key = *tlsKey 53 | } 54 | if *logFile != "" { 55 | conf.LogFile = *logFile 56 | } 57 | if *command != "" { 58 | conf.Command.Address = *command 59 | } 60 | if *development { 61 | conf.Development = *development 62 | } 63 | 64 | var logger func(string, ...interface{}) = (&Logger{Writer: os.Stderr}).Printf 65 | 66 | if *quiet { 67 | logger = func(string, ...interface{}) {} 68 | } else if conf.LogFile != "" { 69 | rw, err := NewRotateWriter(conf.LogFile, conf.LogLines) 70 | if err != nil { 71 | fmt.Fprintf(os.Stderr, "Could not initialize RotateWriter: %v\n", err) 72 | return 73 | } 74 | defer rw.Shutdown() 75 | 76 | go func() { 77 | fmt.Fprintf(os.Stderr, "RotateWriter terminated: 
%v\n", rw.Serve()) 78 | }() 79 | 80 | logger = (&Logger{Writer: rw}).Printf 81 | } 82 | 83 | if conf.Root == "" { 84 | fmt.Fprintf(os.Stderr, "Missing directory to serve\n") 85 | flag.Usage() 86 | return 87 | } 88 | 89 | if conf.HTTP.Address == "" && conf.HTTPS.Address == "" { 90 | fmt.Fprintf(os.Stderr, "Missing address to serve\n") 91 | flag.Usage() 92 | return 93 | } 94 | 95 | if conf.HTTPS.Address != "" && (conf.HTTPS.Cert == "" || conf.HTTPS.Key == "") { 96 | fmt.Fprintf(os.Stderr, "Missing key/cert for tls\n") 97 | flag.Usage() 98 | return 99 | } 100 | 101 | // Load sitelist 102 | sl := &sitelist{ 103 | root: conf.Root, 104 | defaulthost: conf.DefaultHost, 105 | logger: logger, 106 | } 107 | 108 | if err = sl.load(); err != nil { 109 | fmt.Fprintf(os.Stderr, "Unable to walk files: %v\n", err) 110 | return 111 | } 112 | 113 | sl.dev(*development) 114 | 115 | // Start your engines! 116 | if conf.Command.Address != "" { 117 | go func() { 118 | logger("Starting command server at: %s\n", conf.Command.Address) 119 | s := &http.Server{ 120 | Addr: conf.Command.Address, 121 | Handler: http.HandlerFunc(sl.cmdhttp), 122 | ReadTimeout: 10 * time.Second, 123 | WriteTimeout: 10 * time.Second, 124 | } 125 | logger("Command server failure: %v\n", s.ListenAndServe()) 126 | }() 127 | } 128 | 129 | var wg sync.WaitGroup 130 | if conf.HTTP.Address != "" { 131 | wg.Add(1) 132 | go func() { 133 | logger("Starting HTTP server at: %s\n", conf.HTTP.Address) 134 | s := &http.Server{ 135 | Addr: conf.HTTP.Address, 136 | Handler: http.HandlerFunc(sl.http), 137 | ReadTimeout: 30 * time.Second, 138 | WriteTimeout: 10 * time.Minute, 139 | } 140 | logger("HTTP server failure: %v\n", s.ListenAndServe()) 141 | wg.Done() 142 | }() 143 | } 144 | 145 | if conf.HTTPS.Address != "" { 146 | wg.Add(1) 147 | go func() { 148 | logger("Starting HTTPS server at: %s\n", conf.HTTPS.Address) 149 | s := &http.Server{ 150 | Addr: conf.HTTPS.Address, 151 | Handler: http.HandlerFunc(sl.http), 152 | 
ReadTimeout: 30 * time.Second, 153 | WriteTimeout: 10 * time.Minute, 154 | } 155 | 156 | logger("HTTPS server failure: %v\n", s.ListenAndServeTLS(conf.HTTPS.Cert, conf.HTTPS.Key)) 157 | wg.Done() 158 | }() 159 | } 160 | 161 | wg.Wait() 162 | logger("Terminated\n") 163 | } 164 | -------------------------------------------------------------------------------- /log.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "fmt" 7 | "io" 8 | "io/ioutil" 9 | "os" 10 | "sync" 11 | "time" 12 | ) 13 | 14 | func itoa(buf []byte, i int, wid int) { 15 | // Assemble decimal in reverse order. 16 | bp := len(buf) - 1 17 | for i >= 10 || wid > 1 { 18 | wid-- 19 | q := i / 10 20 | buf[bp] = byte('0' + i - q*10) 21 | bp-- 22 | i = q 23 | } 24 | // i < 10 25 | buf[bp] = byte('0' + i) 26 | } 27 | 28 | // Logger allows for log package like logging without the overhead on a simple 29 | // io.Writer 30 | type Logger struct { 31 | Writer io.Writer 32 | } 33 | 34 | // Printf works like log.Printf. 35 | func (l *Logger) Printf(format string, v ...interface{}) { 36 | in := []byte(fmt.Sprintf(format, v...)) 37 | t := make([]byte, 23+len(in), 23+len(in)) 38 | 39 | // Fill in separators 40 | t[0] = '[' 41 | t[5] = '/' 42 | t[8] = '/' 43 | t[11] = ' ' 44 | t[14] = ':' 45 | t[17] = ':' 46 | t[20] = ']' 47 | t[21] = ':' 48 | t[22] = ' ' 49 | 50 | // Fill in values 51 | n := time.Now() 52 | year, month, day := n.Date() 53 | hour, min, sec := n.Clock() 54 | itoa(t[1:5], int(year), 4) 55 | itoa(t[6:8], int(month), 2) 56 | itoa(t[9:11], int(day), 2) 57 | itoa(t[12:14], int(hour), 2) 58 | itoa(t[15:17], int(min), 2) 59 | itoa(t[18:20], int(sec), 2) 60 | copy(t[23:], in) 61 | 62 | l.Writer.Write(t) 63 | } 64 | 65 | // RotateWriter is a io.Writer that can be used for logging with rotation. It 66 | // keeps 1 current logfile and 9 gzipped old logfiles. Rotation occurs when a 67 | // line limit is reached. 
A RotateWriter *must* be created through 68 | // NewRotateWriter. 69 | type RotateWriter struct { 70 | fp *os.File 71 | wg sync.WaitGroup 72 | count int 73 | 74 | // Queue is the internal logging channel. This channel should buffered. If 75 | // not set, a call to Write will panic. 76 | Queue chan []byte 77 | 78 | // MaxLines is the maximum amount of lines to have an a file. When a file 79 | // exceeds this limit, it will be rotated. 80 | MaxLines int 81 | 82 | // Filename is the name of the first file. Additional files will be named 83 | // Filename.N.gz, where 0 w.MaxLines { 103 | err := w.rotate() 104 | if err != nil { 105 | return err 106 | } 107 | } 108 | 109 | w.count++ 110 | _, err := w.fp.Write(entry) 111 | if err != nil { 112 | return err 113 | } 114 | } 115 | w.wg.Done() 116 | 117 | return nil 118 | } 119 | 120 | // Write satisfies the io.Writer interface. 121 | func (w *RotateWriter) Write(output []byte) (int, error) { 122 | w.Queue <- output 123 | return len(output), nil 124 | } 125 | 126 | // prepare checks if a file can be reused. If not, it will be rotated 127 | // immediately. 128 | func (w *RotateWriter) prepare() error { 129 | var err error 130 | if w.fp, err = os.OpenFile(w.Filename, os.O_RDWR|os.O_CREATE, 0666); err != nil { 131 | w.fp = nil 132 | return w.rotate() 133 | } 134 | 135 | // Let's check if the file already present is small enough to continue where 136 | // we left off. 137 | var ( 138 | n, i, lc int 139 | b = make([]byte, 16*1024) 140 | ) 141 | 142 | // Count the lines. 143 | for { 144 | if n, err = w.fp.Read(b); err != nil { 145 | if err == io.EOF { 146 | break 147 | } 148 | return err 149 | } 150 | 151 | for i = 0; i < n; i++ { 152 | if b[i] == '\n' { 153 | lc++ 154 | } 155 | } 156 | } 157 | 158 | // Reuse? 
159 | if lc >= w.MaxLines { 160 | w.fp.Close() 161 | w.fp = nil 162 | return w.rotate() 163 | } 164 | 165 | if _, err := w.fp.Write([]byte("\n")); err != nil { 166 | w.fp.Close() 167 | w.fp = nil 168 | return err 169 | } 170 | 171 | return nil 172 | } 173 | 174 | // rotate shuffles the files around and performs GZIP'ing. 175 | func (w *RotateWriter) rotate() error { 176 | var err error 177 | 178 | // Close existing file if open 179 | if w.fp != nil { 180 | err = w.fp.Close() 181 | w.fp = nil 182 | if err != nil { 183 | return err 184 | } 185 | } 186 | 187 | var p string 188 | for i := 9; i > 0; i-- { 189 | p = fmt.Sprintf("%s.%d.gz", w.Filename, i) 190 | if _, err = os.Stat(p); err != nil { 191 | // We start in the high end, so we might get a few errors before we reach the first "real" log. 192 | continue 193 | } 194 | os.Rename(p, fmt.Sprintf("%s.%d.gz", w.Filename, i+1)) 195 | } 196 | 197 | // GZIP the newest file. 198 | b, _ := ioutil.ReadFile(w.Filename) 199 | buf := new(bytes.Buffer) 200 | c := gzip.NewWriter(buf) 201 | c.Write(b) 202 | c.Close() 203 | ioutil.WriteFile(fmt.Sprintf("%s.1.gz", w.Filename), buf.Bytes(), 0666) 204 | 205 | // Create a file. 206 | w.fp, err = os.Create(w.Filename) 207 | w.count = 0 208 | return err 209 | } 210 | 211 | // NewRotateWriter returns a fully initialized RotateWriter. 
212 | func NewRotateWriter(name string, maxlines int) (*RotateWriter, error) { 213 | w := &RotateWriter{ 214 | Queue: make(chan []byte, 1024), 215 | Filename: name, 216 | MaxLines: maxlines, 217 | } 218 | 219 | return w, w.prepare() 220 | } 221 | -------------------------------------------------------------------------------- /site.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "crypto/sha256" 7 | "encoding/hex" 8 | "fmt" 9 | "io" 10 | "io/ioutil" 11 | "mime" 12 | "os" 13 | "path" 14 | "time" 15 | ) 16 | 17 | const ( 18 | cacheControlNoCache = "public, max-age=0, no-cache" 19 | cacheControlCache = "public, max-age=%.0f" 20 | ) 21 | 22 | func gz(b []byte) []byte { 23 | buf := new(bytes.Buffer) 24 | gz, _ := gzip.NewWriterLevel(buf, gzip.BestCompression) 25 | gz.Write(b) 26 | gz.Close() 27 | return buf.Bytes() 28 | } 29 | 30 | func hash(b []byte) string { 31 | // You can't address into the return value of Sum256 without putting it into 32 | // a variable first... GRR! 33 | h := sha256.Sum256(b) 34 | return hex.EncodeToString(h[:]) 35 | } 36 | 37 | // cache stores bodies and hashes for deduplication during sitelist reload. 
38 | type cache struct { 39 | body []byte 40 | gbody []byte 41 | hash string 42 | ghash string 43 | } 44 | 45 | type resource struct { 46 | path string 47 | body []byte 48 | gbody []byte 49 | bodyReadCloser io.ReadCloser 50 | 51 | permitGZIP bool 52 | fromDisk bool 53 | cache string 54 | cnttype string 55 | loaded time.Time 56 | config *SiteConfig 57 | 58 | hash string 59 | ghash string 60 | } 61 | 62 | func (r *resource) updateTagCompress() { 63 | r.hash = hash(r.body) 64 | r.gbody = gz(r.body) 65 | r.ghash = hash(r.gbody) 66 | r.update() 67 | } 68 | 69 | func (r *resource) update() { 70 | var ( 71 | cache time.Duration 72 | cacheconf = r.config.Cache 73 | ext = path.Ext(r.path) 74 | compressconf = r.config.Compression 75 | mincompsize = compressconf.MinSize 76 | ) 77 | 78 | if r.cnttype = mime.TypeByExtension(ext); r.cnttype == "" { 79 | // Meh. 80 | r.cnttype = "text/plain; charset=utf-8" 81 | } 82 | 83 | if compressconf.MinSize == 0 { 84 | mincompsize = DefaultSiteConfig.Compression.MinSize 85 | } 86 | 87 | // Evaluate cache time. 88 | if (!r.fromDisk && !cacheconf.NoCacheFromMem) || 89 | (r.fromDisk && !cacheconf.NoCacheFromDisk) { 90 | 91 | if x, exists := cacheconf.CacheTimes[ext]; exists { 92 | cache = x.Duration 93 | } else { 94 | cache = cacheconf.DefaultCacheTime.Duration 95 | } 96 | } 97 | 98 | if cache == 0 { 99 | r.cache = cacheControlNoCache 100 | } else { 101 | r.cache = fmt.Sprintf(cacheControlCache, cache.Seconds()) 102 | } 103 | 104 | // Evaluate compression. 105 | if (r.fromDisk && compressconf.NoCompressFromDisk) || 106 | (!r.fromDisk && compressconf.NoCompressFromMem) || 107 | (r.body != nil && len(r.body) < mincompsize) { 108 | return 109 | } 110 | 111 | for _, v := range compressconf.Blacklist { 112 | if ext == v { 113 | return 114 | } 115 | } 116 | 117 | // If we know the size of the gzipped content already, we can evaluate if 118 | // the gzip variant is worth the effort. 
If I math'd this right, then the 119 | // threshold is a 10% size improvement. One could argue that ANY network 120 | // benefit is worth pursuing, but if the benefit is less than 10%, the 121 | // network benefit is negligible, and the server is basically just holding 122 | // the file in memory twice. 123 | if r.gbody != nil && float64(len(r.gbody))*1.1 >= float64(len(r.body)) { 124 | // Clear gbody. In case no resources decide to store the gzipped entity 125 | // variant, this allows for it to be garbage collected. 126 | r.gbody = nil 127 | return 128 | } 129 | 130 | r.permitGZIP = true 131 | } 132 | 133 | // site represents two sets of resources (one for HTTP, one for HTTPS) and a 134 | // related configuration. A site and its resource sets must not be mutated once 135 | // added to a sitelist, as access to them is intentionally not locked. Reloading 136 | // a must happen by replacing the site under sitelists' siteLock. 137 | type site struct { 138 | http map[string]*resource 139 | https map[string]*resource 140 | config *SiteConfig 141 | } 142 | 143 | func (s *site) addResource(diskpath, sitepath string, cachemap map[string]*cache, http, https bool) error { 144 | fi, err := os.Stat(diskpath) 145 | if err != nil { 146 | return err 147 | } 148 | 149 | if fi.IsDir() { 150 | diskpath = path.Join(diskpath, s.config.General.DefaultFile) 151 | if fi, err = os.Stat(diskpath); err != nil || fi.IsDir() { 152 | // We're here because the path addResource was called with was a 153 | // directory, and the directory either lacked the default file, or 154 | // the default file was a directory as well. Not being able to 155 | // associate a default file with a directory is not an error, so we 156 | // just skip the entry. 
157 | return nil 158 | } 159 | } 160 | 161 | body, err := ioutil.ReadFile(diskpath) 162 | if err != nil { 163 | return err 164 | } 165 | 166 | r := &resource{ 167 | body: body, 168 | hash: hash(body), 169 | path: diskpath, 170 | config: s.config, 171 | loaded: fi.ModTime(), 172 | } 173 | 174 | // Check if we already have this content read so we can deduplicate it. 175 | if cached, exists := cachemap[r.hash]; exists { 176 | r.body = cached.body 177 | r.hash = cached.hash 178 | r.gbody = cached.gbody 179 | r.ghash = cached.ghash 180 | } else { 181 | r.gbody = gz(r.body) 182 | r.ghash = hash(r.gbody) 183 | 184 | cachemap[r.hash] = &cache{ 185 | body: r.body, 186 | gbody: r.gbody, 187 | hash: r.hash, 188 | ghash: r.ghash, 189 | } 190 | } 191 | 192 | r.update() 193 | 194 | if http { 195 | s.http[sitepath] = r 196 | } 197 | if https { 198 | s.https[sitepath] = r 199 | } 200 | 201 | return nil 202 | } 203 | 204 | func newSite(config *SiteConfig) *site { 205 | return &site{ 206 | http: make(map[string]*resource), 207 | https: make(map[string]*resource), 208 | config: config, 209 | } 210 | } 211 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # minihttp [![Go Report Card](https://goreportcard.com/badge/github.com/kennylevinsen/minihttp)](https://goreportcard.com/report/github.com/kennylevinsen/minihttp) 2 | 3 | minihttp is a small webserver written in go. It features: 4 | 5 | * Zero-config vhost. 6 | * Serve from memory, with server-wide deduplication by hash. 7 | * Files gzipped ahead of time for zero-delay compressed responses. 8 | * Sane cache-headers by default, or configurable per host per file-extension. 9 | * Development mode to reload files on every request. 10 | * Command-server for runtime-reload, status reports, and development mode toggling 11 | * Decent access logs with primitive X-Forwarded-For handling and user agents. 
12 | * Sane defaults. Ain't nobody got time for config, so two parameters is all it takes to start (4 for TLS). 13 | * Per-site from-disk folder for heavy assets or quick filesharing (with independent cache and compression settings). 14 | * Fast stuffs. 15 | 16 | There's no CGI. No per-path configuration. No rewrite rules. If you want something fancier, go see [Caddy](https://caddyserver.com). 17 | 18 | The command server thing is totally cool. Added a vhost? Removed one? Changed your site? `curl localhost:7000/reload`. Maybe even `curl localhost:7000/status` to see how much memory you're using post-deduplication on your files, and what vhosts are enabled. 19 | 20 | ### How to use 21 | 22 | 1. Get minihttp: 23 | ```text 24 | go get github.com/kennylevinsen/minihttp 25 | ``` 26 | 27 | 2. Make directory structure: 28 | ```text 29 | mkdir -p web/example.com/http 30 | mkdir -p web/example.com/https 31 | mkdir -p web/example.com/common 32 | echo "Hello from HTTP" > web/example.com/http/index.html 33 | echo "Hello from HTTPS" > web/example.com/https/index.html 34 | echo "Shared file" > web/example.com/common/shared.html 35 | ``` 36 | 37 | 3. Start minihttp: 38 | ```text 39 | # TLS can be enabled with -tlsAddress, -tlsCert and -tlsKey 40 | minihttp -rootdir web -address :8080 41 | ``` 42 | 43 | 4. Try things out: 44 | ```text 45 | $ curl http://localhost:8080 46 | Hello from HTTP 47 | ``` 48 | 49 | And that's it! If you want another vhost, just make another folder like the web one. You can either restart the server or use the command API to reload it (see the bottom of the README). If you want to run the server permanently, I suggest making a server configuration file, but that's up to you. 
50 | 51 | If you want to run on port 80 on a Linux box, I would highly recommend setting the appropriate capabilities, rather than running the server as root: 52 | ```text 53 | sudo setcap cap_net_bind_service=+ep $(which minihttp) 54 | minihttp -rootdir web -address :80 55 | ``` 56 | 57 | ### Server configuration 58 | 59 | Ain't got time for documentation, so here is a configuration file with every option set and a comment: 60 | 61 | ```text 62 | # This is where we will server from. 63 | root = "/srv/web" 64 | 65 | # This is the host that will be used if the requested one is not found. 66 | defaultHost = "example.com" 67 | 68 | # The log/access file. 69 | logFile = "/var/log/minihttp/minihttp.log" 70 | 71 | # How many lines to write before the log is rotated and gzipped. 72 | logLines = 8192 73 | 74 | # Whether or not to start in development mode. 75 | development = false 76 | 77 | # The address for HTTP operation. 78 | [http] 79 | address = ":80" 80 | 81 | # The address, certificate and key for TLS operation. 82 | [https] 83 | address = ":443" 84 | cert = "cert.pem" 85 | key = "key.pem" 86 | 87 | # The address to serve the command interface on. 88 | [command] 89 | address = ":7000" 90 | ``` 91 | 92 | Load with: 93 | ```text 94 | minihttp -config config.toml 95 | ``` 96 | 97 | ### Site configuration 98 | 99 | Like above, all options set and comments: 100 | 101 | ```text 102 | [general] 103 | # This disables default files for this repo. 104 | noDefaultFile = false 105 | 106 | # The default file. 107 | defaultFile = "index.html" 108 | 109 | # The from-disk folder prefix. If a URL matches this prefix, the file will 110 | # be fetched from the fancy/ folder of the site. Note that the /f/ will be 111 | # included, so /f/hello will be fetched from site/fancy/f/hello. 112 | fancyFolder = "/f/" 113 | 114 | [cache] 115 | # This flips the cache headers to be cache-busting for memory content. 
116 | noCacheFromMem = false 117 | 118 | # This flips the cache headers to be cache-busting for disk content. 119 | noCacheFromDisk = false 120 | 121 | # This is the default cache time to advertise for content that does not have 122 | # a specific cache time set. 123 | defaultCacheTime = "1h" 124 | 125 | # Specific cache times. The syntax is that which time.ParseDuration accepts. 126 | [cache.cacheTimes] 127 | ".woff" ="90d" 128 | ".woff2" = "90d" 129 | ".ttf" = "90d" 130 | ".eot" = "90d" 131 | ".otf" = "90d" 132 | ".jpg" = "90d" 133 | ".png" = "90d" 134 | ".css" = "7d" 135 | 136 | [compression] 137 | # This disables GZIP compression for memory content. 138 | noCompressFromMem = false 139 | 140 | # This disables GZIP compression for disk content. Disabling this lowers CPU 141 | # load if there are many requests to from-disk files, but increases 142 | # bandwidth consumption. Memory content does not have this issue. 143 | noCompressFromDisk = true 144 | 145 | # The minimum file size a file must have to even consider compression. 146 | minSize = 256 147 | 148 | # The minimum size reduction a file must see after compression to use the 149 | # compressed result. 150 | minRatio = 1.1 151 | 152 | # Files that will never be compressed. Recompressing compressed files is 153 | # mostly just a waste of cycles for both the server and client. 154 | blacklist = [".jpg", ".zip", ".gz", ".tgz"] 155 | ``` 156 | 157 | Given the previously mentioned file structure, put the file in web/example.com/config.toml and reload the web server. 158 | 159 | ### Error files 160 | 161 | The server includes a hardcoded 404 page for when files don't exist or can't be read, as well as a 500 page for when a hostname is not known to the server. 162 | 163 | A 404.html and 500.html can be put in the rootdir ("web/404.html" and "web/500.html" in the example folder above), which will replace the builtin variants. 
Furthermore, a 404.html can be put in the site folder ("web/example.com/404.html" in the example folder above), which will apply only to that site. 164 | 165 | ### Command API 166 | 167 | Bah, I'll just show you this too: 168 | 169 | ```text 170 | $ # Reload vhosts, their configurations and files. 171 | $ curl localhost:7000/reload 172 | OK 173 | 174 | $ # Enable development mode. 175 | $ curl localhost:7000/devel 176 | OK 177 | 178 | $ # Check the server status. 179 | $ curl localhost:7000/status 180 | Sites (2): 181 | example.com (125 HTTP resources, 125 HTTPS resources) 182 | other.com (133 HTTP resources, 133 HTTPS resources) 183 | 184 | Settings: 185 | Root: /somewhere/web 186 | Dev mode: true 187 | Global no such host: false 188 | Global no such file: false 189 | 190 | Stats: 191 | Total plain file size: 23MB 192 | Total gzip file size 11MB 193 | Total files: 225 194 | 195 | $ # Disable development mode (production mode). 196 | $ curl localhost:7000/prod 197 | OK 198 | 199 | $ # Check status to see the change. 200 | $ curl localhost:7000/status 201 | Sites (2): 202 | example.com (125 HTTP resources, 125 HTTPS resources) 203 | other.com (133 HTTP resources, 133 HTTPS resources) 204 | 205 | Settings: 206 | Root: /somewhere/web 207 | Dev mode: false 208 | Global no such host: false 209 | Global no such file: false 210 | 211 | Stats: 212 | Total plain file size: 23MB 213 | Total gzip file size 11MB 214 | Total files: 225 215 | ``` 216 | 217 | (Yes, I typed in some random numbers. If you want real numbers, run it yourself!) 
218 | -------------------------------------------------------------------------------- /sitelist.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "compress/gzip" 5 | "fmt" 6 | "io" 7 | "io/ioutil" 8 | "net" 9 | "net/http" 10 | "net/url" 11 | "os" 12 | "path" 13 | "path/filepath" 14 | "runtime" 15 | "strings" 16 | "sync" 17 | "sync/atomic" 18 | "time" 19 | ) 20 | 21 | var ( 22 | defaultNoSuchHost = &resource{ 23 | body: []byte("no such host"), 24 | loaded: time.Now(), 25 | cnttype: "text/plain; charset=utf-8", 26 | cache: "public, max-age=0, no-cache", 27 | hash: "W/\"go-far-away\"", 28 | path: "/403.html", 29 | } 30 | 31 | defaultNoSuchFile = &resource{ 32 | body: []byte("no such file"), 33 | loaded: time.Now(), 34 | cnttype: "text/plain; charset=utf-8", 35 | cache: "public, max-age=0, no-cache", 36 | hash: "W/\"go-away\"", 37 | path: "/404.html", 38 | } 39 | ) 40 | 41 | // quickHeaderGet bypasses net/textproto/MIMEHeader.CanonicalMIMEHeaderKey, 42 | // which would otherwise have been run on the key. This is a waste of time if we 43 | // manually canonicalize the query key. 44 | func quickHeaderGet(key string, h http.Header) (string, bool) { 45 | val := h[key] 46 | if len(val) == 0 { 47 | return "", false 48 | } 49 | return val[0], true 50 | } 51 | 52 | // quickHeaderGetLast works like quickHeaderGet, but fetches the last instance 53 | // of the header. 54 | func quickHeaderGetLast(key string, h http.Header) (string, bool) { 55 | val := h[key] 56 | if len(val) == 0 { 57 | return "", false 58 | } 59 | return val[len(val)-1], true 60 | } 61 | 62 | // sitelist manages a set of sites. 
63 | type sitelist struct { 64 | sites map[string]*site 65 | siteLock sync.RWMutex 66 | 67 | errNoSuchHost *resource 68 | errNoSuchFile *resource 69 | 70 | root string 71 | devmode uint32 72 | defaulthost string 73 | logger func(string, ...interface{}) 74 | 75 | // stats 76 | filesInMemory int 77 | plainBytesInMemory int 78 | gzipBytesInMemory int 79 | } 80 | 81 | func (sl *sitelist) access(req *http.Request, status int) { 82 | var forwardAddr, userAgent, referer string 83 | var exists bool 84 | if forwardAddr, exists = quickHeaderGetLast("X-Forwarded-For", req.Header); !exists { 85 | forwardAddr = "-" 86 | } 87 | if userAgent, exists = quickHeaderGet("User-Agent", req.Header); !exists { 88 | userAgent = "-" 89 | } 90 | if referer, exists = quickHeaderGet("Referer", req.Header); !exists { 91 | referer = "-" 92 | } 93 | sl.logger("%s %s %d \"%s %v %s\" \"%s\" \"%s\"\n", req.RemoteAddr, forwardAddr, status, req.Method, req.URL, req.Proto, referer, userAgent) 94 | } 95 | 96 | func unitize(thing int) string { 97 | if thing > 1024*1024*1024 { 98 | return fmt.Sprintf("%.1fGB", float64(thing)/(1024*1024*1024)) 99 | } else if thing > 1024*1024 { 100 | return fmt.Sprintf("%.1fMB", float64(thing)/(1024*1024)) 101 | } else if thing > 1024 { 102 | return fmt.Sprintf("%.1fKB", float64(thing)/1024) 103 | } 104 | 105 | return fmt.Sprintf("%dB", thing) 106 | } 107 | 108 | func (sl *sitelist) status() string { 109 | sl.siteLock.RLock() 110 | defer sl.siteLock.RUnlock() 111 | 112 | var sites string 113 | 114 | for host, site := range sl.sites { 115 | sites += fmt.Sprintf("\t%s (%d HTTP resources, %d HTTPS resources)\n", host, len(site.http), len(site.https)) 116 | } 117 | 118 | return fmt.Sprintf(` 119 | Sites: (%d): 120 | %s 121 | 122 | Settings: 123 | Root: %s 124 | Dev mode: %t 125 | Global no such host: %t 126 | Global no such file: %t 127 | 128 | Stats: 129 | Total plain file size: %s 130 | Total gzip file size: %s 131 | Total files: %d 132 | `, 133 | len(sl.sites), 134 | 
sites, 135 | sl.root, 136 | atomic.LoadUint32(&sl.devmode) == 1, 137 | sl.errNoSuchHost != nil, 138 | sl.errNoSuchFile != nil, 139 | unitize(sl.plainBytesInMemory), 140 | unitize(sl.gzipBytesInMemory), 141 | sl.filesInMemory) 142 | } 143 | 144 | // dev flips the development mode switch. 145 | func (sl *sitelist) dev(active bool) { 146 | if active { 147 | atomic.StoreUint32(&sl.devmode, 1) 148 | } else { 149 | atomic.StoreUint32(&sl.devmode, 0) 150 | } 151 | } 152 | 153 | // fetch retrieves the file for a given URL. This is where the work happens, so 154 | // it must stay simple and fast. 155 | func (sl *sitelist) fetch(url *url.URL) (*resource, int) { 156 | var ( 157 | host, p string 158 | err error 159 | exists bool 160 | s *site 161 | res *resource 162 | rmap map[string]*resource 163 | ) 164 | 165 | if host, _, err = net.SplitHostPort(url.Host); err != nil { 166 | host = url.Host 167 | } 168 | 169 | // If the devmode flag is set, we reload the entire sitelist. This is by far 170 | // the easiest. 171 | if atomic.LoadUint32(&sl.devmode) == 1 { 172 | sl.load() 173 | } 174 | 175 | sl.siteLock.RLock() 176 | s, exists = sl.sites[host] 177 | sl.siteLock.RUnlock() 178 | 179 | // Check if the host existed. If not, check the default host, and if that's not there either, return a 403 180 | if !exists { 181 | sl.siteLock.RLock() 182 | s, exists = sl.sites[sl.defaulthost] 183 | sl.siteLock.RUnlock() 184 | if !exists { 185 | if sl.errNoSuchHost != nil { 186 | return sl.errNoSuchHost, http.StatusForbidden 187 | } 188 | return defaultNoSuchHost, http.StatusForbidden 189 | } 190 | } 191 | 192 | // TODO(kl): Consider moving this to the from-disk branch. That means that 193 | // /meticulous/../fantastical will not render correctly as /fantastical, but 194 | // I do not really find this to be an issue. It could shave some cycles off 195 | // in-memory resource fetch. 196 | p = path.Clean(url.Path) 197 | 198 | // First, let's try for the file in memory. 
If it's found, we return it 199 | // immediately. This is the path we want to be the fastest. 200 | switch url.Scheme { 201 | case "https": 202 | rmap = s.https 203 | default: 204 | rmap = s.http 205 | } 206 | 207 | if res, exists = rmap[p]; exists { 208 | return res, 200 209 | } 210 | 211 | // The file was not in memory, so see if it's available in the from-disk 212 | // folder. We first verify if the path prefix matches the permitted 213 | // from-disk prefix, and if so, try to load the resource directly, without 214 | // storing it in the resource map. The source is loaded from the "fancy" 215 | // folder of the vhost directory. 216 | if strings.HasPrefix(p, s.config.General.FancyFolder) { 217 | p = path.Join(sl.root, host, "fancy", p) 218 | 219 | // We make a streaming resource. The beefit of this is a much lower 220 | // time-to-first-byte, as well as lower memory consumption. 221 | if x, err := os.Open(p); err == nil { 222 | if fi, err := x.Stat(); err == nil && !fi.IsDir() { 223 | res = &resource{ 224 | bodyReadCloser: x, 225 | path: p, 226 | config: s.config, 227 | loaded: fi.ModTime(), 228 | hash: fmt.Sprintf("W/\"%x-%xi\"", fi.ModTime().Unix(), fi.Size()), 229 | ghash: fmt.Sprintf("W/\"%x-%xg\"", fi.ModTime().Unix(), fi.Size()), 230 | fromDisk: true, 231 | permitGZIP: true, 232 | } 233 | res.update() 234 | return res, 200 235 | } 236 | 237 | // We couldn't use the file, so close it up and continue to file not 238 | // found handling. 239 | x.Close() 240 | } 241 | } 242 | 243 | // 404 File Not Found time! We try to fetch a 404.html document from the 244 | // site. The 404 document is served from 3 locations, as available: 245 | // 246 | // * The vhost directory itself, if available. 247 | // * The root directory itself, if available. 248 | // * The configured default document. 
249 | // 250 | if res, exists = rmap["/404.html"]; exists { 251 | return res, http.StatusNotFound 252 | } 253 | 254 | if sl.errNoSuchFile != nil { 255 | return sl.errNoSuchFile, http.StatusNotFound 256 | } 257 | 258 | return defaultNoSuchFile, http.StatusNotFound 259 | } 260 | 261 | // http is the actual HTTP handler, serving the requests as quickly as it can. 262 | // It implements http.Handler. 263 | func (sl *sitelist) http(w http.ResponseWriter, req *http.Request) { 264 | var ( 265 | head, exists, useGZIP bool 266 | now = time.Now() 267 | h = w.Header() 268 | err error 269 | ) 270 | 271 | // We patch up the URL object for convenience. 272 | req.URL.Host = req.Host 273 | if req.TLS != nil { 274 | req.URL.Scheme = "https" 275 | } else { 276 | req.URL.Scheme = "http" 277 | } 278 | 279 | // Evaluate method 280 | switch req.Method { 281 | case "GET": 282 | // do nothing 283 | case "HEAD": 284 | head = true 285 | default: 286 | h["Content-Type"] = []string{"text/plain; charset=utf-8"} 287 | w.WriteHeader(http.StatusMethodNotAllowed) 288 | w.Write(nil) // Do I need to call Write? :/ 289 | sl.access(req, http.StatusMethodNotAllowed) 290 | return 291 | } 292 | 293 | r, status := sl.fetch(req.URL) 294 | 295 | hash := r.hash 296 | body := r.body 297 | if r.permitGZIP { 298 | var acceptEncoding string 299 | if acceptEncoding, exists = quickHeaderGet("Accept-Encoding", req.Header); exists && strings.Contains(acceptEncoding, "gzip") { 300 | useGZIP = true 301 | body = r.gbody 302 | hash = r.ghash 303 | h["Content-Encoding"] = []string{"gzip"} 304 | } 305 | } 306 | 307 | // Set headers 308 | h["Content-Type"] = []string{r.cnttype} 309 | h["Date"] = []string{now.Format(time.RFC1123)} 310 | h["Cache-Control"] = []string{r.cache} 311 | h["Last-Modified"] = []string{r.loaded.Format(time.RFC1123)} 312 | h["Etag"] = []string{hash} 313 | h["Vary"] = []string{"Accept-Encoding"} 314 | 315 | // Should we send a 304 Not Modified? 
We do this if the file was found, and 316 | // the cache information headers from the client match the file we have 317 | // available. 304's are, annoyingly so, only partially like HEAD request, as 318 | // it does not contain encoding and length information about the payload 319 | // like a HEAD does. This means we have to interrupt the request at 320 | // different points of their execution... *sigh* 321 | if status == http.StatusOK { 322 | var cacheResponse bool 323 | var ifNoneMatch, ifModifiedSince string 324 | if ifNoneMatch, exists = quickHeaderGet("If-None-Match", req.Header); exists { 325 | cacheResponse = ifNoneMatch == r.hash || ifNoneMatch == "*" 326 | } else if ifModifiedSince, exists = quickHeaderGet("If-Modified-Since", req.Header); exists { 327 | var imsdate time.Time 328 | imsdate, err = time.Parse(time.RFC1123, ifModifiedSince) 329 | cacheResponse = err == nil && !imsdate.IsZero() && imsdate.After(r.loaded) 330 | } 331 | 332 | if cacheResponse { 333 | w.WriteHeader(http.StatusNotModified) 334 | sl.access(req, http.StatusNotModified) 335 | return 336 | } 337 | } 338 | 339 | // Are we dealing with a streaming resource (That is, a file)? 340 | if r.bodyReadCloser != nil { 341 | w.WriteHeader(status) 342 | sl.access(req, status) 343 | if head { 344 | return 345 | } 346 | 347 | if useGZIP { 348 | gz := gzip.NewWriter(w) 349 | _, err = io.Copy(gz, r.bodyReadCloser) 350 | gz.Close() 351 | } else { 352 | _, err = io.Copy(w, r.bodyReadCloser) 353 | } 354 | sl.logger("[%s]: error writing response: %v\n", req.RemoteAddr, err) 355 | r.bodyReadCloser.Close() 356 | return 357 | } 358 | 359 | h["Content-Length"] = []string{fmt.Sprintf("%d", len(body))} 360 | 361 | w.WriteHeader(status) 362 | sl.access(req, status) 363 | 364 | // HEAD? 365 | if head { 366 | return 367 | } 368 | 369 | if _, err = w.Write(body); err != nil { 370 | sl.logger("[%s]: error writing response: %v\n", req.RemoteAddr, err) 371 | } 372 | } 373 | 374 | // cmdhttp is the command handler. 
It implements http.Handler. 375 | func (sl *sitelist) cmdhttp(w http.ResponseWriter, req *http.Request) { 376 | switch req.URL.Path { 377 | case "/devel": 378 | sl.logger("[%s]: enabling development mode\n", req.RemoteAddr) 379 | sl.dev(true) 380 | 381 | w.Header().Set("Content-Type", "text/plain") 382 | w.Write([]byte("OK\n")) 383 | case "/prod": 384 | sl.logger("[%s]: disabling development mode\n", req.RemoteAddr) 385 | sl.dev(false) 386 | 387 | w.Header().Set("Content-Type", "text/plain") 388 | w.Write([]byte("OK\n")) 389 | case "/reload": 390 | sl.logger("[%s]: reloading\n", req.RemoteAddr) 391 | if err := sl.load(); err != nil { 392 | sl.logger("[%s]: reload failed: %v\n", req.RemoteAddr, err) 393 | w.WriteHeader(http.StatusInternalServerError) 394 | w.Write([]byte(fmt.Sprintf("reload failed: %v\n", err))) 395 | return 396 | } 397 | 398 | w.Header().Set("Content-Type", "text/plain") 399 | w.Write([]byte("OK\n")) 400 | case "/status": 401 | sl.logger("[%s]: status request\n", req.RemoteAddr) 402 | w.Header().Set("Content-Type", "text/plain") 403 | w.Write([]byte(sl.status())) 404 | default: 405 | w.WriteHeader(http.StatusNotFound) 406 | w.Write([]byte("Unknown command\n")) 407 | } 408 | } 409 | 410 | // load reads a root server structure in the format: 411 | // 412 | // example.com/scheme/index.html 413 | // example.com/config.toml 414 | // 415 | // The initial part of the path is the domain name, used for vhost purposes. The 416 | // second part is the scheme, which can be "http", "https", "common" or "fancy". 417 | // The first two are unique to their scheme. Files in "common" are available 418 | // from both HTTP and HTTPS. Files in "fancy" are used to be served directly 419 | // from disk, and is loaded on-request. The configuration specifies which 420 | // (virtual) folder should serve files from fancy. All other files are served 421 | // completely from memory. 
422 | func (sl *sitelist) load() error { 423 | sl.logger("Reloading root\n") 424 | 425 | // We store the results of the load in local temporary variables, and only 426 | // install them on success. It also shortens the time we need to hold the 427 | // lock for reload. 428 | var ( 429 | sites = make(map[string]*site) 430 | errNoSuchHost *resource 431 | errNoSuchFile *resource 432 | n = time.Now() 433 | ) 434 | 435 | // list root 436 | files, err := ioutil.ReadDir(sl.root) 437 | if err != nil { 438 | return err 439 | } 440 | 441 | cachemap := make(map[string]*cache) 442 | 443 | for _, s := range files { 444 | name := s.Name() 445 | p := path.Join(sl.root, name) 446 | 447 | // If we found a directory, check if it's a server global resource. 448 | if !s.IsDir() { 449 | res := &resource{ 450 | path: p, 451 | config: &DefaultSiteConfig, 452 | loaded: n, 453 | } 454 | 455 | switch name { 456 | case "404.html": 457 | errNoSuchFile = res 458 | case "403.html": 459 | errNoSuchHost = res 460 | default: 461 | continue 462 | } 463 | 464 | if res.body, err = ioutil.ReadFile(p); err != nil { 465 | return err 466 | } 467 | res.updateTagCompress() 468 | 469 | continue 470 | } 471 | 472 | schemes, err := ioutil.ReadDir(p) 473 | if err != nil { 474 | return err 475 | } 476 | 477 | conf, err := readSiteConf(path.Join(sl.root, name, "config.toml")) 478 | if err != nil { 479 | if conf == nil { 480 | return err 481 | } 482 | sl.logger("Cannot read configuration for %s, using default: %v\n", name, err) 483 | } 484 | 485 | s := newSite(conf) 486 | sites[name] = s 487 | 488 | for _, c := range schemes { 489 | if !c.IsDir() { 490 | continue 491 | } 492 | 493 | scheme := c.Name() 494 | http := scheme == "http" || scheme == "common" 495 | https := scheme == "https" || scheme == "common" 496 | start := path.Join(sl.root, name, scheme) 497 | err := filepath.Walk(start, func(p string, info os.FileInfo, err error) error { 498 | if err != nil { 499 | return err 500 | } 501 | 502 | p2 := 
p[len(start):] 503 | if len(p2) == 0 { 504 | p2 = "/" 505 | } 506 | 507 | return s.addResource(p, p2, cachemap, http, https) 508 | }) 509 | 510 | if err != nil { 511 | return err 512 | } 513 | } 514 | } 515 | 516 | var plainInMemory, gzipInMemory int 517 | for _, v := range cachemap { 518 | plainInMemory += len(v.body) 519 | gzipInMemory += len(v.gbody) 520 | } 521 | 522 | // We're done, so install the results. 523 | sl.siteLock.Lock() 524 | sl.sites = sites 525 | sl.errNoSuchFile = errNoSuchFile 526 | sl.errNoSuchHost = errNoSuchHost 527 | sl.filesInMemory = len(cachemap) 528 | sl.plainBytesInMemory = plainInMemory 529 | sl.gzipBytesInMemory = gzipInMemory 530 | sl.siteLock.Unlock() 531 | 532 | // We might have created a lot of garbage, so just run the GC now. 533 | runtime.GC() 534 | 535 | return nil 536 | } 537 | --------------------------------------------------------------------------------