├── .gitattributes
├── LICENSE
├── hls_server
│   ├── config.go
│   ├── config_test.go
│   ├── http_server.go
│   ├── player.go
│   └── requests.go
├── integration_tests
│   ├── streaming_test.go
│   └── wrapper.go
├── kassets
│   ├── assets
│   │   └── index.html
│   └── bindata.go
├── kfs
│   ├── config.go
│   ├── filesystem.go
│   ├── key.go
│   ├── kfs_test.go
│   ├── metadata.go
│   ├── queue.go
│   ├── utils.go
│   └── writer.go
├── ktypes
│   ├── abr.go
│   ├── api.go
│   ├── debug.go
│   ├── key.go
│   └── key_test.go
├── main.go
├── media
│   ├── rtmp_demuxer.go
│   └── rtmp_demuxer_test.go
├── noop_api
│   └── noop_api.go
├── rtmp_server
│   ├── config.go
│   ├── requests.go
│   ├── rtmp_server.go
│   └── rtmp_server_test.go
├── test_assets
│   └── vk_sync.mp4
├── vsync
│   ├── checked_map.go
│   └── mutex.go
└── worker
    ├── config.go
    ├── kive_test.go
    ├── kive_worker.go
    ├── kive_worker_test.go
    └── manifest.go
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.mp4 filter=lfs diff=lfs merge=lfs -text
2 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 V Kontakte, LLC.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
23 | The current MIT License does not cover use of the video file (the “Video”)
24 | attached hereto. Any purpose of use of the Video is prohibited, except for
25 | non-commercial use of the Video during testing of the current Software.
26 | None of the rights for V Kontakte’s design and logos are granted for further
27 | use, commercial or non-commercial, for any derivatives and adaptations.
--------------------------------------------------------------------------------
/hls_server/config.go:
--------------------------------------------------------------------------------
1 | package hls_server
2 |
3 | type LiveHlsConfig struct {
4 | HttpHost string
5 | HttpPort int
6 |
7 | LiveStreamPrefix string
8 | AbrStreamPrefix string
9 | AbrStreamMasterPrefix string
10 | LivePlaylist string
11 | ChunksPlaylist string
12 | LiveChunk string
13 |
14 | DvrStreamPrefix string
15 | DvrPlaylist string
16 | DvrChunk string
17 |
18 | VodPrefix string
19 | VodManifest string
20 | VodChunk string
21 |
22 | Player string
23 | }
24 |
25 | func (c *LiveHlsConfig) HandleDvrPlaylistUrl() string {
26 | return c.DvrStreamPrefix + "/" + c.DvrPlaylist
27 | }
28 |
29 | func (c *LiveHlsConfig) HandleDvrAbrPlaylistUrl() string {
30 | return c.AbrStreamMasterPrefix + c.AbrStreamPrefix + "/" + c.DvrPlaylist
31 | }
32 |
33 | func (c *LiveHlsConfig) HandleLiveChunkUrl() string {
34 | return c.LiveStreamPrefix + "/" + c.LiveChunk
35 | }
36 |
37 | func (c *LiveHlsConfig) HandleAbrChunkUrl() string {
38 | return c.AbrStreamMasterPrefix + c.AbrStreamPrefix + "/" + c.LiveChunk
39 | }
40 |
41 | func (c *LiveHlsConfig) HandleDvrChunkUrl() string {
42 | return c.DvrStreamPrefix + "/" + c.DvrChunk
43 | }
44 |
45 | func (c *LiveHlsConfig) HandleDvrAbrChunkUrl() string {
46 | return c.AbrStreamMasterPrefix + c.AbrStreamPrefix + "/" + c.DvrChunk
47 | }
48 |
49 | func (c *LiveHlsConfig) HandleLivePlaylistUrl() string {
50 | return c.LiveStreamPrefix + "/" + c.LivePlaylist
51 | }
52 |
53 | func (c *LiveHlsConfig) HandleAbrChunksPlaylistUrl() string {
54 | return c.AbrStreamMasterPrefix + c.AbrStreamPrefix + "/" + c.ChunksPlaylist
55 | }
56 |
57 | func (c *LiveHlsConfig) HandleAbrMasterPlaylistUrl() string {
58 | return c.AbrStreamMasterPrefix + "/" + c.LivePlaylist
59 | }
60 |
61 | func (c *LiveHlsConfig) HandleVodManifestUrl() string {
62 | return c.VodPrefix + "/" + c.VodManifest
63 | }
64 |
65 | func (c *LiveHlsConfig) HandleVodChunkUrl() string {
66 | return c.VodPrefix + "/" + c.VodChunk
67 | }
68 |
69 | func (c *LiveHlsConfig) HandleLivePlayerUrl() string {
70 | return c.Player + c.AbrStreamMasterPrefix
71 | }
72 |
73 | func NewLiveHlsConfig() LiveHlsConfig {
74 | c := LiveHlsConfig{
75 | HttpHost: "",
76 | HttpPort: 80,
77 |
78 | AbrStreamPrefix: "/{stream_type:source|256p|352p|360p|384p|480p|512p|720p|1080p}",
79 | AbrStreamMasterPrefix: "/{app:kiveabr}/{stream_name:[0-9a-zA-Z_-]+}",
80 | LiveStreamPrefix: "/{app:live|kiveabr}/{stream_name:[0-9a-zA-Z_-]+}",
81 | LiveChunk: "{stream_name_chunk}-{chunk_name}",
82 | LivePlaylist: "playlist.m3u8",
83 | ChunksPlaylist: "chunks.m3u8",
84 | DvrStreamPrefix: "/{app}/{stream_name}",
85 | DvrChunk: "{stream_name_chunk}-dvr-{chunk_name}",
86 | DvrPlaylist: "{playlist|chunks}_dvr_range-{from:[0-9]+}-{duration:[0-9]+}.m3u8",
87 | VodPrefix: "/internal/{stream_type}/{stream_name}",
88 | VodChunk: "{stream_name_chunk}-{chunk_name}",
89 | VodManifest: "manifest_{stream_name}_{from:[0-9]+}_{duration:[0-9]+}.json",
90 | Player: "/player",
91 | }
92 | return c
93 | }
94 |
--------------------------------------------------------------------------------
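
Usage sketch (not part of the repository): how the gorilla/mux route templates produced by NewLiveHlsConfig above resolve against a concrete playlist URL. The handler body and the test URL are illustrative assumptions, mirroring config_test.go.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/VKCOM/kive/hls_server"
	"github.com/gorilla/mux"
)

func main() {
	c := hls_server.NewLiveHlsConfig()
	r := mux.NewRouter()

	// HandleLivePlaylistUrl() expands to
	// "/{app:live|kiveabr}/{stream_name:[0-9a-zA-Z_-]+}/playlist.m3u8".
	r.HandleFunc(c.HandleLivePlaylistUrl(), func(w http.ResponseWriter, req *http.Request) {
		v := mux.Vars(req)
		fmt.Println(v["app"], v["stream_name"]) // prints: live mystream
	})

	req := httptest.NewRequest("GET", "/live/mystream/playlist.m3u8", nil)
	r.ServeHTTP(httptest.NewRecorder(), req)
}
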
/hls_server/config_test.go:
--------------------------------------------------------------------------------
1 | package hls_server
2 |
3 | import (
4 | "github.com/gorilla/mux"
5 | "testing"
6 |
7 | "github.com/sirupsen/logrus"
8 | "github.com/stretchr/testify/assert"
9 | "github.com/stretchr/testify/require"
10 | "io"
11 | "net/http"
12 | "net/http/httptest"
13 | )
14 |
15 | func TestLiveHls_LivePlaylistPattern(t *testing.T) {
16 | r := mux.NewRouter()
17 | c := NewLiveHlsConfig()
18 | rr := httptest.NewRecorder()
19 | called := false
20 | h := func(w http.ResponseWriter, r *http.Request) {
21 | w.Header().Set("Content-Type", "application/json")
22 | w.WriteHeader(http.StatusOK)
23 | io.WriteString(w, `{"alive": true}`)
24 | called = true
25 | v := mux.Vars(r)
26 | require.Equal(t, "", v["view_salt"])
27 | }
28 | r.Path(c.HandleLivePlaylistUrl()).Queries("vid", "{view_salt}").Name("LivePlaylistVid").HandlerFunc(h)
29 | r.HandleFunc(c.HandleLivePlaylistUrl(), h).Name("LivePlaylist")
30 |
31 | req, _ := http.NewRequest("GET", "/live/aa/playlist.m3u8", nil)
32 | r.ServeHTTP(rr, req)
33 | logrus.Debug(rr)
34 | assert.True(t, called)
35 | }
36 |
37 | func TestLiveHls_LivePlaylistPatternVid(t *testing.T) {
38 | r := mux.NewRouter()
39 | c := NewLiveHlsConfig()
40 | rr := httptest.NewRecorder()
41 | called := false
42 | h := func(w http.ResponseWriter, r *http.Request) {
43 | w.Header().Set("Content-Type", "application/json")
44 | w.WriteHeader(http.StatusOK)
45 | io.WriteString(w, `{"alive": true}`)
46 | called = true
47 | v := mux.Vars(r)
48 | require.Equal(t, "10", v["view_salt"])
49 | }
50 | r.Path(c.HandleLivePlaylistUrl()).Queries("vid", "{view_salt}").Name("LivePlaylistVid").HandlerFunc(h)
51 | r.HandleFunc(c.HandleLivePlaylistUrl(), h).Name("LivePlaylist")
52 |
53 | req, _ := http.NewRequest("GET", "/live/aa/playlist.m3u8?vid=10", nil)
54 | r.ServeHTTP(rr, req)
55 | logrus.Debug(rr)
56 | assert.True(t, called)
57 |
58 | }
59 |
--------------------------------------------------------------------------------
/hls_server/http_server.go:
--------------------------------------------------------------------------------
1 | package hls_server
2 |
3 | import (
4 | "fmt"
5 | "github.com/gorilla/mux"
6 | "github.com/mitchellh/mapstructure"
7 | "github.com/pkg/errors"
8 | "github.com/sirupsen/logrus"
9 | "github.com/VKCOM/kive/ktypes"
10 | "github.com/VKCOM/kive/vsync"
11 | "io"
12 | "net/http"
13 | "net/http/pprof"
14 | "reflect"
15 | "strconv"
16 | "strings"
17 | "time"
18 | )
19 |
20 | func LogHandler(next http.Handler) http.Handler {
21 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
22 | logrus.Debugf("req: %+v, map: %+v, %+v", r.RequestURI, mux.Vars(r), r)
23 | next.ServeHTTP(w, r)
24 | logrus.Infof("req: %+v, response headers %+v", r, w.Header())
25 | })
26 | }
27 |
28 | type HttpResponse struct {
29 | HttpStatus int
30 | Reader io.ReadCloser
31 | }
32 |
33 | type LiveHls struct {
34 | httpServer *http.Server
35 | httpRouter *mux.Router
36 |
37 | config LiveHlsConfig
38 |
39 | HandleLivePlaylist func(*LivePlaylistRequest) (HttpResponse, error)
40 | HandleLiveChunk func(*LiveChunkRequest) (HttpResponse, error)
41 | HandleDvrPlayList func(*DvrPlaylistRequest) (HttpResponse, error)
42 | HandleDvrChunk func(*DvrChunkRequest) (HttpResponse, error)
43 | HandleVodManifest func(*VodManifestRequest) (HttpResponse, error)
44 | HandleVodChunk func(*VodChunkRequest) (HttpResponse, error)
45 |
46 | HandleRtmpHealth func(duration time.Duration) bool
47 |
48 | VodManifestMutex *vsync.Semaphore
49 | VodChunkMutex *vsync.Semaphore
50 | dvrPlaylistMutex *vsync.Semaphore
51 | dvrChunkMutex *vsync.Semaphore
52 | livePlaylistMutex *vsync.Semaphore
53 | liveChunkMutex *vsync.Semaphore
54 | }
55 |
56 | func parseRequest(req interface{}, r *http.Request) error {
57 | vars := mux.Vars(r)
58 | if err := mapstructure.WeakDecode(vars, req); err != nil {
59 | return errors.Wrapf(err, "error parsing %+v, on %+v", req, vars)
60 | }
61 | logrus.Debugf("Request parse %+v", req)
62 | return nil
63 | }
64 |
65 | func (lhls *LiveHls) handleReqTyped(req interface{}) (HttpResponse, error) {
66 | switch v := req.(type) {
67 | case *VodChunkRequest:
68 | return func() (HttpResponse, error) {
69 | if !lhls.VodChunkMutex.TryLock(time.Second * 20) {
70 | return HttpResponse{HttpStatus: http.StatusRequestTimeout}, errors.New("timeout")
71 | }
72 | defer lhls.VodChunkMutex.Unlock()
73 |
74 | return lhls.HandleVodChunk(req.(*VodChunkRequest))
75 | }()
76 | case *VodManifestRequest:
77 | return func() (HttpResponse, error) {
78 | if !lhls.VodManifestMutex.TryLock(time.Second * 10) {
79 | return HttpResponse{HttpStatus: http.StatusRequestTimeout}, errors.New("timeout")
80 | }
81 | defer lhls.VodManifestMutex.Unlock()
82 |
83 | return lhls.HandleVodManifest(req.(*VodManifestRequest))
84 | }()
85 | case *DvrChunkRequest:
86 | return func() (HttpResponse, error) {
87 | if !lhls.dvrChunkMutex.TryLock(time.Second * 10) {
88 | return HttpResponse{HttpStatus: http.StatusRequestTimeout}, errors.New("timeout")
89 | }
90 | defer lhls.dvrChunkMutex.Unlock()
91 |
92 | return lhls.HandleDvrChunk(req.(*DvrChunkRequest))
93 | }()
94 | case *DvrPlaylistRequest:
95 | return func() (HttpResponse, error) {
96 | if !lhls.dvrPlaylistMutex.TryLock(time.Second * 10) {
97 | return HttpResponse{HttpStatus: http.StatusRequestTimeout}, errors.New("timeout")
98 | }
99 | defer lhls.dvrPlaylistMutex.Unlock()
100 |
101 | return lhls.HandleDvrPlayList(req.(*DvrPlaylistRequest))
102 | }()
103 | case *LiveChunkRequest:
104 | return func() (HttpResponse, error) {
105 | if !lhls.liveChunkMutex.TryLock(time.Second * 10) {
106 | return HttpResponse{HttpStatus: http.StatusRequestTimeout}, errors.New("timeout")
107 | }
108 | defer lhls.liveChunkMutex.Unlock()
109 |
110 | return lhls.HandleLiveChunk(req.(*LiveChunkRequest))
111 | }()
112 | case *LivePlaylistRequest:
113 | return func() (HttpResponse, error) {
114 | if !lhls.livePlaylistMutex.TryLock(time.Second * 15) {
115 | return HttpResponse{HttpStatus: http.StatusRequestTimeout}, errors.New("timeout")
116 | }
117 | logrus.Infof("%+v", req.(*LivePlaylistRequest).StreamName)
118 | logrus.Infof("%+v", req.(*LivePlaylistRequest).StreamType)
119 | defer lhls.livePlaylistMutex.Unlock()
120 |
121 | return lhls.HandleLivePlaylist(req.(*LivePlaylistRequest))
122 | }()
123 | default:
124 | return HttpResponse{
125 | HttpStatus: http.StatusInternalServerError,
126 | Reader: nil,
127 | }, errors.Errorf("unknown type %+v", v)
128 | }
129 | }
130 |
131 | func (lhls *LiveHls) handleReq(req interface{}, w http.ResponseWriter, r *http.Request) error {
132 | methodName := reflect.TypeOf(req).String()
133 |
134 | err := parseRequest(req, r)
135 | if err != nil {
136 | w.WriteHeader(http.StatusBadRequest)
137 | ktypes.Stat(true, "http_handle", methodName, http.StatusText(http.StatusBadRequest))
138 | return err
139 | }
140 |
141 | res, err := lhls.handleReqTyped(req)
142 |
143 | if err != nil && res.HttpStatus == 0 {
144 | ktypes.Stat(true, "http_handle", methodName, http.StatusText(http.StatusBadRequest))
145 | w.WriteHeader(http.StatusBadRequest)
146 | return err
147 | } else if err != nil && res.HttpStatus != 0 {
148 | ktypes.Stat(true, "http_handle", methodName, http.StatusText(res.HttpStatus))
149 | w.WriteHeader(res.HttpStatus)
150 | return err
151 | }
152 |
153 | w.WriteHeader(res.HttpStatus)
154 | if res.Reader != nil {
155 | _, err = io.Copy(w, res.Reader)
156 | }
157 | if err != nil {
158 | ktypes.Stat(true, "http_handle", methodName, http.StatusText(http.StatusInternalServerError))
159 | logrus.Errorf("Bad response %+v", res)
160 | return err
161 | }
162 | ktypes.Stat(false, "http_handle", methodName, http.StatusText(http.StatusOK))
163 |
164 | return nil
165 | }
166 |
167 | func NewLiveHls(config LiveHlsConfig) (*LiveHls, error) {
168 | httpRouter := mux.NewRouter()
169 | httpRouter.Use(LogHandler)
170 |
171 | lhls := &LiveHls{
172 | config: config,
173 | httpRouter: httpRouter,
174 | VodManifestMutex: vsync.NewSemaphore(1, 4),
175 | VodChunkMutex: vsync.NewSemaphore(2, 3),
176 | dvrPlaylistMutex: vsync.NewSemaphore(2, 20),
177 | dvrChunkMutex: vsync.NewSemaphore(2, 40),
178 | livePlaylistMutex: vsync.NewSemaphore(50, 300),
179 | liveChunkMutex: vsync.NewSemaphore(20, 300),
180 | }
181 |
182 | livePlaylistHandler := func(w http.ResponseWriter, r *http.Request) {
183 | req := &LivePlaylistRequest{}
184 | w.Header().Set("Access-Control-Allow-Origin", "*")
185 | w.Header().Set("Content-Type", "application/vnd.apple.mpegurl")
186 | lhls.handleReq(req, w, r)
187 | }
188 |
189 | liveChunkHandler := func(w http.ResponseWriter, r *http.Request) {
190 | req := &LiveChunkRequest{}
191 | w.Header().Set("Access-Control-Allow-Origin", "*")
192 | w.Header().Set("Content-Type", "video/m2ts")
193 | lhls.handleReq(req, w, r)
194 | }
195 |
196 | dvrPlaylistHandler := func(w http.ResponseWriter, r *http.Request) {
197 | req := &DvrPlaylistRequest{}
198 | w.Header().Set("Access-Control-Allow-Origin", "*")
199 | w.Header().Set("Content-Type", "application/vnd.apple.mpegurl")
200 | lhls.handleReq(req, w, r)
201 | }
202 |
203 | dvrChunkHandler := func(w http.ResponseWriter, r *http.Request) {
204 | req := &DvrChunkRequest{}
205 | w.Header().Set("Access-Control-Allow-Origin", "*")
206 | w.Header().Set("Content-Type", "video/m2ts")
207 | lhls.handleReq(req, w, r)
208 | }
209 |
210 | httpRouter.Path(lhls.config.HandleLivePlaylistUrl()).Queries("vid", "{view_salt}").Name("LivePlaylistVid").HandlerFunc(livePlaylistHandler)
211 |
212 | httpRouter.HandleFunc(lhls.config.HandleLivePlaylistUrl(), livePlaylistHandler).Name("LivePlaylist")
213 | httpRouter.HandleFunc(lhls.config.HandleAbrChunksPlaylistUrl(), livePlaylistHandler).Name("LivePlaylist")
214 | httpRouter.HandleFunc(lhls.config.HandleAbrMasterPlaylistUrl(), livePlaylistHandler).Name("LivePlaylist")
215 |
216 | httpRouter.HandleFunc(lhls.config.HandleDvrPlaylistUrl(), dvrPlaylistHandler).Name("DvrPlaylist")
217 | httpRouter.HandleFunc(lhls.config.HandleDvrAbrPlaylistUrl(), dvrPlaylistHandler).Name("DvrPlaylist")
218 |
219 | httpRouter.HandleFunc(lhls.config.HandleDvrChunkUrl(), dvrChunkHandler).Name("DvrChunkUrl")
220 | httpRouter.HandleFunc(lhls.config.HandleDvrAbrChunkUrl(), dvrChunkHandler).Name("DvrChunkUrl")
221 |
222 | httpRouter.HandleFunc(lhls.config.HandleLiveChunkUrl(), liveChunkHandler).Name("LiveChunk")
223 | httpRouter.HandleFunc(lhls.config.HandleAbrChunkUrl(), liveChunkHandler).Name("LiveChunk")
224 |
225 | httpRouter.HandleFunc(lhls.config.HandleVodManifestUrl(), func(w http.ResponseWriter, r *http.Request) {
226 | req := &VodManifestRequest{}
227 | w.Header().Set("Content-Type", "text/plain")
228 | lhls.handleReq(req, w, r)
229 | }).Name("VodManifest")
230 |
231 | httpRouter.HandleFunc(lhls.config.HandleVodChunkUrl(), func(w http.ResponseWriter, r *http.Request) {
232 | ktypes.Stat(false, "chunk", "vod", "")
233 | req := &VodChunkRequest{}
234 | w.Header().Set("Content-Type", "video/m2ts")
235 | lhls.handleReq(req, w, r)
236 | }).Name("VodChunk")
237 |
238 | pprofr := httpRouter.PathPrefix("/debug/pprof").Subrouter()
239 | pprofr.HandleFunc("/", pprof.Index)
240 | pprofr.HandleFunc("/cmdline", pprof.Cmdline)
241 | pprofr.HandleFunc("/symbol", pprof.Symbol)
242 | pprofr.HandleFunc("/trace", pprof.Trace)
243 |
244 | profile := pprofr.PathPrefix("/profile").Subrouter()
245 | profile.HandleFunc("", pprof.Profile)
246 | profile.Handle("/goroutine", pprof.Handler("goroutine"))
247 | profile.Handle("/threadcreate", pprof.Handler("threadcreate"))
248 | profile.Handle("/heap", pprof.Handler("heap"))
249 | profile.Handle("/block", pprof.Handler("block"))
250 | profile.Handle("/mutex", pprof.Handler("mutex"))
251 |
252 | httpRouter.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
253 | ktypes.Stat(false, "health", "", "")
254 | if !lhls.dvrPlaylistMutex.TryLock(10 * time.Second) {
255 | w.WriteHeader(http.StatusRequestTimeout)
256 | w.Write([]byte("dvrPlaylistMutex"))
257 | ktypes.Stat(true, "health", "dvrPlaylistMutex", "")
258 | return
259 | }
260 | lhls.dvrPlaylistMutex.Unlock()
261 |
262 | if !lhls.dvrChunkMutex.TryLock(10 * time.Second) {
263 | w.WriteHeader(http.StatusRequestTimeout)
264 | w.Write([]byte("dvrChunkMutex"))
265 | ktypes.Stat(true, "health", "dvrChunkMutex", "")
266 | return
267 | }
268 | lhls.dvrChunkMutex.Unlock()
269 |
270 | if !lhls.livePlaylistMutex.TryLock(4 * time.Second) {
271 | w.WriteHeader(http.StatusRequestTimeout)
272 | w.Write([]byte("livePlaylistMutex"))
273 | ktypes.Stat(true, "health", "livePlaylistMutex", "")
274 | return
275 | }
276 | lhls.livePlaylistMutex.Unlock()
277 |
278 | if !lhls.liveChunkMutex.TryLock(4 * time.Second) {
279 | w.WriteHeader(http.StatusRequestTimeout)
280 | w.Write([]byte("liveChunkMutex"))
281 | ktypes.Stat(true, "health", "liveChunkMutex", "")
282 | return
283 | }
284 | lhls.liveChunkMutex.Unlock()
285 |
286 | if !lhls.HandleRtmpHealth(10 * time.Second) {
287 | w.WriteHeader(http.StatusRequestTimeout)
288 | w.Write([]byte("HandleRtmpHealth"))
289 | ktypes.Stat(true, "health", "HandleRtmpHealth", "")
290 | return
291 | }
292 |
293 | w.WriteHeader(http.StatusOK)
294 | w.Write([]byte("Ok"))
295 | })
296 |
297 | httpRouter.HandleFunc(lhls.config.HandleLivePlayerUrl(), func(w http.ResponseWriter, r *http.Request) {
298 | req := &LivePlaylistRequest{}
299 | err := parseRequest(req, r)
300 | if err != nil {
301 | w.WriteHeader(http.StatusBadRequest)
302 | }
303 | player := NewPlayerPage()
304 | player.Port = config.HttpPort
305 | player.Application = req.Application
306 | player.StreamName = req.StreamName
307 | w.WriteHeader(http.StatusOK)
308 | player.ComposePlayerPage(w)
309 | })
310 |
311 | httpServer := &http.Server{
312 | Addr: fmt.Sprintf("%s:%d", lhls.config.HttpHost, lhls.config.HttpPort),
313 | Handler: httpRouter,
314 | WriteTimeout: time.Second * 30,
315 | ReadTimeout: time.Second * 30,
316 | IdleTimeout: time.Second * 30,
317 | }
318 |
319 | lhls.httpServer = httpServer
320 | return lhls, nil
321 | }
322 |
323 | func (lhls *LiveHls) BuildLiveChunkName(r *LiveChunkRequest) string {
324 | res := strings.Replace(lhls.config.LiveChunk, "{stream_name_chunk}", r.StreamNameChunk, -1)
325 | return strings.Replace(res, "{chunk_name}", r.ChunkName, -1)
326 | }
327 |
328 | func (lhls *LiveHls) BuildDvrChunkName(r *DvrChunkRequest) string {
329 | res := strings.Replace(lhls.config.DvrChunk, "{stream_name_chunk}", r.StreamNameChunk, -1)
330 | return strings.Replace(res, "{chunk_name}", r.ChunkName, -1)
331 | }
332 |
333 | func (lhls *LiveHls) BuildDvrChunksPlaylist(r *DvrPlaylistRequest) string {
334 | res := strings.Replace(lhls.config.DvrPlaylist, "{from:[0-9]+}", strconv.FormatInt(r.From, 10), -1)
335 | res = strings.Replace(res, "{duration:[0-9]+}", strconv.FormatInt(r.Duration, 10), -1)
336 | return strings.Replace(res, "{playlist|chunks}", "chunks", -1)
337 | }
338 |
339 | //func (lhls *LiveHls) BuildVodManifestChunkName(r *VodChunkRequest) string {
340 | // res := strings.Replace(lhls.config.VodChunk, "{chunk_name}", r.ChunkName, -1)
341 | // return res
342 | //}
343 |
344 | func (lhls *LiveHls) Listen() error {
345 | go func() {
346 | err := lhls.httpServer.ListenAndServe()
347 | if err != nil && err != http.ErrServerClosed {
348 | logrus.Panicf("cannot listen and serve http %+v", err)
349 | }
350 | }()
351 | return nil
352 | }
353 |
354 | func (lhls *LiveHls) Serve() error {
355 | return nil
356 | }
357 |
358 | func (lhls *LiveHls) Stop() error {
359 | return lhls.httpServer.Close()
360 |
361 | }
362 |
--------------------------------------------------------------------------------
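
Wiring sketch (not repository code; in the real service the worker package supplies these callbacks): constructing a LiveHls, plugging in handlers, and starting it. The port and the stubbed responses are assumptions for illustration only.

package main

import (
	"net/http"
	"time"

	"github.com/VKCOM/kive/hls_server"
)

func main() {
	cfg := hls_server.NewLiveHlsConfig()
	cfg.HttpPort = 8080 // assumption: any free port instead of the default 80

	lhls, err := hls_server.NewLiveHls(cfg)
	if err != nil {
		panic(err)
	}

	// Every request type is served through a callback; a nil callback panics
	// on first use, so a real caller wires all of them.
	lhls.HandleRtmpHealth = func(time.Duration) bool { return true }
	lhls.HandleLivePlaylist = func(r *hls_server.LivePlaylistRequest) (hls_server.HttpResponse, error) {
		return hls_server.HttpResponse{HttpStatus: http.StatusNotFound}, nil
	}

	if err := lhls.Listen(); err != nil { // ListenAndServe runs in a goroutine
		panic(err)
	}
	defer lhls.Stop()
	time.Sleep(100 * time.Millisecond)
}
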
/hls_server/player.go:
--------------------------------------------------------------------------------
1 | package hls_server
2 |
3 | import (
4 | "github.com/sirupsen/logrus"
5 | "github.com/VKCOM/kive/kassets"
6 | "html/template"
7 | "io"
8 | )
9 |
10 | type PlayerPage struct {
11 | StreamName string
12 | Application string
13 | Port int
14 | }
15 |
16 | func NewPlayerPage() *PlayerPage {
17 | return &PlayerPage{}
18 | }
19 |
20 | func (p *PlayerPage) ComposePlayerPage(writer io.Writer) (string, error) {
21 | b, err := kassets.Asset("assets/index.html")
22 | if err != nil {
23 | logrus.Error("Player asset not found")
24 | }
25 |
26 | t, err := template.New("HLSPlayer").Parse(string(b))
27 | if err != nil {
28 | return "", err
29 | }
30 | t.Execute(writer, struct {
31 | Title string
32 | Port int
33 | Application string
34 | StreamName string
35 | }{Title: "test", Port: p.Port, Application: p.Application, StreamName: p.StreamName})
36 |
37 | return "", nil
38 | }
39 |
--------------------------------------------------------------------------------
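
Rendering sketch (not repository code): composing the embedded player page into a buffer, the same way the /player handler in http_server.go writes it to the HTTP response. The field values are arbitrary examples.

package main

import (
	"bytes"
	"fmt"

	"github.com/VKCOM/kive/hls_server"
)

func main() {
	p := hls_server.NewPlayerPage()
	p.Port = 8080          // normally the HLS server's HTTP port
	p.Application = "live" // normally taken from the parsed request
	p.StreamName = "mystream"

	var buf bytes.Buffer
	if _, err := p.ComposePlayerPage(&buf); err != nil {
		panic(err)
	}
	fmt.Printf("rendered %d bytes of player HTML\n", buf.Len())
}
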
/hls_server/requests.go:
--------------------------------------------------------------------------------
1 | package hls_server
2 |
3 | import "fmt"
4 |
5 | type LivePlaylistRequest struct {
6 | Application string `mapstructure:"app"`
7 | StreamName string `mapstructure:"stream_name"`
8 | ViewSalt string `mapstructure:"view_salt"`
9 | StreamType string `mapstructure:"stream_type"`
10 | }
11 |
12 | type DvrPlaylistRequest struct {
13 | Application string `mapstructure:"app"`
14 | StreamName string `mapstructure:"stream_name"`
15 | From int64 `mapstructure:"from"`
16 | Duration int64 `mapstructure:"duration"`
17 | StreamType string `mapstructure:"stream_type"`
18 | }
19 |
20 | type ChunkRequest struct {
21 | Application string `mapstructure:"app" json:"application"`
22 | StreamName string `mapstructure:"stream_name" json:"stream_name"`
23 | StreamNameChunk string `mapstructure:"stream_name_chunk" json:"stream_chunk_name"`
24 | ChunkName string `mapstructure:"chunk_name" json:"chunk_name"`
25 | ViewSalt string `mapstructure:"view_salt" json:"-"`
26 | }
27 |
28 | type LiveChunkRequest ChunkRequest
29 | type DvrChunkRequest ChunkRequest
30 |
31 | func (cr *DvrChunkRequest) Key() string {
32 | return fmt.Sprintf("%s/%s/%s/%s", cr.Application, cr.StreamName, cr.StreamNameChunk, cr.ChunkName)
33 | }
34 |
35 | type VodManifestRequest DvrPlaylistRequest
36 | type VodChunkRequest struct {
37 | StreamType string `mapstructure:"stream_type"`
38 | StreamName string `mapstructure:"stream_name"`
39 | ChunkName string `mapstructure:"chunk_name"`
40 | }
41 |
--------------------------------------------------------------------------------
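
Decoding sketch (not repository code): how these mapstructure tags are exercised. parseRequest in http_server.go passes mux.Vars, a map[string]string, through mapstructure.WeakDecode, which also performs the string-to-int64 conversion needed by From and Duration. The struct is duplicated here only to keep the sketch self-contained; the variable values are made up.

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Mirrors hls_server.DvrPlaylistRequest.
type DvrPlaylistRequest struct {
	Application string `mapstructure:"app"`
	StreamName  string `mapstructure:"stream_name"`
	From        int64  `mapstructure:"from"`
	Duration    int64  `mapstructure:"duration"`
	StreamType  string `mapstructure:"stream_type"`
}

func main() {
	vars := map[string]string{ // what mux.Vars(r) would return for a DVR playlist URL
		"app":         "live",
		"stream_name": "mystream",
		"from":        "1546300800",
		"duration":    "3600",
	}

	req := DvrPlaylistRequest{}
	if err := mapstructure.WeakDecode(vars, &req); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", req)
}
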
/integration_tests/streaming_test.go:
--------------------------------------------------------------------------------
1 | package integration_tests
2 |
3 | import (
4 | "fmt"
5 | "github.com/VKCOM/kive/ktypes"
6 | "github.com/stretchr/testify/assert"
7 | "io/ioutil"
8 | "testing"
9 | //"os"
10 | "time"
11 | )
12 |
13 | func init() {
14 | ktypes.Recover = false
15 | }
16 |
17 | func Test_StreamRunSmoke(t *testing.T) {
18 | w := NewWrapper(t)
19 | err := w.W.Listen()
20 | assert.NoError(t, err)
21 |
22 | err = w.W.Serve()
23 | assert.NoError(t, err)
24 | defer w.W.Stop()
25 |
26 | trans := NewStreamer(t, VK_SYNC_MP4, BuildOutput(w, "live", "123"))
27 | trans.MediaFile().SetDuration("2")
28 | done := trans.Run(false)
29 | err = <-done
30 | assert.NoError(t, err)
31 | }
32 |
33 | func Test_StreamRunSeveral(t *testing.T) {
34 | w := NewWrapper(t)
35 | err := w.W.Listen()
36 | assert.NoError(t, err)
37 |
38 | err = w.W.Serve()
39 | assert.NoError(t, err)
40 | defer w.W.Stop()
41 |
42 | wait := make([]<-chan error, 0, 0)
43 | for i := 0; i < 3; i++ {
44 | trans := NewStreamer(t, VK_SYNC_MP4, BuildOutput(w, "live", fmt.Sprintf("stream%d", i)))
45 | trans.MediaFile().SetDuration("15")
46 | done := trans.Run(false)
47 | wait = append(wait, done)
48 | }
49 | for _, c := range wait {
50 | err = <-c
51 | assert.NoError(t, err)
52 | }
53 | }
54 |
55 | func Test_StreamAndRecordLive(t *testing.T) {
56 | tempOut, err := ioutil.TempFile("", "kive_mp4_out_")
57 | assert.NoError(t, err)
58 | //defer os.Remove(tempOut.Name())
59 | t.Log(tempOut.Name())
60 |
61 | w := NewWrapper(t)
62 | err = w.W.Listen()
63 | assert.NoError(t, err)
64 |
65 | err = w.W.Serve()
66 | assert.NoError(t, err)
67 | defer w.W.Stop()
68 |
69 | transStream := NewStreamer(t, VK_SYNC_MP4, BuildOutput(w, "live", "123"))
70 | doneStream := transStream.Run(false)
71 | time.Sleep(5 * time.Second)
72 |
73 | transView := NewViewer(t, BuildInput(w, "live", "123"), tempOut.Name())
74 | doneView := transView.Run(false)
75 |
76 | err = <-doneView
77 | assert.NoError(t, err)
78 |
79 | err = <-doneStream
80 | assert.NoError(t, err)
81 | AssertSameSmoke(t, tempOut.Name(), VK_SYNC_MP4)
82 | }
83 |
--------------------------------------------------------------------------------
/integration_tests/wrapper.go:
--------------------------------------------------------------------------------
1 | package integration_tests
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "fmt"
7 | "github.com/VKCOM/kive/ktypes"
8 | "github.com/VKCOM/kive/noop_api"
9 | "github.com/VKCOM/kive/worker"
10 | "github.com/otiai10/curr"
11 | "github.com/phayes/freeport"
12 | "github.com/stretchr/testify/assert"
13 | "github.com/xfrr/goffmpeg/transcoder"
14 | "io/ioutil"
15 | "os"
16 | "os/exec"
17 | "path/filepath"
18 | "strconv"
19 | "strings"
20 | "testing"
21 | )
22 |
23 | var (
24 | VK_SYNC_MP4 = GetAsset("vk_sync.mp4")
25 | )
26 |
27 | var (
28 | FFROBE_CHECK = "-v quiet -show_entries stream=nb_read_frames -of default=nokey=1:noprint_wrappers=1 -count_frames %s"
29 | )
30 |
31 | type Wrapper struct {
32 | W *worker.Worker
33 | C worker.Config
34 | }
35 |
36 | func NewWrapper(t *testing.T) *Wrapper {
37 | ktypes.ApiInst = &noop_api.NoopApi{}
38 |
39 | dir, err := ioutil.TempDir("", "kive_it_")
40 | assert.NoError(t, err)
41 | defer os.RemoveAll(dir)
42 |
43 | t.Log(dir)
44 | c := worker.NewConfig(worker.DEFAULT_CONFIG)
45 | c.KfsConfig.Basedir = dir
46 | c.RtmpServerConfig.RtmpPort, err = freeport.GetFreePort()
47 | assert.NoError(t, err)
48 | t.Log(c.RtmpServerConfig.RtmpPort)
49 |
50 | c.LiveHlsConfig.HttpPort, err = freeport.GetFreePort()
51 | assert.NoError(t, err)
52 | t.Log(c.LiveHlsConfig.HttpPort)
53 |
54 | w, err := worker.NewWorker(c)
55 | assert.NoError(t, err)
56 | return &Wrapper{
57 | W: w,
58 | C: c,
59 | }
60 | }
61 |
62 | func GetAsset(n string) string {
63 | return filepath.Clean(curr.Dir() + "/../test_assets/" + n)
64 | }
65 |
66 | func BuildOutput(w *Wrapper, app string, streamName string) string {
67 | out := fmt.Sprintf("rtmp://localhost:%d/%s/%s", w.W.Config.RtmpServerConfig.RtmpPort, app, streamName)
68 | return out
69 | }
70 |
71 | func BuildInput(w *Wrapper, app string, streamName string) string {
72 | out := fmt.Sprintf("http://localhost:%d/%s/%s/playlist.m3u8", w.W.Config.LiveHlsConfig.HttpPort, app, streamName)
73 | return out
74 | }
75 |
76 | func NewStreamer(t *testing.T, in string, out string) *transcoder.Transcoder {
77 | trans := new(transcoder.Transcoder)
78 | err := trans.Initialize(in, out)
79 | assert.NoError(t, err)
80 |
81 | trans.MediaFile().SetAudioCodec("copy")
82 | trans.MediaFile().SetVideoCodec("copy")
83 | trans.MediaFile().SetOutputFormat("flv")
84 | trans.MediaFile().SetNativeFramerateInput(true)
85 | trans.MediaFile().SetCopyTs(true)
86 |
87 | return trans
88 | }
89 |
90 | func NewViewer(t *testing.T, in string, out string) *transcoder.Transcoder {
91 | trans := new(transcoder.Transcoder)
92 | err := trans.Initialize(in, out)
93 | assert.NoError(t, err)
94 |
95 | trans.MediaFile().SetAudioCodec("copy")
96 | trans.MediaFile().SetVideoCodec("copy")
97 | trans.MediaFile().SetOutputFormat("mp4")
98 | trans.MediaFile().SetCopyTs(true)
99 |
100 | return trans
101 | }
102 |
103 | func AssertSameSmoke(t *testing.T, l string, r string) bool {
104 | _, err := os.Stat(l)
105 | assert.NoError(t, err)
106 |
107 | _, err = os.Stat(r)
108 | assert.NoError(t, err)
109 |
110 | //transL := new(transcoder.Transcoder)
111 | //err = transL.Initialize(l, "-")
112 | //assert.NoError(t, err)
113 | //
114 | //transR := new(transcoder.Transcoder)
115 | //err = transR.Initialize(r, "-")
116 | //assert.NoError(t, err)
117 | //
118 |
119 | assert.Equal(t, frameCount(t, l), frameCount(t, r))
120 |
121 | return true
122 | }
123 |
124 | func frameCount(t *testing.T, f string) []int {
125 | cmd := exec.Command("ffprobe", strings.Split(fmt.Sprintf(FFROBE_CHECK, f), " ")...)
126 | out, err := cmd.CombinedOutput()
127 | assert.NoError(t, err)
128 | res := make([]int, 0, 0)
129 | scanner := bufio.NewScanner(bytes.NewReader(out))
130 | for scanner.Scan() {
131 | r, err := strconv.ParseInt(scanner.Text(), 10, 32)
132 | assert.NoError(t, err)
133 | res = append(res, int(r))
134 | }
135 | t.Log(f, res)
136 |
137 | return res
138 | }
139 |
--------------------------------------------------------------------------------
/kassets/assets/index.html:
--------------------------------------------------------------------------------
 1 | <!-- HTML markup was stripped during extraction; only the template's text content survives -->
 4 | {{.Title}}
11 | {{.StreamName}} stream
15 | <!-- lines 15-35: inline player <script> block not recovered -->
--------------------------------------------------------------------------------
/kassets/bindata.go:
--------------------------------------------------------------------------------
1 | // Code generated by go-bindata.
2 | // sources:
3 | // assets/index.html
4 | // DO NOT EDIT!
5 |
6 | package kassets
7 |
8 | import (
9 | "bytes"
10 | "compress/gzip"
11 | "fmt"
12 | "io"
13 | "io/ioutil"
14 | "os"
15 | "path/filepath"
16 | "strings"
17 | "time"
18 | )
19 |
20 | func bindataRead(data []byte, name string) ([]byte, error) {
21 | gz, err := gzip.NewReader(bytes.NewBuffer(data))
22 | if err != nil {
23 | return nil, fmt.Errorf("Read %q: %v", name, err)
24 | }
25 |
26 | var buf bytes.Buffer
27 | _, err = io.Copy(&buf, gz)
28 | clErr := gz.Close()
29 |
30 | if err != nil {
31 | return nil, fmt.Errorf("Read %q: %v", name, err)
32 | }
33 | if clErr != nil {
34 | return nil, err
35 | }
36 |
37 | return buf.Bytes(), nil
38 | }
39 |
40 | type asset struct {
41 | bytes []byte
42 | info os.FileInfo
43 | }
44 |
45 | type bindataFileInfo struct {
46 | name string
47 | size int64
48 | mode os.FileMode
49 | modTime time.Time
50 | }
51 |
52 | func (fi bindataFileInfo) Name() string {
53 | return fi.name
54 | }
55 | func (fi bindataFileInfo) Size() int64 {
56 | return fi.size
57 | }
58 | func (fi bindataFileInfo) Mode() os.FileMode {
59 | return fi.mode
60 | }
61 | func (fi bindataFileInfo) ModTime() time.Time {
62 | return fi.modTime
63 | }
64 | func (fi bindataFileInfo) IsDir() bool {
65 | return false
66 | }
67 | func (fi bindataFileInfo) Sys() interface{} {
68 | return nil
69 | }
70 |
71 | var _assetsIndexHtml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x54\xc1\x6a\x23\x39\x10\xbd\xe7\x2b\x0a\x5f\xba\x0d\x59\xc9\xde\x85\xdd\x25\x69\x9b\xcd\xb2\x5e\x12\x48\x42\x88\x3d\xcc\x71\x2c\x4b\x65\x4b\x19\xb5\xd4\x48\xd5\xf6\x18\xd3\xff\x3e\xb4\x34\x4e\xec\xe4\x34\x30\xb7\xea\x52\x55\xbd\x57\x8f\xd7\x55\x69\xaa\xed\xf4\xe2\x02\xa0\xd2\x28\xd4\xf4\x02\x00\xa0\x22\x43\x16\xa7\x87\x03\x5b\xf4\x41\xd7\x55\x3c\x67\xfa\x32\x9e\xeb\xfa\x70\xe5\xd5\x3e\x77\x00\x54\x51\x06\xd3\x10\xc4\x20\x27\x03\x4d\xd4\xc4\x2b\xce\xa5\x72\xec\x25\x2a\xb4\x66\x1b\x98\x43\xe2\xae\xa9\xb9\xb6\x91\xbd\xc4\x7f\xac\x20\x8c\x34\x98\x56\x3c\x77\xa6\x91\x69\x92\x44\x47\x18\x8e\x83\x53\x4a\x8f\x7b\x36\x73\x0a\x28\xea\x47\x51\x63\xd7\x41\x4c\x1f\x15\xd7\xe3\xb3\xca\xad\x51\xe8\x41\xa3\xd9\x68\x9a\x0c\xfe\x1c\x8d\x06\x60\xd4\x64\x90\xd2\x03\x90\xde\x51\xf0\x36\x4e\x2b\x9e\x32\xaf\xec\xf9\x11\xf4\x7c\x9d\xb7\xd1\x66\x5d\xde\xda\xc8\x4c\x9c\xb7\x4d\xe3\x03\xa1\x2a\x87\x43\x38\x9c\x40\x6f\x45\x80\x8c\x3e\x01\xe5\x65\x5b\xa3\x23\xb6\x41\x9a\x59\xec\xc3\x7f\xf7\x77\xaa\x2c\x52\x41\x31\xbc\x7e\xd7\xa7\x6d\x84\x09\x38\xdc\xc1\xad\x8d\xe5\xd9\x73\x2f\x97\xf5\x42\xcd\x7d\x1b\x24\x96\x45\xaf\xed\x15\xe7\xe3\xdf\xff\x62\x23\x36\x62\xe3\xab\xc3\x81\x3d\xf9\x40\x5d\xc7\x0f\x07\x76\xd3\x34\xd6\x48\x41\xc6\xbb\x9c\x38\xd5\x8c\x37\x56\xec\xad\x89\xc4\xea\x3f\xda\xbf\x8b\x0f\x30\x82\x48\x48\xfd\x80\xca\x88\x32\x11\xfd\x50\xe1\x5d\x52\x61\xb6\x45\x47\x91\x3d\xdc\x3c\xde\xfd\x3f\x9b\x2f\xbe\x3c\xdd\x3c\xcf\x67\xff\x5d\xae\x5b\x27\x7b\xe4\xf2\x5c\x18\xc8\xb2\xb0\x1e\xfd\x74\xb7\xee\x2d\xee\x8e\x01\xe7\x90\xfd\x01\x26\x82\xf3\x04\xf1\x28\x37\x78\x07\x8d\x15\xb4\xf6\xa1\x8e\x40\x5a\x10\x28\x9f\x4a\xb4\xd8\x22\x24\xd6\x90\x45\x82\xd9\x37\x42\x17\x8d\x77\x11\xca\x87\xf9\x6c\x08\xe8\xc4\xca\xa2\x62\x27\x30\x9f\x35\x3a\x20\x8d\xb0\x0a\x7e\x17\x31\x80\x16\x11\x56\xad\xb1\xf4\x9b\x71\x70\x7b\x3f\x3f\x42\x43\x29\x35\xca\xaf\xd0\x46\xe3\x36\xb0\x94\xc2\x3d\x59\xb1\x5f\xec\x1b\x5c\x0e\x2f\x61\x87\x20\x85\x83\x26\xf8\x7e\x49\x10\xb9\xb5\x16\xce\xac\x31\x12\x94\x86\x21\x83\xa4\x37\x7c\x7a\xbe\x1f\x82\x32\x01\x25\xd9\x3d\x90\x4f\xe8\xd9\x31\x98\x3d\x02\xa4\x83\x6f\x37\x9a\xd2\xd3\x32\x06\xb9\xec\x27\x37\x18\x68\x7f\xca\x7d\xa1\x4d\x12\x28\x53\x4a\x4b\x1c\x89\x1f\x49\xfb\x75\xca\x37\x56\x18\x77\x0e\x72\x09\x3b\x43\xda\xb7\xf4\xa3\x3d\xeb\xfd\x3a\x1e\xd0\x46\x04\xb3\x86\x6c\x01\x76\xb2\x6f\x59\x88\x37\x7b\xf1\xad\x53\xac\xff\x46\x56\x37\xb8\x69\x83\x2d\xde\xff\x10\xa9\x3f\x06\x09\x13\xf8\x85\xb6\xbd\xfe\x00\x21\x94\x4a\x8e\xbc\x37\x91\xd0\x61\x28\x0b\x29\x5c\xdf\x54\xfc\xac\x25\xcf\x4c\xf9\xea\xca\xb3\x13\x55\xf1\x7c\xf6\x2a\x9e\x4f\xe7\xf7\x00\x00\x00\xff\xff\xd6\xac\x5c\xe8\x42\x05\x00\x00")
72 |
73 | func assetsIndexHtmlBytes() ([]byte, error) {
74 | return bindataRead(
75 | _assetsIndexHtml,
76 | "assets/index.html",
77 | )
78 | }
79 |
80 | func assetsIndexHtml() (*asset, error) {
81 | bytes, err := assetsIndexHtmlBytes()
82 | if err != nil {
83 | return nil, err
84 | }
85 |
86 | info := bindataFileInfo{name: "assets/index.html", size: 1346, mode: os.FileMode(420), modTime: time.Unix(1548966886, 0)}
87 | a := &asset{bytes: bytes, info: info}
88 | return a, nil
89 | }
90 |
91 | // Asset loads and returns the asset for the given name.
92 | // It returns an error if the asset could not be found or
93 | // could not be loaded.
94 | func Asset(name string) ([]byte, error) {
95 | cannonicalName := strings.Replace(name, "\\", "/", -1)
96 | if f, ok := _bindata[cannonicalName]; ok {
97 | a, err := f()
98 | if err != nil {
99 | return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
100 | }
101 | return a.bytes, nil
102 | }
103 | return nil, fmt.Errorf("Asset %s not found", name)
104 | }
105 |
106 | // MustAsset is like Asset but panics when Asset would return an error.
107 | // It simplifies safe initialization of global variables.
108 | func MustAsset(name string) []byte {
109 | a, err := Asset(name)
110 | if err != nil {
111 | panic("asset: Asset(" + name + "): " + err.Error())
112 | }
113 |
114 | return a
115 | }
116 |
117 | // AssetInfo loads and returns the asset info for the given name.
118 | // It returns an error if the asset could not be found or
119 | // could not be loaded.
120 | func AssetInfo(name string) (os.FileInfo, error) {
121 | cannonicalName := strings.Replace(name, "\\", "/", -1)
122 | if f, ok := _bindata[cannonicalName]; ok {
123 | a, err := f()
124 | if err != nil {
125 | return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
126 | }
127 | return a.info, nil
128 | }
129 | return nil, fmt.Errorf("AssetInfo %s not found", name)
130 | }
131 |
132 | // AssetNames returns the names of the assets.
133 | func AssetNames() []string {
134 | names := make([]string, 0, len(_bindata))
135 | for name := range _bindata {
136 | names = append(names, name)
137 | }
138 | return names
139 | }
140 |
141 | // _bindata is a table, holding each asset generator, mapped to its name.
142 | var _bindata = map[string]func() (*asset, error){
143 | "assets/index.html": assetsIndexHtml,
144 | }
145 |
146 | // AssetDir returns the file names below a certain
147 | // directory embedded in the file by go-bindata.
148 | // For example if you run go-bindata on data/... and data contains the
149 | // following hierarchy:
150 | // data/
151 | // foo.txt
152 | // img/
153 | // a.png
154 | // b.png
155 | // then AssetDir("data") would return []string{"foo.txt", "img"}
156 | // AssetDir("data/img") would return []string{"a.png", "b.png"}
157 | // AssetDir("foo.txt") and AssetDir("notexist") would return an error
158 | // AssetDir("") will return []string{"data"}.
159 | func AssetDir(name string) ([]string, error) {
160 | node := _bintree
161 | if len(name) != 0 {
162 | cannonicalName := strings.Replace(name, "\\", "/", -1)
163 | pathList := strings.Split(cannonicalName, "/")
164 | for _, p := range pathList {
165 | node = node.Children[p]
166 | if node == nil {
167 | return nil, fmt.Errorf("Asset %s not found", name)
168 | }
169 | }
170 | }
171 | if node.Func != nil {
172 | return nil, fmt.Errorf("Asset %s not found", name)
173 | }
174 | rv := make([]string, 0, len(node.Children))
175 | for childName := range node.Children {
176 | rv = append(rv, childName)
177 | }
178 | return rv, nil
179 | }
180 |
181 | type bintree struct {
182 | Func func() (*asset, error)
183 | Children map[string]*bintree
184 | }
185 |
186 | var _bintree = &bintree{nil, map[string]*bintree{
187 | "assets": &bintree{nil, map[string]*bintree{
188 | "index.html": &bintree{assetsIndexHtml, map[string]*bintree{}},
189 | }},
190 | }}
191 |
192 | // RestoreAsset restores an asset under the given directory
193 | func RestoreAsset(dir, name string) error {
194 | data, err := Asset(name)
195 | if err != nil {
196 | return err
197 | }
198 | info, err := AssetInfo(name)
199 | if err != nil {
200 | return err
201 | }
202 | err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
203 | if err != nil {
204 | return err
205 | }
206 | err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
207 | if err != nil {
208 | return err
209 | }
210 | err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
211 | if err != nil {
212 | return err
213 | }
214 | return nil
215 | }
216 |
217 | // RestoreAssets restores an asset under the given directory recursively
218 | func RestoreAssets(dir, name string) error {
219 | children, err := AssetDir(name)
220 | // File
221 | if err != nil {
222 | return RestoreAsset(dir, name)
223 | }
224 | // Dir
225 | for _, child := range children {
226 | err = RestoreAssets(dir, filepath.Join(name, child))
227 | if err != nil {
228 | return err
229 | }
230 | }
231 | return nil
232 | }
233 |
234 | func _filePath(dir, name string) string {
235 | cannonicalName := strings.Replace(name, "\\", "/", -1)
236 | return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
237 | }
238 |
--------------------------------------------------------------------------------
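
Asset sketch (not repository code): loading the embedded template the way player.go does, and restoring it to disk with the generated helpers. The output directory is an arbitrary assumption.

package main

import (
	"fmt"

	"github.com/VKCOM/kive/kassets"
)

func main() {
	b, err := kassets.Asset("assets/index.html")
	if err != nil {
		panic(err)
	}
	fmt.Printf("embedded player template: %d bytes\n", len(b))

	// Writes the asset back out under ./restored/assets/index.html.
	if err := kassets.RestoreAsset("./restored", "assets/index.html"); err != nil {
		panic(err)
	}
}
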
/kfs/config.go:
--------------------------------------------------------------------------------
1 | package kfs
2 |
3 | import "time"
4 |
5 | type KfsConfig struct {
6 | Basedir string
7 | MaxSize int
8 | MaxSourceSize int
9 | RemovalTime time.Duration
10 | MaxCacheSize int64
11 | WriteTimeout time.Duration
12 | }
13 |
14 | func NewKfsConfig() KfsConfig {
15 | return KfsConfig{
16 | Basedir: "/tmp/kive/",
17 | MaxSize: 1024 * 1024 * 20,
18 | MaxSourceSize: 1024 * 1024 * 80,
19 | RemovalTime: 24*1*time.Hour + 14*time.Hour,
20 | MaxCacheSize: 5000000,
21 | WriteTimeout: 1 * time.Second,
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
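
Configuration sketch (not repository code): tuning KfsConfig before constructing a Filesystem, following what kfs_test.go does for its in-memory runs.

package main

import "github.com/VKCOM/kive/kfs"

func main() {
	config := kfs.NewKfsConfig()
	config.Basedir = kfs.INMEMORY // keep chunk buffers in memory, skip the disk writer
	config.RemovalTime = 0        // 0 disables the cleanup goroutine

	fs, err := kfs.NewFilesystem(config)
	if err != nil {
		panic(err)
	}
	defer fs.Finalize()
	// fs.Writer / fs.Reader can now be used with ktypes.ChunkInfo keys.
}
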
/kfs/filesystem.go:
--------------------------------------------------------------------------------
1 | package kfs
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "io"
7 | "io/ioutil"
8 | "os"
9 | "path"
10 | "path/filepath"
11 | "sort"
12 | "strings"
13 | "sync"
14 | "syscall"
15 | "time"
16 |
17 | "github.com/google/btree"
18 | "github.com/pkg/errors"
19 | "github.com/sirupsen/logrus"
20 | "github.com/VKCOM/kive/ktypes"
21 | )
22 |
23 | const (
24 | INMEMORY = "memory"
25 | )
26 |
27 | type Filesystem struct {
28 | Md *Metadata
29 |
30 | config KfsConfig
31 | writeQueue chan *bufferItem
32 | writeMetaQueue chan innerStorage
33 | deleteQueue chan innerStorage
34 | streamDeletion chan string
35 |
36 | finalizerWriters sync.WaitGroup
37 |
38 | m sync.RWMutex
39 | buffers *btree.BTree
40 | activeWriters uint32
41 |
42 | stopped bool
43 | }
44 |
45 | type bufferItem struct {
46 | resultKey innerStorage
47 | queue *queue
48 | }
49 |
50 | func (bi *bufferItem) Less(rhs btree.Item) bool {
51 | return bi.resultKey.ChunkKey.Less(rhs.(*bufferItem).resultKey.ChunkKey)
52 | }
53 |
54 | func NewFilesystem(config KfsConfig) (*Filesystem, error) {
55 | if config.Basedir != INMEMORY {
56 | basedir, err := filepath.EvalSymlinks(config.Basedir)
57 |
58 | if err != nil {
59 | return nil, errors.Errorf("cannot evaluate symlinks %s", basedir)
60 | }
61 |
62 | if !filepath.IsAbs(basedir) {
63 | return nil, errors.Errorf("not abs path %s", basedir)
64 | }
65 |
66 | err = os.MkdirAll(basedir, os.ModePerm)
67 |
68 | if err != nil {
69 | return nil, errors.Wrapf(err, "cannot create directory %s", basedir)
70 | }
71 | config.Basedir = filepath.Clean(config.Basedir)
72 | }
73 |
74 | fs := &Filesystem{
75 | config: config,
76 | buffers: btree.New(2),
77 | writeQueue: make(chan *bufferItem, 256),
78 | writeMetaQueue: make(chan innerStorage, 256),
79 | deleteQueue: make(chan innerStorage, 1024*100),
80 | streamDeletion: make(chan string, 300),
81 | Md: NewMetadata(config),
82 | }
83 |
84 | if config.Basedir != INMEMORY {
85 | go fs.writerGoroutine()
86 | }
87 |
88 | if config.RemovalTime != 0 {
89 | go fs.cleanUpGoroutine()
90 | }
91 |
92 | return fs, nil
93 | }
94 |
95 | func (fs *Filesystem) Finalize() {
96 | fs.m.Lock()
97 | wasStopped := fs.stopped
98 | fs.stopped = true
99 | fs.m.Unlock()
100 | if wasStopped {
101 | return
102 | }
103 |
104 | fs.finalizerWriters.Wait()
105 | close(fs.writeQueue)
106 | close(fs.deleteQueue)
107 | close(fs.streamDeletion)
108 | }
109 |
110 | func (fs *Filesystem) Delete(ci ktypes.ChunkInfo) error {
111 | key := innerStorage(ci)
112 | err := validateStorageInfo(key)
113 |
114 | if err != nil {
115 | return errors.Wrapf(err, "invalid chunk info %+v", key)
116 | }
117 |
118 | fs.deleteFromBuffer(key)
119 | select {
120 | case fs.deleteQueue <- key:
121 | default:
122 | return errors.New("cannot add to deletion queue")
123 | }
124 | return nil
125 | }
126 |
127 | func (fs *Filesystem) Reader(ci ktypes.ChunkInfo) (io.ReadCloser, error) {
128 | logrus.WithField("stream_name", ci.StreamName).Debugf("Getting %+v", ci)
129 | key := innerStorage(ci)
130 | err := validateStorageInfo(key)
131 |
132 | if err != nil {
133 | return nil, errors.Wrapf(err, "cannot get reader %+v", key)
134 | }
135 |
136 | reader, err := fs.readerBuffer(key)
137 | if err != nil {
138 | return nil, errors.Wrapf(err, "cannot get reader %+v", key)
139 | }
140 | if reader != nil {
141 | return reader, nil
142 | }
143 |
144 | return fs.fileReader(fs.config.buildFullPath(key))
145 | }
146 |
147 | func (fs *Filesystem) Writer(ch ktypes.ChunkInfo) (*Writer, error) {
148 | key := innerStorage(ch)
149 | err := validateStorageInfo(key)
150 | key.Duration = ktypes.DEFAULT_DURATION
151 | if err != nil {
152 | return nil, errors.Wrapf(err, "cannot get writer %+v", key)
153 | }
154 |
155 | if fs.config.Basedir != INMEMORY {
156 | err = os.MkdirAll(fs.config.buildDirPath(key), os.ModePerm)
157 | if err != nil {
158 | return nil, errors.Wrapf(err, "cannot create dir for %+v", key)
159 | }
160 | }
161 |
162 | maxQueueSize := fs.config.MaxSize
163 | if ch.StreamType == ktypes.SOURCE {
164 | maxQueueSize = fs.config.MaxSourceSize
165 | }
166 |
167 | bufi := &bufferItem{
168 | resultKey: key,
169 | queue: NewQueue(maxQueueSize),
170 | }
171 |
172 | fs.m.Lock()
173 | defer fs.m.Unlock()
174 | if fs.stopped {
175 | return nil, errors.New("already stopped")
176 | }
177 | item := fs.buffers.Get(bufi)
178 |
179 | if item != nil {
180 | return nil, errors.Errorf("already written %+v", key)
181 | }
182 |
183 | fs.buffers.ReplaceOrInsert(bufi)
184 | fs.finalizerWriters.Add(1)
185 | return &Writer{item: bufi, fs: fs}, nil
186 | }
187 |
188 | func (fs *Filesystem) WriteMeta(key ktypes.ChunkInfo) error {
189 | return fs.writeMeta(innerStorage(key))
190 | }
191 |
192 | func (fs *Filesystem) fileReader(fullname string) (io.ReadCloser, error) {
193 | if fs.config.Basedir == INMEMORY {
194 | return nil, errors.Errorf("cannot get file reader %s", fullname)
195 | }
196 | b, err := ioutil.ReadFile(fullname)
197 |
198 | if err != nil {
199 | return nil, errors.Wrapf(err, "cannot get file reader %s", fullname)
200 | }
201 | return ioutil.NopCloser(bytes.NewReader(b)), nil
202 | }
203 |
204 | func (fs *Filesystem) deleteFromBuffer(rkey innerStorage) {
205 | bufi := bufferItem{
206 | resultKey: rkey,
207 | }
208 |
209 | fs.m.Lock()
210 | defer fs.m.Unlock()
211 | fs.buffers.Delete(&bufi)
212 | }
213 |
214 | func (fs *Filesystem) readerBuffer(rkey innerStorage) (io.ReadCloser, error) {
215 | fs.m.Lock()
216 | defer fs.m.Unlock()
217 |
218 | bufi := bufferItem{
219 | resultKey: rkey,
220 | }
221 | item := fs.buffers.Get(&bufi)
222 | if item == nil {
223 | return nil, nil
224 | }
225 |
226 | return ioutil.NopCloser(item.(*bufferItem).queue.Oldest()), nil
227 | }
228 |
229 | func (fs *Filesystem) eraseBuffer(bufi *bufferItem) error {
230 | fs.m.Lock()
231 | defer fs.m.Unlock()
232 |
233 | item := fs.buffers.Delete(bufi)
234 | if item == nil {
235 | return errors.Errorf("no such buffer %+v", bufi)
236 | }
237 | fs.finalizerWriters.Done()
238 | return nil
239 | }
240 |
241 | func (fs *Filesystem) finalizeWriter(bufi *bufferItem) error {
242 | err := bufi.queue.Close()
243 | if err != nil {
244 | return errors.Wrapf(err, "cannot finalize writer %+v", bufi)
245 | }
246 |
247 | if fs.config.Basedir == INMEMORY {
248 | fs.finalizerWriters.Done()
249 | return nil
250 | }
251 |
252 | fs.m.Lock()
253 | bufi.resultKey.Size = bufi.queue.GetSize()
254 | item := fs.buffers.Get(bufi)
255 | fs.m.Unlock()
256 |
257 | if item == nil {
258 | return errors.Errorf("cannot finalize %+v", bufi)
259 | }
260 |
261 | select {
262 | case fs.writeQueue <- bufi:
263 | ktypes.Stat(false, "disk_write", "done", "")
264 | return nil
265 | case <-time.After(fs.config.WriteTimeout):
266 | ktypes.Stat(true, "disk_write", "first_timeout", "")
267 | }
268 |
269 | fs.writeQueue <- bufi
270 | return nil
271 | }
272 |
273 | func (fs *Filesystem) writeMeta(key innerStorage) error {
274 | fs.writeMetaQueue <- key
275 | return nil
276 | }
277 |
278 | func (fs *Filesystem) deleteFile(key innerStorage) error {
279 | fullname := fs.config.buildFullPath(key)
280 | logrus.WithField("stream_name", key.StreamName).Debugf("Deleting %s", fullname)
281 | return os.Remove(fullname)
282 | }
283 |
284 | func (fs *Filesystem) writeFile(bufi *bufferItem) (int64, error) {
285 | fullname := fs.config.buildFullPath(bufi.resultKey)
286 |
287 | err := fs.Md.writeFsInfo(bufi.resultKey, fullname)
288 | if err != nil {
289 | return 0, errors.Wrapf(err, "cannot write json %s", fullname)
290 | }
291 |
292 | file, err := os.OpenFile(fullname, os.O_WRONLY|os.O_CREATE|os.O_EXCL, os.ModePerm)
293 | if err != nil {
294 | return 0, errors.Wrapf(err, "cannot open file %s", fullname)
295 | }
296 | defer file.Close()
297 |
298 | written, err := io.Copy(file, bufi.queue.Oldest())
299 | if err != nil {
300 | return written, errors.Wrapf(err, "cannot read from buffer %s %d", fullname, written)
301 | }
302 | return written, nil
303 | }
304 |
305 | func (fs *Filesystem) walkBuffer(from ktypes.ChunkKey, to ktypes.ChunkKey) (ktypes.SChunkInfo, error) {
306 | if to.Less(from) {
307 | return nil, errors.Errorf("from to out of order")
308 | }
309 |
310 | result := make(ktypes.SChunkInfo, 0, 30)
311 | fs.m.RLock()
312 | defer fs.m.RUnlock()
313 | bufiFrom := &bufferItem{
314 | resultKey: innerStorage{ChunkKey: from},
315 | }
316 |
317 | iterator := func(i btree.Item) bool {
318 | val := i.(*bufferItem)
319 |
320 | if to.Less(val.resultKey.ChunkKey) {
321 | return false
322 | }
323 |
324 | result = append(result, ktypes.ChunkInfo(val.resultKey))
325 | return true
326 | }
327 | fs.buffers.AscendGreaterOrEqual(bufiFrom, iterator)
328 | return result, nil
329 | }
330 |
331 | func readDirNames(dirname string) ([]string, error) {
332 | info, err := os.Stat(dirname)
333 | if err != nil {
334 | return nil, errors.Wrapf(err, "cannot stat %s", dirname)
335 | }
336 |
337 | if !info.IsDir() {
338 | return nil, errors.Errorf("not a directory %s", dirname)
339 | }
340 |
341 | f, err := os.Open(dirname)
342 | if err != nil {
343 | return nil, err
344 | }
345 | defer f.Close()
346 | names, err := f.Readdirnames(-1)
347 |
348 | if err != nil {
349 | return nil, err
350 | }
351 | sort.Strings(names)
352 | return names, nil
353 | }
354 |
355 | func (fs *Filesystem) writerGoroutine() {
356 | for {
357 | select {
358 | case bufi := <-fs.writeQueue:
359 | if bufi == nil {
360 | return
361 | }
362 | _, err := fs.writeFile(bufi)
363 | if err != nil {
364 | logrus.WithField("stream_name", bufi.resultKey.StreamName).Fatalf("Cannot write file %+v", err)
365 | }
366 | err = fs.eraseBuffer(bufi)
367 | if err != nil {
368 | logrus.WithField("stream_name", bufi.resultKey.StreamName).Fatalf("Cannot erase buffer %+v", err)
369 | }
370 | case key := <-fs.writeMetaQueue:
371 | err := fs.Md.writeFsInfo(key, path.Join(fs.config.Basedir, key.buildDirStreamType(key.VirtualStreamType), "metadata.json"))
372 | if err != nil {
373 | logrus.WithField("stream_name", key.StreamName).Fatalf("Cannot write meta file %+v", err)
374 | }
375 | case key, more := <-fs.deleteQueue:
376 | if !more {
377 | continue
378 | }
379 | err := fs.deleteFile(key)
380 | if err != nil {
381 | logrus.WithField("stream_name", key.StreamName).Errorf("Cannot erase file %+v", err)
382 | }
383 | case key, more := <-fs.streamDeletion:
384 | if !more {
385 | continue
386 | }
387 | err := fs.removeEmptystream(key)
388 | if err != nil {
389 | logrus.WithField("stream_name", key).Errorf("Cannot erase stream %+v", err)
390 | }
391 | }
392 |
393 | }
394 | }
395 |
396 | func (fs *Filesystem) removeEmptystream(streamName string) error {
397 | sourceDir := fmt.Sprintf("%s/%s/%s", fs.config.Basedir, streamName, ktypes.SOURCE)
398 | err := os.MkdirAll(sourceDir, os.ModePerm)
399 | if err != nil {
400 | return errors.Wrapf(err, "cannot ensure dir, %s", sourceDir)
401 | }
402 |
403 | hourListFile, err := os.Open(sourceDir)
404 | if err != nil {
405 | return errors.Wrapf(err, "cannot open hours dir for %s, %s", streamName, sourceDir)
406 | }
407 | defer hourListFile.Close()
408 | hours, err := hourListFile.Readdirnames(-1)
409 | if err != nil {
410 | return errors.Wrapf(err, "cannot get hours list for %s", streamName)
411 | }
412 |
413 | if len(hours) != 0 {
414 | ktypes.Stat(false, "removal_check", "remove_whole_dir_skipped", fmt.Sprintf("%d", len(hours)))
415 | return nil
416 | } else {
417 | streamDir := fmt.Sprintf("%s/%s", fs.config.Basedir, streamName)
418 | err := os.RemoveAll(streamDir)
419 | ktypes.Stat(err != nil, "removal_check", "removed_whole_dir", fmt.Sprintf("%d", len(hours)))
420 | }
421 | return nil
422 | }
423 |
424 | func (fs *Filesystem) cleanUpGoroutine() {
425 | for {
426 | free, _ := diskFree(fs.config.Basedir)
427 | now := time.Now()
428 | latestRemoval := now.Add(-fs.config.RemovalTime)
429 | latestRemovalString := ktypes.HourString(ktypes.TimeToMillis(latestRemoval))
430 |
431 | logrus.Debugf("Walk started %+v free %+v removal after %+v", fs.config.Basedir, free, latestRemovalString)
432 |
433 | walkStream := fs.removeOldFiles
434 |
435 | walkStreams := func() error {
436 |
437 | streams, err := readDirNames(fs.config.Basedir)
438 | if err != nil {
439 | return errors.Wrap(err, "cannot get stream list")
440 | }
441 |
442 | for _, streamName := range streams {
443 | time.Sleep(1 * time.Second)
444 | streamTypes, err := readDirNames(path.Join(fs.config.Basedir, streamName))
445 | if err != nil {
446 | return errors.Wrap(err, "cannot get stream list")
447 | }
448 | for _, streamType := range streamTypes {
449 | err := walkStream(streamName, streamType, latestRemovalString)
450 | ktypes.Stat(err != nil, "removal_check", "walk_stream", "")
451 | if err != nil {
452 | logrus.Errorf("cannon delete %+v ", err)
453 | }
454 | }
455 | }
456 | return nil
457 | }
458 |
459 | err := walkStreams()
460 | logrus.Infof("Walk finished %+v", err)
461 |
462 | time.Sleep(10 * time.Second)
463 | }
464 | }
465 |
466 | func diskFree(path string) (uint64, error) {
467 | fs := syscall.Statfs_t{}
468 | err := syscall.Statfs(path, &fs)
469 | if err != nil {
470 | return 0, errors.Wrap(err, "cannot get disk usage")
471 | }
472 | return fs.Bfree * uint64(fs.Bsize), nil
473 | }
474 |
475 | func (fs *Filesystem) ensureEmptyDir(streamName string) error {
476 | select {
477 | case fs.streamDeletion <- streamName:
478 | default:
479 | return errors.Errorf("cannot add deletion check")
480 | }
481 | return nil
482 | }
483 |
484 | func (fs *Filesystem) removeOldFiles(streamName, streamType, latestRemovalString string) error {
485 | sourceDir := fmt.Sprintf("%s/%s/%s", fs.config.Basedir, streamName, streamType)
486 | if _, err := os.Stat(sourceDir); os.IsNotExist(err) {
487 | err := fs.ensureEmptyDir(streamName)
488 | ktypes.Stat(err != nil, "removal_check", "send_check_whole_dir_no_source", "")
489 | return nil
490 | }
491 |
492 | hourListFile, err := os.Open(sourceDir)
493 | if err != nil {
494 | return errors.Wrapf(err, "cannot open hours dir for %s, %s", streamName, sourceDir)
495 | }
496 | defer hourListFile.Close()
497 | hours, err := hourListFile.Readdirnames(-1)
498 | if err != nil {
499 | return errors.Wrapf(err, "cannot get hours list for %s", streamName)
500 | }
501 |
502 | removedCount := 0
503 | for _, hourName := range hours {
504 | time.Sleep(1 * time.Millisecond)
505 |
506 | if strings.Compare(hourName, latestRemovalString) < 0 {
507 | removeHourDir := fmt.Sprintf("%s/%s", sourceDir, hourName)
508 | err := os.RemoveAll(removeHourDir)
509 | ktypes.Stat(err != nil, "removal_check", "removed_hour", "")
510 | logrus.Debugf("removing %s %s %s", hourName, latestRemovalString, removeHourDir)
511 | time.Sleep(100 * time.Millisecond)
512 | removedCount += 1
513 | } else {
514 | ktypes.Stat(false, "removal_check", "skipped_hour", "")
515 | }
516 |
517 | }
518 |
519 | if len(hours) == removedCount {
520 | err := fs.ensureEmptyDir(streamName)
521 | ktypes.Stat(err != nil, "removal_check", "send_check_whole_dir", fmt.Sprintf("%d", len(hours)))
522 | } else {
523 | ktypes.Stat(err != nil, "removal_check", "skip_check_whole_dir", fmt.Sprintf("%d", len(hours)))
524 | }
525 |
526 | return nil
527 | }
528 |
--------------------------------------------------------------------------------
/kfs/key.go:
--------------------------------------------------------------------------------
1 | package kfs
2 |
3 | import (
4 | "fmt"
5 | "github.com/pkg/errors"
6 | "github.com/VKCOM/kive/ktypes"
7 | "regexp"
8 | "sort"
9 | "strings"
10 | )
11 |
12 | var (
13 | isValidName = regexp.MustCompile(`^[[:alnum:]|[_-]{2,}$`).MatchString
14 | )
15 |
16 | type innerStorage ktypes.ChunkInfo
17 |
18 | func (key *innerStorage) buildRelative() string {
19 | return fmt.Sprintf("%s/%s/%s/%015d.%s.%s.%d.%s.%d.ts", key.StreamName, key.StreamType, key.HourString(), key.Ts, key.StreamName, key.StreamType, key.Discontinuity, key.StreamDesc, key.Rand)
20 | }
21 |
22 | func (key *innerStorage) buildDir() string {
23 | return fmt.Sprintf("%s/%s/%s", key.StreamName, key.StreamType, key.HourString())
24 | }
25 |
26 | func (key *innerStorage) buildDirStreamType(streamType ktypes.StreamType) string {
27 | return fmt.Sprintf("%s/%s/%s", key.StreamName, streamType, key.HourString())
28 | }
29 |
30 | func validateStorageInfo(info innerStorage) error {
31 | if !isValidName(info.StreamDesc) {
32 | return errors.Errorf("bad filename")
33 | }
34 | return validateStorageKey(info.ChunkKey)
35 | }
36 |
37 | func validateStorageKey(key ktypes.ChunkKey) error {
38 | if key.Ts == 0 {
39 | return errors.Errorf("bad time")
40 | }
41 |
42 | if !isValidName(key.StreamName) {
43 | return errors.Errorf("bad stream name")
44 | }
45 | return nil
46 | }
47 |
48 | func parseStorageKey(fname string) (innerStorage, error) {
49 | fname = strings.Replace(fname, ".", " ", -1)
50 | key := innerStorage{}
51 | streamType := ""
52 | n, err := fmt.Sscanf(fname, "%015d_%015d %s %s %d %s %d ts", &key.Ts, &key.SeqId, &key.StreamName, &streamType, &key.Discontinuity, &key.StreamDesc, &key.Rand)
53 | key.StreamType = ktypes.StreamType(streamType)
54 | if n != 7 || err != nil {
55 | return key, errors.Wrapf(err, "%d cannot parse +%s", n, fname)
56 | }
57 | err = validateStorageInfo(key)
58 | if err != nil {
59 | return key, errors.Wrapf(err, "cannot parse +%s", fname)
60 | }
61 | key.Duration = ktypes.DEFAULT_DURATION
62 | return key, nil
63 | }
64 |
65 | func SearchSstorage(a ktypes.SChunkInfo, x innerStorage) int {
66 | return sort.Search(len(a), func(i int) bool { return !a[i].ChunkKey.Less(x.ChunkKey) })
67 | }
68 |
--------------------------------------------------------------------------------
/kfs/kfs_test.go:
--------------------------------------------------------------------------------
1 | package kfs_test
2 |
3 | import (
4 | "github.com/stretchr/testify/assert"
5 | "github.com/stretchr/testify/suite"
6 |
7 | "io/ioutil"
8 | "math/rand"
9 | "os"
10 | "sync"
11 | "testing"
12 | "time"
13 |
14 | "github.com/stretchr/testify/require"
15 | "github.com/VKCOM/kive/kfs"
16 | "github.com/VKCOM/kive/ktypes"
17 | )
18 |
19 | type KfsTestsSuite struct {
20 | suite.Suite
21 | segments *kfs.Filesystem
22 | path string
23 | tpl ktypes.ChunkInfo
24 | inMemory bool
25 | }
26 |
27 | func (s *KfsTestsSuite) SetupTest() {
28 | path, err := ioutil.TempDir("", "kive_test")
29 | if err != nil {
30 | panic("Cannot run test")
31 | }
32 | s.path = path
33 | config := kfs.NewKfsConfig()
34 | config.RemovalTime = 0
35 | if s.inMemory {
36 | config.Basedir = kfs.INMEMORY
37 | config.RemovalTime = 0
38 | } else {
39 | config.Basedir = path
40 | }
41 | s.segments, err = kfs.NewFilesystem(config)
42 | if err != nil {
43 | panic("Cannot run test")
44 | }
45 |
46 | s.tpl = ktypes.ChunkInfo{
47 | StreamDesc: "test",
48 | Size: 0,
49 | Rand: rand.Int(),
50 | Discontinuity: 1,
51 | ChunkKey: ktypes.ChunkKey{
52 | StreamType: ktypes.SOURCE,
53 | StreamName: "test1",
54 | Ts: 1,
55 | SeqId: 0,
56 | },
57 | }
58 |
59 | }
60 |
61 | func (s *KfsTestsSuite) TearDownTest() {
62 | s.segments.Finalize()
63 | if s.path != "" {
64 | os.RemoveAll(s.path)
65 | }
66 | }
67 |
68 | func (s *KfsTestsSuite) TestWriteReadBuffer() {
69 | wbytes := []byte{1, 2, 3}
70 |
71 | w, _ := s.segments.Writer(s.tpl)
72 |
73 | _, _ = w.Write(wbytes)
74 | w.Close()
75 |
76 | r, _ := s.segments.Reader(s.tpl)
77 | rbytes := []byte{0, 0, 0}
78 | r.Read(rbytes)
79 | assert.Equal(s.T(), wbytes, rbytes)
80 | }
81 |
82 | func (s *KfsTestsSuite) TestReadBeforeWrite() {
83 | wbytes := []byte{1, 2, 3}
84 |
85 | w, _ := s.segments.Writer(s.tpl)
86 | _, _ = w.Write(wbytes)
87 | r, _ := s.segments.Reader(s.tpl)
88 | w.Close()
89 | rbytes := []byte{0, 0, 0}
90 | sw := sync.WaitGroup{}
91 |
92 | sw.Add(1)
93 | go func() {
94 | r.Read(rbytes)
95 | sw.Done()
96 | }()
97 | sw.Wait()
98 |
99 | assert.Equal(s.T(), wbytes, rbytes)
100 | }
101 |
102 | func (s *KfsTestsSuite) TestWriteReadFinalized() {
103 | wbytes := []byte{1, 2, 3}
104 |
105 | w, _ := s.segments.Writer(s.tpl)
106 | _, _ = w.Write(wbytes)
107 | w.Close()
108 |
109 | s.segments.Finalize()
110 |
111 | r, _ := s.segments.Reader(s.tpl)
112 | rbytes := []byte{0, 0, 0}
113 | r.Read(rbytes)
114 | assert.Equal(s.T(), wbytes, rbytes)
115 | }
116 |
117 | func (s *KfsTestsSuite) TestFinalize() {
118 | s.segments.Finalize()
119 | }
120 |
121 | func (s *KfsTestsSuite) TestReadWriteRange() {
122 | wbytes1 := []byte{1, 2, 3}
123 | wbytes2 := []byte{1, 2, 3, 4}
124 | wbytes3 := []byte{1, 2, 3, 4, 5}
125 |
126 | k1 := s.tpl
127 | k2 := s.tpl
128 | k3 := s.tpl
129 |
130 | k1.Ts = 3600 * 1000
131 | k2.Ts = 3700 * 1000
132 | k3.Ts = 7200 * 1000
133 |
134 | w1, _ := s.segments.Writer(k1)
135 | w2, _ := s.segments.Writer(k2)
136 | w3, _ := s.segments.Writer(k3)
137 |
138 | _, _ = w1.Write(wbytes1)
139 | w1.SetChunkDuration(1)
140 | w1.Close()
141 | _, _ = w2.Write(wbytes2)
142 | w2.SetChunkDuration(2)
143 | w2.Close()
144 | _, _ = w3.Write(wbytes3)
145 | w3.SetChunkDuration(3)
146 | w3.Close()
147 |
148 | _, err := s.segments.Reader(s.tpl)
149 | assert.Error(s.T(), err)
150 |
151 | ci, _ := s.segments.Md.Walk(k1.StreamName, k1.StreamType, k1.Ts, k3.Ts)
152 | require.Equal(s.T(), 3, len(ci))
153 | assert.Equal(s.T(), 3, ci[0].Size)
154 | assert.Equal(s.T(), 4, ci[1].Size)
155 | assert.Equal(s.T(), 5, ci[2].Size)
156 |
157 | assert.Equal(s.T(), time.Duration(1), ci[0].Duration)
158 | assert.Equal(s.T(), time.Duration(2), ci[1].Duration)
159 | assert.Equal(s.T(), time.Duration(3), ci[2].Duration)
160 |
161 | }
162 |
163 | func TestStorageTestsSuiteAll(t *testing.T) {
164 | suite.Run(t, new(KfsTestsSuite))
165 | }
166 |
167 | func TestStorageTestsSuiteInMemory(t *testing.T) {
168 | is := new(KfsTestsSuite)
169 | is.inMemory = true
170 | suite.Run(t, is)
171 | }
172 |
--------------------------------------------------------------------------------
/kfs/metadata.go:
--------------------------------------------------------------------------------
1 | package kfs
2 |
3 | import (
4 | "bufio"
5 | "encoding/json"
6 | "fmt"
7 | "github.com/karlseguin/ccache"
8 | "github.com/pkg/errors"
9 | "github.com/VKCOM/kive/ktypes"
10 | "io"
11 | "io/ioutil"
12 | "os"
13 | "path"
14 | "path/filepath"
15 | "sort"
16 | "sync"
17 | "time"
18 | )
19 |
20 | type Metadata struct {
21 | m sync.RWMutex
22 | config KfsConfig
23 | infoCache *ccache.Cache
24 | }
25 |
26 | func NewMetadata(config KfsConfig) *Metadata {
27 | return &Metadata{
28 | config: config,
29 | infoCache: ccache.New(ccache.Configure().MaxSize(config.MaxCacheSize).Buckets(64)),
30 | }
31 | }
32 |
33 | //TODO: accept a duration instead of toTs
34 | func (md *Metadata) Walk(streamName string, streamType ktypes.StreamType, fromTs ktypes.UnixMs, toTs ktypes.UnixMs) ([]ktypes.ChunkInfo, error) {
35 |
36 | dirData, err := md.getMetadata(streamName, string(streamType), fromTs, toTs)
37 |
38 | if err != nil {
39 | return nil, errors.Wrap(err, "bad walk")
40 | }
41 |
42 | sort.Sort(dirData)
43 |
44 | return []ktypes.ChunkInfo(dirData), nil
45 | }
46 |
47 | func (md *Metadata) GetSources(streamName string) ([]string, error) {
48 | sources, err := readDirNames(path.Join(md.config.Basedir, streamName))
49 | if err != nil {
50 | return nil, err
51 | }
52 | return sources, nil
53 | }
54 |
55 | func (md *Metadata) GetAllChunksInfo(streamName string, from, to ktypes.UnixMs) (map[string]ktypes.SChunkInfo, error) {
56 | sources, err := md.GetSources(streamName)
57 | if err != nil {
58 | return nil, nil
59 | }
60 | result := make(map[string]ktypes.SChunkInfo, 3)
61 | for _, source := range sources {
62 | streamChunksInfo, err := md.getMetadata(streamName, source, from, to)
63 | if err != nil {
64 | continue
65 | }
66 | result[source] = streamChunksInfo
67 | }
68 | return result, nil
69 | }
70 |
71 | func (md *Metadata) GetLast(streamName, sourceType string, count int, timeoutLimit ktypes.UnixMs) (ktypes.SChunkInfo, error) {
72 | timeMs := ktypes.UnixMs(time.Duration(time.Now().Unix() * 1000))
73 | chunks, err := md.getMetadata(streamName, sourceType, timeMs-timeoutLimit, timeMs)
74 | if err != nil {
75 | return nil, err
76 | }
77 | if len(chunks) > count {
78 | return append(ktypes.SChunkInfo{}, chunks[len(chunks)-count:]...), nil
79 | } else {
80 | return chunks, nil
81 | }
82 | }
83 |
84 | func (md *Metadata) getMetadata(streamName string, sourceName string, from ktypes.UnixMs, to ktypes.UnixMs) (ktypes.SChunkInfo, error) {
85 | result := make(ktypes.SChunkInfo, 0)
86 | for _, hr := range ktypes.HoursRange(from, to) {
87 | hourInfo, _ := md.fetchManifest(streamName, sourceName, hr)
88 | for _, chunk := range hourInfo {
89 | if chunk.Ts >= from && chunk.Ts <= to {
90 | result = append(result, chunk)
91 | }
92 | }
93 | }
94 | return result, nil
95 | }
96 |
97 | func (md *Metadata) fetchManifest(streamName, sourceName, hour string) ([]ktypes.ChunkInfo, error) {
98 | value := md.infoCache.Get(ktypes.ComposeHourKeyCache(streamName, sourceName, hour))
99 | if value != nil && !value.Expired() {
100 | return value.Value().([]ktypes.ChunkInfo), nil
101 | }
102 |
103 | result, err := md.fetchManifestFromFile(streamName, sourceName, hour)
104 | if err != nil {
105 | return nil, err
106 | }
107 |
108 | md.m.RLock()
109 | defer md.m.RUnlock()
110 | value = md.infoCache.Get(ktypes.ComposeHourKeyCache(streamName, sourceName, hour))
111 | if value != nil {
112 | md.infoCache.Set(ktypes.ComposeHourKeyCache(streamName, sourceName, hour), result, md.config.RemovalTime-2*time.Hour)
113 | }
114 | return result, nil
115 | }
116 |
117 | func (md *Metadata) fetchManifestFromFile(streamName, sourceName, hour string) ([]ktypes.ChunkInfo, error) {
118 | file, err := os.OpenFile(md.config.buildMetadataPathPlain(streamName, sourceName, hour), os.O_RDONLY, os.ModeExclusive)
119 | if err != nil {
120 | return nil, err
121 | }
122 | defer file.Close()
123 | result := make([]ktypes.ChunkInfo, 0, 300)
124 | entry := innerStorage{}
125 | reader := bufio.NewScanner(file)
126 | for reader.Scan() {
127 | if err := json.Unmarshal(reader.Bytes(), &entry); err == nil {
128 | result = append(result, ktypes.ChunkInfo(entry))
129 | }
130 | }
131 | return result, nil
132 | }
133 |
134 | func (md *Metadata) writeFsInfo(key innerStorage, fullname string) error {
135 | if _, err := os.Stat(filepath.Dir(fullname)); !os.IsExist(err) {
136 | err := os.MkdirAll(filepath.Dir(fullname), os.ModePerm)
137 | if err != nil {
138 | return errors.Wrapf(err, "cannot create directories for %s", fullname)
139 | }
140 | streamTypePath := key.StreamType
141 | if key.Virtual == true {
142 | streamTypePath = key.VirtualStreamType
143 | }
144 | fileMetadata, err := os.OpenFile(md.config.buildMetadataPathStreamType(key, streamTypePath), os.O_CREATE, os.ModePerm)
145 | if err != nil {
146 | return err
147 | }
148 | fileMetadata.Close()
149 | }
150 |
151 | streamTypePath := key.StreamType
152 | if key.Virtual == true {
153 | streamTypePath = key.VirtualStreamType
154 | }
155 | f, err := os.OpenFile(md.config.buildMetadataPathStreamType(key, streamTypePath), os.O_APPEND|os.O_WRONLY, os.ModeAppend)
156 | if err != nil {
157 | return md.writeFsInfoFallback(key, fullname) //fallback for an old scheme
158 | }
159 | defer f.Close()
160 | jBytes, err := json.Marshal(key)
161 | if err != nil {
162 | return errors.Wrapf(err, "cannot serialize file %+v", key)
163 | }
164 |
165 | n, err := f.Write(append([]byte{'\n'}, jBytes...))
166 | if err == nil && n < len(jBytes)+1 {
167 | err = io.ErrShortWrite
168 | }
169 | if err != nil {
170 | return errors.Wrap(err, "error while write meta")
171 | }
172 |
173 | var manifestContent []ktypes.ChunkInfo
174 | value := md.infoCache.Get(ktypes.ComposeHourKeyCache(key.StreamName, string(key.StreamType), key.HourString()))
175 | if value == nil {
176 | manifestContent, err = md.fetchManifestFromFile(key.StreamName, string(key.StreamType), key.HourString())
177 | if err != nil {
178 | return err
179 | }
180 | } else {
181 | manifestContent = value.Value().([]ktypes.ChunkInfo)
182 | }
183 |
184 | var updatedChunkInfoList []ktypes.ChunkInfo
185 | updatedChunkInfoList = append(updatedChunkInfoList, manifestContent...)
186 | updatedChunkInfoList = append(updatedChunkInfoList, ktypes.ChunkInfo(key))
187 |
188 | md.m.Lock()
189 | defer md.m.Unlock()
190 | md.infoCache.Set(ktypes.ComposeHourKeyCache(key.StreamName, string(key.StreamType), key.HourString()), updatedChunkInfoList, md.config.RemovalTime-2*time.Hour)
191 | return nil
192 | }
193 |
194 | func (md *Metadata) writeFsInfoFallback(key innerStorage, fullname string) error {
195 | jBytes, err := json.Marshal(key)
196 | if err != nil {
197 | return errors.Wrapf(err, "cannot serialize file %+v", key)
198 | }
199 |
200 | if err := ioutil.WriteFile(fmt.Sprintf("%s.json", fullname), jBytes, os.ModePerm); err != nil {
201 | return errors.Wrap(err, "cannot write meta")
202 | }
203 | md.infoCache.Set(fullname, &key, md.config.RemovalTime-2*time.Hour)
204 | return nil
205 | }
206 |
207 | func (md *Metadata) readFsInfo(key innerStorage, fullname string) (*innerStorage, error) {
208 | cached := md.infoCache.Get(fullname)
209 | if cached != nil && !cached.Expired() {
210 | return cached.Value().(*innerStorage), nil
211 | }
212 | jData := innerStorage{}
213 | jBytes, err := ioutil.ReadFile(fmt.Sprintf("%s.json", fullname))
214 | if err != nil {
215 | return &innerStorage{}, errors.Wrap(err, "Cannot read chunk info")
216 | }
217 |
218 | err = json.Unmarshal(jBytes, &jData)
219 | if err != nil {
220 | return &jData, errors.Wrap(err, "cannot unmarshal")
221 | }
222 | md.infoCache.Set(fullname, &jData, md.config.RemovalTime-2*time.Hour)
223 | return &jData, nil
224 | }
225 |
226 | func (md *Metadata) getExtended(ci ktypes.ChunkInfo) (int, time.Duration, error) {
227 | key := innerStorage(ci)
228 |
229 | fullname := md.config.buildFullPath(key)
230 | jData, err := md.readFsInfo(key, fullname)
231 | if err != nil {
232 | return 0, 0, errors.Wrap(err, "Cannot unmarshall chunk info")
233 | }
234 |
235 | return int(jData.Size), jData.Duration, nil
236 | }
237 |
--------------------------------------------------------------------------------
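
The manifests read and written above are plain newline-delimited JSON files: writeFsInfo appends one serialized chunk record per line, and fetchManifestFromFile scans them back. A minimal sketch of what a single line holds (illustrative only, not a file from this repository; the timestamp is an arbitrary example):

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/VKCOM/kive/ktypes"
)

func main() {
	ci := ktypes.NewChunkInfo("test1", ktypes.SOURCE)
	ci.Ts = 1537951101000 // unix milliseconds; also selects the hour directory
	ci.StreamDesc = "test1"
	ci.Duration = 4 * time.Second

	// writeFsInfo appends one such JSON object per chunk to metadata.json.
	line, _ := json.Marshal(ci)
	fmt.Println(string(line))
	// {"ts":1537951101000,"seqId":0,"stream_name":"test1","stream_type":"source","virtual":false,"virtual_stream_type":"","stream_desc":"test1","disc":1,"rand":0,"size":0,"duration":4000000000}
}
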
/kfs/queue.go:
--------------------------------------------------------------------------------
1 | package kfs
2 |
3 | import (
4 | "github.com/pkg/errors"
5 | "io"
6 | "sync"
7 | )
8 |
9 | type queue struct {
10 | maxSize int
11 | lock sync.RWMutex
12 | cond *sync.Cond
13 | buf []byte
14 | closed bool
15 | }
16 |
17 | func NewQueue(maxSize int) *queue {
18 | q := &queue{
19 | buf: make([]byte, 0, 1024000),
20 | maxSize: maxSize,
21 | }
22 | q.cond = sync.NewCond(q.lock.RLocker())
23 | return q
24 | }
25 |
26 | func (q *queue) Close() error {
27 | q.lock.Lock()
28 | defer q.lock.Unlock()
29 | q.closed = true
30 | q.cond.Broadcast()
31 | return nil
32 | }
33 |
34 | func (q *queue) Write(data []byte) (int, error) {
35 | q.lock.Lock()
36 | defer q.lock.Unlock()
37 |
38 | q.cond.Broadcast()
39 |
40 | if q.closed {
41 | return 0, errors.New("queue already closed")
42 | }
43 |
44 | if len(q.buf) > q.maxSize {
45 | q.closed = true
46 | return 0, errors.New("queue too long")
47 | }
48 |
49 | q.buf = append(q.buf, data...)
50 | return len(data), nil
51 | }
52 |
53 | func (q *queue) Oldest() *queueCursor {
54 | cursor := &queueCursor{
55 | que: q,
56 | }
57 | return cursor
58 | }
59 |
60 | func (q *queue) GetSize() int {
61 | q.cond.L.Lock()
62 | defer q.cond.L.Unlock()
63 | for {
64 | buf := q.buf
65 |
66 | if q.closed {
67 | return len(buf)
68 | }
69 |
70 | q.cond.Wait()
71 | }
72 | }
73 |
74 | type queueCursor struct {
75 | que *queue
76 | closed bool
77 | pos int
78 | }
79 |
80 | func (qc *queueCursor) Read(out []byte) (n int, err error) {
81 | qc.que.cond.L.Lock()
82 | defer qc.que.cond.L.Unlock()
83 |
84 | for {
85 | buf := qc.que.buf
86 |
87 | if qc.que.closed && qc.pos == len(buf) {
88 | err = io.EOF
89 | return
90 | }
91 |
92 | if qc.closed {
93 | err = io.EOF
94 | return
95 | }
96 |
97 | if qc.pos < len(buf) {
98 | n = copy(out, buf[qc.pos:])
99 | qc.pos += n
100 | return
101 | }
102 | qc.que.cond.Wait()
103 | }
104 | }
105 |
--------------------------------------------------------------------------------
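
The queue above is the in-memory hand-off between a chunk writer and readers that may attach before the chunk is finished: every cursor starts at the beginning of the buffer and drains until Close. A rough test-style sketch of that behaviour (illustrative only, assuming it sits inside package kfs):

package kfs

import (
	"fmt"
	"io/ioutil"
)

func ExampleNewQueue() {
	q := NewQueue(1024) // writes fail once the buffer exceeds 1024 bytes

	r := q.Oldest() // cursor positioned at the very beginning of the buffer

	q.Write([]byte("hello "))
	q.Write([]byte("world"))
	q.Close() // readers see io.EOF once they have drained everything

	data, _ := ioutil.ReadAll(r) // *queueCursor satisfies io.Reader
	fmt.Println(string(data))
	// Output: hello world
}
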
/kfs/utils.go:
--------------------------------------------------------------------------------
1 | package kfs
2 |
3 | import (
4 | "fmt"
5 | "github.com/VKCOM/kive/ktypes"
6 | "path"
7 | )
8 |
9 | func (config KfsConfig) buildFullPath(key innerStorage) string {
10 | return fmt.Sprintf("%s/%s", config.Basedir, key.buildRelative())
11 | }
12 |
13 | func (config KfsConfig) buildDirPath(key innerStorage) string {
14 | return fmt.Sprintf("%s/%s", config.Basedir, key.buildDir())
15 | }
16 |
17 | func (config KfsConfig) buildMetadataPath(key innerStorage) string {
18 | return path.Join(config.Basedir, key.buildDir(), "metadata.json")
19 | }
20 |
21 | func (config KfsConfig) buildMetadataPathStreamType(key innerStorage, streamType ktypes.StreamType) string {
22 | return path.Join(config.Basedir, key.buildDirStreamType(streamType), "metadata.json")
23 | }
24 |
25 | func (config KfsConfig) buildMetadataPathPlain(streamName, sourceName, hour string) string {
26 | return path.Join(config.Basedir, streamName, sourceName, hour, "metadata.json")
27 | }
28 |
--------------------------------------------------------------------------------
/kfs/writer.go:
--------------------------------------------------------------------------------
1 | package kfs
2 |
3 | import (
4 | "github.com/pkg/errors"
5 | "github.com/VKCOM/kive/ktypes"
6 | "io"
7 | "time"
8 | )
9 |
10 | type Writer struct {
11 | io.WriteCloser
12 | item *bufferItem
13 | fs *Filesystem
14 | }
15 |
16 | func (w *Writer) Write(data []byte) (int, error) {
17 | return w.item.queue.Write(data)
18 | }
19 |
20 | func (w *Writer) SetChunkDuration(dur time.Duration) {
21 | w.fs.m.Lock()
22 | defer w.fs.m.Unlock()
23 | w.item.resultKey.Duration = dur
24 | }
25 |
26 | func (w *Writer) Close() error {
27 | start := time.Now()
28 | err := w.fs.finalizeWriter(w.item)
29 | t := time.Now()
30 | elapsed := t.Sub(start)
31 | ktypes.Stat(err != nil, "disk_write", "timing", ktypes.TimeToStat(elapsed))
32 |
33 | if err != nil {
34 | return errors.Wrapf(err, "cannot finalize writer %+v", w.item.resultKey)
35 | }
36 | return nil
37 | }
38 |
--------------------------------------------------------------------------------
/ktypes/abr.go:
--------------------------------------------------------------------------------
1 | package ktypes
2 |
3 | import (
4 | "github.com/VKCOM/joy4/av"
5 | "io"
6 | )
7 |
8 | type Abr interface {
9 | Init(desiredOutputSizes []int, incomingStream av.Demuxer, streamName string) ([]AbrDemuxer, []int, error)
10 | av.PacketWriter
11 | io.Closer
12 | }
13 |
14 | type AbrDemuxer interface {
15 | Size() int
16 | Desc() string
17 | av.DemuxCloser
18 | }
19 |
--------------------------------------------------------------------------------
/ktypes/api.go:
--------------------------------------------------------------------------------
1 | package ktypes
2 |
3 | import (
4 | "fmt"
5 | "time"
6 | )
7 |
8 | type StreamInfo struct {
9 | Width int
10 | Height int
11 | ChunkStartTime UnixMs
12 | }
13 |
14 | var (
15 | ApiInst Api
16 | )
17 |
18 | type Api interface {
19 | OnPublish(streamName, appName string, params map[string]string) (Stream, error)
20 | AllowView(streamName, salt string) (isAllowed bool)
21 | Stat(isError bool, event string, context string, extra string)
22 | ReadConfig(configPath string, configInterface interface{}) (interface{}, error)
23 | GetTranscoder() (Abr, error)
24 | Serve() error
25 | }
26 |
27 | type Stream interface {
28 | StreamName() string
29 | NotifyStreaming(StreamInfo)
30 | AllowStreaming() (isAllowed bool)
31 | Disconnect()
32 | }
33 |
34 | func TimeToStat(dt time.Duration) string {
35 | ms := 100 * int64(dt/(time.Millisecond*100))
36 | return fmt.Sprintf("%d", ms)
37 | }
38 |
39 | func Stat(isError bool, event string, context string, extra string) {
40 | ApiInst.Stat(isError, event, context, extra)
41 | }
42 |
--------------------------------------------------------------------------------
/ktypes/debug.go:
--------------------------------------------------------------------------------
1 | package ktypes
2 |
3 | var (
4 | Recover = true
5 | )
6 |
--------------------------------------------------------------------------------
/ktypes/key.go:
--------------------------------------------------------------------------------
1 | package ktypes
2 |
3 | import (
4 | "encoding/base32"
5 | "fmt"
6 | "github.com/pkg/errors"
7 | "strings"
8 | "time"
9 | )
10 |
11 | type StreamType string
12 |
13 | const (
14 | SOURCE StreamType = "source"
15 | DEFAULT_DURATION = 4 * time.Second
16 | SECURTIY_HR_LIMIT = 50
17 | )
18 |
19 | type UnixMs int64
20 | type ChunkKey struct {
21 | Ts UnixMs `json:"ts"`
22 | SeqId int64 `json:"seqId"`
23 | StreamName string `json:"stream_name"`
24 | StreamType StreamType `json:"stream_type"`
25 | Virtual bool `json:"virtual"`
26 | VirtualStreamType StreamType `json:"virtual_stream_type"`
27 | }
28 |
29 | func (ck *ChunkKey) StreamName32Enc() string {
30 | return strings.Replace(base32.StdEncoding.EncodeToString([]byte(ck.StreamName)), "=", "", -1)
31 | }
32 |
33 | func (ck *ChunkKey) HourString() string {
34 | return HourString(ck.Ts)
35 | }
36 |
37 | func (ck *ChunkKey) Duration() time.Duration {
38 | return time.Duration(ck.Ts) * time.Millisecond
39 | }
40 |
41 | func HourString(ts UnixMs) string {
42 | hour := int64(time.Duration(ts)*time.Millisecond) / int64(time.Hour)
43 | return fmt.Sprintf("%015d", hour)
44 | }
45 |
46 | func HoursRange(from, to UnixMs) []string {
47 | fromHr := int64(time.Duration(from)*time.Millisecond) / int64(time.Hour)
48 | toHr := int64(time.Duration(to)*time.Millisecond) / int64(time.Hour)
49 |
50 | result := make([]string, 0)
51 | if fromHr > toHr {
52 | return nil
53 | }
54 |
55 | if toHr-fromHr > SECURTIY_HR_LIMIT {
56 | return nil
57 | }
58 |
59 | result = append(result, fmt.Sprintf("%015d", fromHr))
60 | var i int64
61 | for i = 0; i < toHr-fromHr; i++ {
62 | result = append(result, fmt.Sprintf("%015d", fromHr+i+1))
63 | }
64 | return result
65 | }
66 |
67 | func ComposeHourKeyCache(streamName, sourceName, hourName string) string {
68 | return fmt.Sprintf("%s/%s/%s", streamName, sourceName, hourName)
69 | }
70 |
71 | type ChunkInfo struct {
72 | ChunkKey
73 | StreamDesc string `json:"stream_desc"`
74 | Discontinuity int `json:"disc"`
75 | Rand int `json:"rand"`
76 | Size int `json:"size"` //not used in path
77 | Duration time.Duration `json:"duration"` //not used in path
78 | }
79 |
80 | func NewChunkInfo(streamName string, sourceType StreamType) ChunkInfo {
81 | key := ChunkInfo{
82 | ChunkKey: ChunkKey{
83 | StreamType: sourceType,
84 | StreamName: streamName,
85 | },
86 | Discontinuity: 1,
87 | }
88 | return key
89 | }
90 |
91 | func (ck *ChunkInfo) StreamDesc32Enc() string {
92 | return strings.Replace(base32.StdEncoding.EncodeToString([]byte(ck.StreamDesc)), "=", "", -1)
93 | }
94 |
95 | func (ck *ChunkKey) Less(rhs ChunkKey) bool {
96 | nameLess := strings.Compare(ck.StreamName, rhs.StreamName)
97 | if nameLess < 0 {
98 | return true
99 | } else if nameLess > 0 {
100 | return false
101 | }
102 | typeLess := strings.Compare(string(ck.StreamType), string(rhs.StreamType))
103 | if typeLess < 0 {
104 | return true
105 | } else if typeLess > 0 {
106 | return false
107 | }
108 |
109 | if ck.Ts < rhs.Ts {
110 | return true
111 | } else if ck.Ts > rhs.Ts {
112 | return false
113 | }
114 |
115 | if ck.SeqId < rhs.SeqId {
116 | return true
117 | } else if ck.SeqId > rhs.SeqId {
118 | return false
119 | }
120 |
121 | return false
122 | }
123 |
124 | type SChunkInfo []ChunkInfo
125 |
126 | func (p SChunkInfo) Len() int { return len(p) }
127 | func (p SChunkInfo) Less(i, j int) bool { return p[i].ChunkKey.Less(p[j].ChunkKey) }
128 | func (p SChunkInfo) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
129 |
130 | func TimeToMillis(ts time.Time) UnixMs {
131 | return UnixMs(int64(ts.UnixNano()) / int64(time.Millisecond))
132 | }
133 |
134 | func (key ChunkInfo) BuildChunkName() string {
135 | return fmt.Sprintf("%015d_%015d_%s_%s_d%d_%s_r%d.ts", key.Ts, 0, key.StreamName32Enc(), key.StreamType, key.Discontinuity, key.StreamDesc32Enc(), key.Rand)
136 | }
137 |
138 | func (key ChunkInfo) BuildVodChunkName() string {
139 | return fmt.Sprintf("%015d_%015d_%s_d%d_%s_r%d.ts", key.Ts, 0, key.StreamType, key.Discontinuity, key.StreamDesc32Enc(), key.Rand)
140 | }
141 |
142 | func ParseChunkName(chunkNameEncoded string) (ChunkInfo, error) {
143 | chunkNameEncoded = strings.Replace(chunkNameEncoded, "_", " ", -1)
144 | key := ChunkInfo{}
145 | var (
146 | streamNameEnc32 []byte
147 | streamDescEnc32 []byte
148 | )
149 | n, err := fmt.Sscanf(chunkNameEncoded, "%015d 000000000000000 %s %s d%d %s r%d.ts", &key.Ts, &streamNameEnc32, &key.StreamType, &key.Discontinuity, &streamDescEnc32, &key.Rand)
150 | if n != 6 || err != nil {
151 | return key, errors.Wrapf(err, "cannot parse %+s", chunkNameEncoded)
152 | }
153 |
154 | padding := ""
155 | switch len(streamNameEnc32) % 8 {
156 | case 2:
157 | padding = "======"
158 | case 4:
159 | padding = "===="
160 | case 5:
161 | padding = "==="
162 | case 7:
163 | padding = "="
164 | }
165 | streamNameEnc32 = append(streamNameEnc32, []byte(padding)...)
166 |
167 | streamName, err := base32.StdEncoding.DecodeString(string(streamNameEnc32))
168 | if err != nil {
169 | return key, errors.Wrap(err, "cannot decode")
170 | }
171 | key.StreamName = string(streamName)
172 |
173 | padding = ""
174 | switch len(streamDescEnc32) % 8 {
175 | case 2:
176 | padding = "======"
177 | case 4:
178 | padding = "===="
179 | case 5:
180 | padding = "==="
181 | case 7:
182 | padding = "="
183 | }
184 | streamDescEnc32 = append(streamDescEnc32, []byte(padding)...)
185 |
186 | streamDesc, err := base32.StdEncoding.DecodeString(string(streamDescEnc32))
187 | if err != nil {
188 | return key, errors.Wrap(err, "cannot decode")
189 | }
190 | key.StreamDesc = string(streamDesc)
191 |
192 | return key, nil
193 | }
194 |
--------------------------------------------------------------------------------
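
Chunk names and hour buckets are what tie HLS URLs to the on-disk layout; a short round-trip sketch of the helpers above (illustrative only, package main and arbitrary example values assumed):

package main

import (
	"fmt"

	"github.com/VKCOM/kive/ktypes"
)

func main() {
	ci := ktypes.NewChunkInfo("test1", ktypes.SOURCE)
	ci.Ts = 1537951101000 // unix milliseconds
	ci.StreamDesc = "test1"
	ci.Rand = 42

	name := ci.BuildChunkName() // stream name and desc are base32-encoded, padding stripped
	fmt.Println(name)
	// 001537951101000_000000000000000_ORSXG5BR_source_d1_ORSXG5BR_r42.ts

	parsed, err := ktypes.ParseChunkName(name)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.StreamName, parsed.StreamType, parsed.Discontinuity) // test1 source 1

	// Chunks are grouped into directories by whole hours since the Unix epoch.
	fmt.Println(ktypes.HourString(ci.Ts))                  // 000000000427208
	fmt.Println(ktypes.HoursRange(ci.Ts, ci.Ts+3600*1000)) // [000000000427208 000000000427209]
}
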
/ktypes/key_test.go:
--------------------------------------------------------------------------------
1 | package ktypes
2 |
3 | import (
4 | "testing"
5 | "time"
6 | )
7 |
8 | func TestHoursRange(t *testing.T) {
9 | r := HoursRange(1637951101000, 1537951101000)
10 | if len(r) != 0 {
11 | t.Errorf("wrong HoursRange: %+v", HoursRange(1537951101000, 1537951101000))
12 | }
13 |
14 | r = HoursRange(1537951101000, 1537951101000)
15 | if r[0] != "000000000427208" || len(r) != 1 {
16 | t.Errorf("wrong HoursRange: %+v", HoursRange(1537951101000, 1537951101000))
17 | }
18 |
19 | r = HoursRange(1537951101000, 1537954701000)
20 | if r[0] != "000000000427208" || r[1] != "000000000427209" || len(r) != 2 {
21 | t.Errorf("wrong HoursRange: %+v", HoursRange(1537951101000, 1537954701000))
22 | }
23 | }
24 |
25 | func TestChunkKey_Duration(t *testing.T) {
26 | ci := ChunkKey{Ts: 5000}
27 | if ci.Duration() != 5*time.Second {
28 | t.Errorf("bad duration")
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "github.com/sirupsen/logrus"
6 | "github.com/VKCOM/kive/ktypes"
7 | "github.com/VKCOM/kive/noop_api"
8 | "github.com/VKCOM/kive/worker"
9 | "os"
10 | "os/signal"
11 | "syscall"
12 | )
13 |
14 | func init() {
15 | logrus.SetLevel(logrus.DebugLevel)
16 | logrus.Info("Initializing kive")
17 | ktypes.ApiInst = &noop_api.NoopApi{}
18 | }
19 |
20 | func main() {
21 | configPath := flag.String("config", "default", "configuration path")
22 | flag.Parse()
23 |
24 | c := worker.NewConfig(*configPath)
25 |
26 | err := ktypes.ApiInst.Serve()
27 |
28 | if err != nil {
29 | logrus.Panic("Cannot start api ", err)
30 | }
31 |
32 | w, err := worker.NewWorker(c)
33 | if err != nil {
34 | logrus.Panic("Cannot create worker ", err)
35 | }
36 |
37 | err = w.Listen()
38 | if err != nil {
39 | logrus.Panic("Cannot listen worker ", err)
40 | }
41 |
42 | err = w.Serve()
43 | if err != nil {
44 | logrus.Panic("Cannot serve worker ", err)
45 | }
46 |
47 | sigch := make(chan os.Signal, 1)
48 | signal.Notify(sigch, syscall.SIGINT, syscall.SIGTERM)
49 | logrus.Info(<-sigch)
50 | w.Stop()
51 | }
52 |
--------------------------------------------------------------------------------
/media/rtmp_demuxer.go:
--------------------------------------------------------------------------------
1 | package media
2 |
3 | import (
4 | "github.com/VKCOM/joy4/av"
5 | "github.com/VKCOM/joy4/av/avutil"
6 | "github.com/VKCOM/joy4/format/flv"
7 | "github.com/VKCOM/joy4/format/ts"
8 | "github.com/pkg/errors"
9 | "github.com/sirupsen/logrus"
10 | "io"
11 | "time"
12 | )
13 |
14 | type kfChunkerState int
15 |
16 | const (
17 | INITIAL kfChunkerState = iota
18 | WAITING_KF
19 | SPLITTING
20 | HOLD_ON_KF
21 | FINISHED
22 | ERROR
23 |
24 | maxPacketPerChunk = 2000
25 | )
26 |
27 | type SegmentInfo struct {
28 | Duration time.Duration
29 | Width int
30 | Height int
31 | }
32 |
33 | type keyFrameChunker struct {
34 | av.Demuxer
35 | state kfChunkerState
36 | lastKeyframe av.Packet
37 | durMin time.Duration
38 | lastTs time.Duration
39 | currentDuration time.Duration
40 | pktCounter int
41 | }
42 |
43 | func (kf *keyFrameChunker) isVideo(pk *av.Packet) bool {
44 | streams, err := kf.Demuxer.Streams()
45 | if err != nil {
46 | return false
47 | }
48 |
49 | if len(streams) <= int(pk.Idx) {
50 | return false
51 | }
52 |
53 | t := streams[pk.Idx]
54 | return t.Type().IsVideo()
55 | }
56 |
57 | func (kf *keyFrameChunker) readChecked() (av.Packet, error) {
58 | pk, err := kf.Demuxer.ReadPacket()
59 | kf.pktCounter += 1
60 | if err != nil {
61 | return pk, err
62 | }
63 |
64 | if kf.pktCounter > maxPacketPerChunk {
65 | return pk, errors.New("too many packets")
66 | }
67 | return pk, err
68 | }
69 |
70 | func (kf *keyFrameChunker) ReadPacket() (av.Packet, error) {
71 | switch kf.state {
72 | case FINISHED:
73 | return av.Packet{}, io.EOF
74 | case ERROR:
75 | return av.Packet{}, io.ErrUnexpectedEOF
76 | case INITIAL:
77 | pk, err := kf.readChecked()
78 | if err == io.EOF || err != nil {
79 | kf.state = ERROR
80 | return pk, errors.Wrap(err, "error on reading")
81 | } else if pk.IsKeyFrame {
82 | kf.state = SPLITTING
83 | kf.lastTs = pk.Time
84 | return pk, nil
85 | } else if kf.isVideo(&pk) {
86 | kf.state = ERROR
87 | return pk, errors.New("first video packet is not a keyframe")
88 | }
89 |
90 | if !kf.isVideo(&pk) {
91 | kf.lastTs = pk.Time
92 | }
93 | return pk, nil
94 | case WAITING_KF:
95 | pk, err := kf.readChecked()
96 | if err == io.EOF || err != nil {
97 | kf.state = ERROR
98 | return pk, errors.Wrap(err, "error on reading")
99 | } else if pk.IsKeyFrame {
100 | kf.state = SPLITTING
101 | } else if kf.isVideo(&pk) {
102 | kf.state = ERROR
103 | return pk, errors.New("video packet while waiting for keyframe")
104 | }
105 | return pk, nil
106 | case SPLITTING:
107 | pk, err := kf.readChecked()
108 | if err == io.EOF {
109 | kf.state = FINISHED
110 | return pk, io.EOF
111 | } else if err != nil {
112 | kf.state = ERROR
113 | return pk, errors.Wrap(err, "error on reading")
114 | }
115 |
116 | if pk.IsKeyFrame && pk.Time-kf.lastTs > kf.durMin {
117 | kf.lastKeyframe = pk
118 | kf.state = HOLD_ON_KF
119 | return pk, io.EOF
120 | }
121 |
122 | if !kf.isVideo(&pk) {
123 | kf.currentDuration = pk.Time - kf.lastTs
124 | }
125 |
126 | return pk, nil
127 |
128 | case HOLD_ON_KF:
129 | kf.state = SPLITTING
130 | kf.pktCounter = 0
131 | kf.lastTs = kf.lastKeyframe.Time
132 | return kf.lastKeyframe, nil
133 | }
134 |
135 | return av.Packet{}, errors.New("unreachable state")
136 | }
137 |
138 | type SegmentDemuxer struct {
139 | chunker *keyFrameChunker
140 | muxer *ts.Muxer
141 | }
142 |
143 | func NewSegmentDemuxer(demuxer av.Demuxer, minSegmentDuration time.Duration) *SegmentDemuxer {
144 | return &SegmentDemuxer{
145 | chunker: &keyFrameChunker{
146 | Demuxer: demuxer,
147 | state: INITIAL,
148 | durMin: minSegmentDuration,
149 | },
150 | muxer: ts.NewMuxer(nil),
151 | }
152 | }
153 |
154 | func (sd *SegmentDemuxer) WriteNext(w io.Writer) (SegmentInfo, error) {
155 | sd.muxer = ts.NewMuxer(w)
156 | si := SegmentInfo{}
157 | err := avutil.CopyFile(sd.muxer, sd.chunker)
158 |
159 | if sd.chunker.currentDuration <= 0 {
160 | logrus.Error("bad duration: ", sd.chunker.currentDuration)
161 | si.Duration = 4 * time.Second
162 | } else {
163 | si.Duration = sd.chunker.currentDuration
164 | }
165 |
166 | if err != nil {
167 | return si, errors.Wrap(err, "error on copy")
168 | }
169 |
170 | if streams, err := sd.Streams(); err == nil {
171 | if md, err := flv.NewMetadataByStreams(streams); err == nil {
172 | h, _ := md["height"]
173 | w, _ := md["width"]
174 | si.Height = h.(int)
175 | si.Width = w.(int)
176 | }
177 | }
178 |
179 | if sd.chunker.state == FINISHED || sd.chunker.state == ERROR {
180 | return si, io.EOF
181 | }
182 |
183 | return si, nil
184 | }
185 |
186 | func (sd *SegmentDemuxer) Streams() ([]av.CodecData, error) {
187 | return sd.chunker.Streams()
188 | }
189 |
--------------------------------------------------------------------------------
/media/rtmp_demuxer_test.go:
--------------------------------------------------------------------------------
1 | package media
2 |
3 | import (
4 | "fmt"
5 | "github.com/VKCOM/joy4/format/flv"
6 | "github.com/sirupsen/logrus"
7 | "github.com/stretchr/testify/assert"
8 | "github.com/stretchr/testify/require"
9 | "io"
10 | "io/ioutil"
11 | "os"
12 | "os/exec"
13 | "path/filepath"
14 | "runtime"
15 | "testing"
16 | "time"
17 | )
18 |
19 | func TestSplitMuxing(t *testing.T) {
20 | _, filename, _, _ := runtime.Caller(0)
21 |
22 | dirname := filepath.Dir(filename)
23 | rpath := dirname + "/../../test_data/t2.flv"
24 | rpathAnother := dirname + "/../../test_data/l1.flv"
25 |
26 | rfile, err := os.Open(rpath)
27 | logrus.Info(rpath, err)
28 |
29 | flvReader := flv.NewDemuxer(rfile)
30 | chunker := NewSegmentDemuxer(flvReader, 3100*time.Millisecond)
31 |
32 | path, _ := ioutil.TempDir("", "kive_chunker")
33 | os.MkdirAll(filepath.Dir(path), os.ModePerm)
34 |
35 | filelist := make([]string, 0)
36 | ts := []int{4939000000, 5436000000, 5383000000, 4279000000}
37 | for i := 0; ; i += 1 {
38 | wpath := fmt.Sprintf("%s/%d.ts", path, i)
39 | logrus.Info(wpath)
40 | wfile, _ := os.Create(wpath)
41 | filelist = append(filelist, fmt.Sprintf("%d.ts", i))
42 | si, err := chunker.WriteNext(wfile)
43 | require.Equal(t, ts[i], int(si.Duration))
44 |
45 | assert.Equal(t, 1280, si.Width)
46 | assert.Equal(t, 720, si.Height)
47 | wfile.Close()
48 | if err == io.EOF {
49 | break
50 | } else if err != nil {
51 | logrus.Infof("%+v", err)
52 | break
53 | }
54 | }
55 |
56 | list := ""
57 | for _, str := range filelist {
58 | list += "file '" + str + "'\n"
59 | }
60 |
61 | lfile := fmt.Sprintf("%s/list.txt", path)
62 |
63 | fullOutfile := fmt.Sprintf("%s/full.mp4", path)
64 | ioutil.WriteFile(lfile, []byte(list), os.ModePerm)
65 | logrus.Info(fullOutfile)
66 |
67 | cmd, err := exec.Command("ffmpeg", "-y", "-f", "concat", "-safe", "0", "-i", lfile, "-c", "copy", fullOutfile).CombinedOutput()
68 | if err != nil {
69 | logrus.Errorf("%s %+v", cmd, err)
70 | }
71 |
72 | cmpExec := dirname + "/../../test_data/framemd5cmp"
73 | cmd, err = exec.Command(cmpExec, fullOutfile, rpath, path).CombinedOutput()
74 |
75 | assert.NoError(t, err)
76 |
77 | b, err := ioutil.ReadFile(path + "/full_vs_t2_diff.txt")
78 | assert.NoError(t, err)
79 | assert.Equal(t, 0, len(b))
80 |
81 | cmd, err = exec.Command(cmpExec, fullOutfile, rpathAnother, path).CombinedOutput()
82 | b, err = ioutil.ReadFile(path + "/full_vs_l1_diff.txt")
83 |
84 | assert.NoError(t, err)
85 | assert.NotEqual(t, 0, len(b))
86 |
87 | }
88 |
--------------------------------------------------------------------------------
/noop_api/noop_api.go:
--------------------------------------------------------------------------------
1 | package noop_api
2 |
3 | import (
4 | "github.com/BurntSushi/toml"
5 | "github.com/pkg/errors"
6 | "github.com/sirupsen/logrus"
7 | "github.com/VKCOM/kive/ktypes"
8 | "github.com/VKCOM/kive/worker"
9 | "io/ioutil"
10 | )
11 |
12 | type NoopApi struct {
13 | }
14 |
15 | func (na *NoopApi) OnPublish(streamName, appName string, params map[string]string) (ktypes.Stream, error) {
16 | //parse and check stream signature
17 | //send to blocking task queue 'streams' [owner_id, user_id]
18 | //error on bad signature/parsing
19 | return &noopStream{
20 | incomingStreamName: streamName,
21 | }, nil
22 | }
23 |
24 | func (na *NoopApi) AllowView(streamName, salt string) (isAllowed bool) {
25 | return true
26 | }
27 |
28 | type noopStream struct {
29 | incomingStreamName string
30 | }
31 |
32 | func (nas *noopStream) StreamName() string {
33 | return nas.incomingStreamName
34 | }
35 |
36 | func (nas *noopStream) NotifyStreaming(si ktypes.StreamInfo) {
37 | }
38 |
39 | func (nas *noopStream) AllowStreaming() (isAllowed bool) {
40 | return true
41 | }
42 |
43 | func (vks *noopStream) Disconnect() {
44 | }
45 |
46 | func (na *NoopApi) Stat(isError bool, event string, context string, extra string) {
47 | }
48 |
49 | func (na *NoopApi) Serve() error {
50 | return nil
51 | }
52 |
53 | func (na *NoopApi) GetTranscoder() (ktypes.Abr, error) {
54 | return nil, errors.New("Not implemented")
55 | }
56 |
57 | func (na *NoopApi) ReadConfig(configPath string, configInterface interface{}) (interface{}, error) {
58 | if configPath == worker.DEFAULT_CONFIG {
59 | return configInterface, nil
60 | }
61 |
62 | c := configInterface.(worker.Config)
63 |
64 | if configPath == worker.DEV_CONFIG {
65 | c.LiveHlsConfig.HttpPort = 8085
66 | c.RtmpServerConfig.RtmpPort = 1935
67 | c.KfsConfig.Basedir = "/tmp/kive_ts/"
68 | return c, nil
69 | }
70 |
71 | configData, err := ioutil.ReadFile(configPath)
72 | if err != nil {
73 | return c, errors.Wrapf(err, "Bad config file %+v", configPath)
74 | }
75 |
76 | logrus.Infof("Config data %+v", string(configData))
77 |
78 | if meta, err := toml.DecodeFile(configPath, &c); err != nil || len(meta.Undecoded()) != 0 {
79 | if err != nil {
80 | return c, errors.Wrap(err, "cannot decode config")
81 | }
82 | return c, errors.Errorf("cannot apply config keys: %v", meta.Undecoded())
83 | }
84 | return c, nil
85 | }
86 |
--------------------------------------------------------------------------------
/rtmp_server/config.go:
--------------------------------------------------------------------------------
1 | package rtmp_server
2 |
3 | import (
4 | "regexp"
5 | )
6 |
7 | type RtmpServerConfig struct {
8 | RtmpHost string
9 | RtmpPort int
10 | PublishPrefix string
11 | publishRegexp *regexp.Regexp
12 | }
13 |
14 | func NewRtmpServerConfig() RtmpServerConfig {
15 | res := RtmpServerConfig{
16 | RtmpHost: "",
17 | RtmpPort: 1935,
18 |
19 | PublishPrefix: "/(?P<app>.*)\\?publishsign=(?P<publishsign>.*)/(?P<incoming_stream_name>[^?]*)",
20 | }
21 | res.publishRegexp = regexp.MustCompile(res.PublishPrefix)
22 |
23 | return res
24 | }
25 |
--------------------------------------------------------------------------------
/rtmp_server/requests.go:
--------------------------------------------------------------------------------
1 | package rtmp_server
2 |
3 | import (
4 | "github.com/VKCOM/joy4/av"
5 | "github.com/VKCOM/kive/ktypes"
6 | )
7 |
8 | //import (
9 | // "github.com/mitchellh/mapstructure"
10 | //)
11 |
12 | type PublishRequest struct {
13 | StreamHandler ktypes.Stream
14 | Application string `mapstructure:"app"`
15 | IncomingStreamName string `mapstructure:"incoming_stream_name"`
16 | StreamName string
17 | Params map[string]string
18 | Data av.DemuxCloser
19 | }
20 |
--------------------------------------------------------------------------------
/rtmp_server/rtmp_server.go:
--------------------------------------------------------------------------------
1 | package rtmp_server
2 |
3 | import (
4 | "fmt"
5 | "net"
6 |
7 | "github.com/VKCOM/joy4/format/flv"
8 | "github.com/VKCOM/joy4/format/rtmp"
9 | "github.com/mitchellh/mapstructure"
10 | "github.com/pkg/errors"
11 | "github.com/sirupsen/logrus"
12 | "github.com/VKCOM/kive/ktypes"
13 | "github.com/VKCOM/kive/vsync"
14 | "runtime/debug"
15 | "time"
16 | )
17 |
18 | func init() {
19 | flv.MaxProbePacketCount = 500 //long wait for audio and video
20 | rtmp.MaxChunkSize = 10 * 1024 * 1024
21 | }
22 |
23 | type RtmpServer struct {
24 | config RtmpServerConfig
25 | server *rtmp.Server
26 | rtmpListener *net.TCPListener
27 | HandlePublish func(*PublishRequest) error
28 | rtmpMutex *vsync.Semaphore
29 | streamMap *vsync.CheckedMap
30 | }
31 |
32 | type DeadLineConn struct {
33 | net.Conn
34 | extend time.Duration
35 | }
36 |
37 | func (dlc *DeadLineConn) Read(b []byte) (n int, err error) {
38 | dlc.SetDeadline(time.Now().Add(dlc.extend))
39 | return dlc.Conn.Read(b)
40 | }
41 |
42 | func (dlc *DeadLineConn) Write(b []byte) (n int, err error) {
43 | dlc.SetDeadline(time.Now().Add(dlc.extend))
44 | return dlc.Conn.Write(b)
45 | }
46 |
47 | func NewRtmpServer(config RtmpServerConfig) (*RtmpServer, error) {
48 | flv.MaxProbePacketCount = 500 //long wait for audio and video
49 | rtmp.MaxChunkSize = 10 * 1024 * 1024
50 |
51 | rts := &RtmpServer{
52 | config: config,
53 | rtmpMutex: vsync.NewSemaphore(500, 200),
54 | streamMap: vsync.NewCheckedMap(),
55 | }
56 |
57 | rtmpConnCreate := func(netconn net.Conn) *rtmp.Conn {
58 | logrus.Debugf("Connection created %+v", netconn.RemoteAddr())
59 | ktypes.Stat(false, "connection", "created", "")
60 |
61 | netconn.SetDeadline(time.Now().Add(1 * time.Minute))
62 | conn := rtmp.NewConn(&DeadLineConn{Conn: netconn, extend: 40 * time.Second})
63 | conn.Prober.HasVideo = true
64 | conn.Prober.HasAudio = true
65 | return conn
66 | }
67 |
68 | rtmpServer := &rtmp.Server{
69 | Addr: fmt.Sprintf("%s:%d", rts.config.RtmpHost, rts.config.RtmpPort),
70 | CreateConn: rtmpConnCreate,
71 | }
72 |
73 | rtmpServer.HandlePublish = func(conn *rtmp.Conn) {
74 | defer func() {
75 | if r := recover(); r != nil {
76 | logrus.Errorf("%s: %s", r, debug.Stack())
77 | }
78 | }()
79 | logrus.Debugf("Connection publish %+v", conn.NetConn().RemoteAddr())
80 | ktypes.Stat(false, "connection", "publish", "")
81 |
82 | defer ktypes.Stat(false, "connection", "close", "")
83 | defer conn.Close()
84 | logrus.Info(conn.URL.RequestURI())
85 |
86 | publishRequest, err := rts.parsePublishRequest(conn.URL.RequestURI())
87 | if err != nil {
88 | logrus.Errorf(" %+v", err)
89 | return
90 | }
91 | publishRequest.Data = conn
92 | streamHandler, err := ktypes.ApiInst.OnPublish(publishRequest.IncomingStreamName, publishRequest.Application, publishRequest.Params)
93 | if err != nil {
94 | logrus.WithField("stream_name", publishRequest.StreamName).Errorf("Cannot publish %+v", err)
95 | return
96 | }
97 | defer streamHandler.Disconnect()
98 |
99 | lockSuccess := rts.streamMap.Lock(streamHandler.StreamName(), true)
100 | if !lockSuccess {
101 | logrus.WithField("stream_name", publishRequest.StreamName).Errorf("stream already running %+v", err)
102 | return
103 | }
104 | defer rts.streamMap.Unlock(streamHandler.StreamName())
105 |
106 | if !streamHandler.AllowStreaming() {
107 | logrus.WithField("stream_name", publishRequest.StreamName).Errorf("Streaming disabled %+v", publishRequest)
108 | return
109 | }
110 |
111 | if !rts.rtmpMutex.TryLock(8 * time.Second) {
112 | ktypes.Stat(false, "connection", "timedout", "")
113 | return
114 | }
115 | defer rts.rtmpMutex.Unlock()
116 |
117 | publishRequest.StreamHandler = streamHandler
118 | publishRequest.StreamName = streamHandler.StreamName()
119 |
120 | logrus.WithField("stream_name", publishRequest.StreamName).Infof("Streaming started %+v", publishRequest)
121 | err = rts.HandlePublish(publishRequest)
122 | logrus.WithField("stream_name", publishRequest.StreamName).Infof("Streaming stopped %+v, %+v", publishRequest, err)
123 | if err != nil {
124 | logrus.WithField("stream_name", publishRequest.StreamName).Errorf(" %+v", err)
125 | return
126 | }
127 |
128 | }
129 | rts.server = rtmpServer
130 |
131 | return rts, nil
132 | }
133 |
134 | func (rts *RtmpServer) HealthCheck(duration time.Duration) bool {
135 | if !rts.rtmpMutex.TryLock(duration) {
136 | return false
137 | }
138 | rts.rtmpMutex.Unlock()
139 | return true
140 | }
141 | func (rts *RtmpServer) parsePublishUrl(publishUrl string) (map[string]string, error) {
142 | result := make(map[string]string)
143 | match := rts.config.publishRegexp.FindStringSubmatch(publishUrl)
144 | indexes := rts.config.publishRegexp.SubexpNames()
145 | if len(indexes) != len(match) {
146 | return nil, errors.Errorf("bad publish request %+v", publishUrl)
147 | }
148 | for i, name := range indexes {
149 | if i != 0 && name != "" {
150 | result[name] = match[i]
151 | }
152 | }
153 | logrus.Debugf("rtmp paths: %+v", result)
154 | return result, nil
155 | }
156 |
157 | func (rts *RtmpServer) parsePublishRequest(url string) (*PublishRequest, error) {
158 | result := &PublishRequest{}
159 | vars, err := rts.parsePublishUrl(url)
160 | if err != nil {
161 | return result, errors.Wrap(err, "cannot parse publish url")
162 | }
163 |
164 | if err := mapstructure.Decode(vars, &result); err != nil {
165 | return result, errors.Wrap(err, "cannot decode publish url")
166 | }
167 | result.Params = vars
168 | logrus.Debugf("Publish parse %+v", result)
169 | return result, nil
170 |
171 | }
172 |
173 | type ListenerDeadLine struct {
174 | net.Listener
175 | }
176 |
177 | func (ldl *ListenerDeadLine) Accept() (net.Conn, error) {
178 | c, err := ldl.Listener.Accept()
179 | if err != nil {
180 | return nil, errors.Wrap(err, "cannot accept")
181 | }
182 | err = c.SetDeadline(time.Now().Add(10 * time.Second))
183 | if err != nil {
184 | return nil, errors.Wrap(err, "cannot set deadline")
185 | }
186 | return c, nil
187 | }
188 |
189 | func (rts *RtmpServer) Listen() error {
190 | listener, err := rts.server.Listen()
191 | if err != nil {
192 | return errors.Wrap(err, "cannot listen rtmp")
193 | }
194 | rts.rtmpListener = listener
195 | return nil
196 | }
197 |
198 | func (rts *RtmpServer) Serve() error {
199 | err := rts.server.Serve(rts.rtmpListener)
200 | if err != nil {
201 | return errors.Wrap(err, "cannot serve rtmp")
202 | }
203 | return nil
204 | }
205 |
206 | func (rts *RtmpServer) Stop() error {
207 | return nil
208 | }
209 |
--------------------------------------------------------------------------------
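
Seen in isolation, the server above only needs a publish callback and a registered ktypes.Api implementation; a minimal standalone ingest sketch along the lines of main.go and the worker (illustrative only, the handler body is a placeholder):

package main

import (
	"github.com/sirupsen/logrus"

	"github.com/VKCOM/kive/ktypes"
	"github.com/VKCOM/kive/noop_api"
	"github.com/VKCOM/kive/rtmp_server"
)

func main() {
	// Stat, OnPublish and friends are routed through the global Api instance.
	ktypes.ApiInst = &noop_api.NoopApi{}

	srv, err := rtmp_server.NewRtmpServer(rtmp_server.NewRtmpServerConfig())
	if err != nil {
		logrus.Fatal(err)
	}

	// Placeholder handler: log the publish request and drop the connection.
	// A real handler (see the worker package) demuxes req.Data into chunks.
	srv.HandlePublish = func(req *rtmp_server.PublishRequest) error {
		logrus.Infof("publish: app=%s stream=%s", req.Application, req.StreamName)
		return req.Data.Close()
	}

	if err := srv.Listen(); err != nil {
		logrus.Fatal(err)
	}
	logrus.Fatal(srv.Serve())
}
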
/rtmp_server/rtmp_server_test.go:
--------------------------------------------------------------------------------
1 | package rtmp_server
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/assert"
7 | )
8 |
9 | func TestParsePublishUrl(t *testing.T) {
10 | s := RtmpServer{
11 | config: NewRtmpServerConfig(),
12 | }
13 |
14 | tests := []struct {
15 | url string
16 | expected PublishRequest
17 | }{
18 | {
19 | url: "/source?publishsign=kek/gopro",
20 | expected: PublishRequest{
21 | Application: "source",
22 | IncomingStreamName: "gopro",
23 | Params: map[string]string{
24 | "app": "source",
25 | "publishsign": "kek",
26 | "incoming_stream_name": "gopro",
27 | },
28 | },
29 | },
30 | {
31 | url: "/live?publishsign=aWQ9aHE3dnR3dGJaQmVKR2dRYiZzaWduPWRNMG02d0YvZVM3Uy9YYkZ0YjNNRGc9PQ==/hq7vtwtbZBeJGgQb",
32 | expected: PublishRequest{
33 | Application: "live",
34 | IncomingStreamName: "hq7vtwtbZBeJGgQb",
35 | Params: map[string]string{
36 | "app": "live",
37 | "incoming_stream_name": "hq7vtwtbZBeJGgQb",
38 | "publishsign": "aWQ9aHE3dnR3dGJaQmVKR2dRYiZzaWduPWRNMG02d0YvZVM3Uy9YYkZ0YjNNRGc9PQ==",
39 | },
40 | },
41 | },
42 | }
43 |
44 | failingTests := []string{
45 | "awful_url",
46 | "/123?noStreamName=value",
47 | }
48 |
49 | for _, test := range tests {
50 | r, err := s.parsePublishRequest(test.url)
51 | assert.NoError(t, err)
52 | assert.Equal(t, test.expected, *r)
53 | }
54 |
55 | for _, test := range failingTests {
56 | _, err := s.parsePublishRequest(test)
57 | assert.Error(t, err)
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/test_assets/vk_sync.mp4:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:3b824d6dde0f78e8a60453a073f26c0b6d22ad60446aade95b7337337b6ba2e5
3 | size 56978387
4 |
--------------------------------------------------------------------------------
/vsync/checked_map.go:
--------------------------------------------------------------------------------
1 | package vsync
2 |
3 | import "sync"
4 |
5 | type CheckedMap struct {
6 | names map[string]interface{}
7 | l sync.Mutex
8 | }
9 |
10 | func NewCheckedMap() *CheckedMap {
11 | return &CheckedMap{
12 | names: make(map[string]interface{}),
13 | }
14 | }
15 | func (cm *CheckedMap) Lock(name string, i interface{}) bool {
16 | cm.l.Lock()
17 | defer cm.l.Unlock()
18 | _, ok := cm.names[name]
19 | if ok {
20 | return false
21 | } else {
22 | cm.names[name] = i
23 | return true
24 | }
25 | }
26 |
27 | func (cm *CheckedMap) Unlock(name string) {
28 | cm.l.Lock()
29 | defer cm.l.Unlock()
30 | delete(cm.names, name)
31 | }
32 |
--------------------------------------------------------------------------------
/vsync/mutex.go:
--------------------------------------------------------------------------------
1 | package vsync
2 |
3 | import (
4 | "sync/atomic"
5 | "time"
6 | )
7 |
8 | type Semaphore struct {
9 | c chan struct{}
10 | waiterSlot int32
11 | }
12 |
13 | func NewSemaphore(max uint, maxWaiters uint) *Semaphore {
14 | return &Semaphore{c: make(chan struct{}, max), waiterSlot: int32(maxWaiters + max)}
15 | }
16 |
17 | func (m *Semaphore) Lock() {
18 | atomic.AddInt32(&m.waiterSlot, -1)
19 | m.c <- struct{}{}
20 | }
21 |
22 | func (m *Semaphore) Unlock() {
23 | atomic.AddInt32(&m.waiterSlot, 1)
24 | <-m.c
25 | }
26 |
27 | func (m *Semaphore) TryLock(timeout time.Duration) bool {
28 | slotsLeft := atomic.AddInt32(&m.waiterSlot, -1)
29 | if slotsLeft < 0 {
30 | atomic.AddInt32(&m.waiterSlot, 1)
31 | return false
32 | }
33 |
34 | select {
35 | case m.c <- struct{}{}:
36 | return true
37 | case <-time.After(timeout):
38 | atomic.AddInt32(&m.waiterSlot, 1)
39 | }
40 | return false
41 | }
42 |
--------------------------------------------------------------------------------
/worker/config.go:
--------------------------------------------------------------------------------
1 | package worker
2 |
3 | import (
4 | "github.com/VKCOM/joy4/format/rtmp"
5 | "github.com/sirupsen/logrus"
6 | "github.com/VKCOM/kive/hls_server"
7 | "github.com/VKCOM/kive/kfs"
8 | "github.com/VKCOM/kive/ktypes"
9 | "github.com/VKCOM/kive/rtmp_server"
10 | )
11 |
12 | const (
13 | DEFAULT_CONFIG = "default"
14 | TESTING_CONFIG = "testing"
15 | DEV_CONFIG = "development"
16 | )
17 |
18 | type Config struct {
19 | LogLevel string
20 | RtmpDebug bool
21 | KfsConfig kfs.KfsConfig
22 | LiveHlsConfig hls_server.LiveHlsConfig
23 | RtmpServerConfig rtmp_server.RtmpServerConfig
24 | FfmpegBinary string
25 | }
26 |
27 | func NewConfig(configPath string) Config {
28 | logrus.Infof("Starting with config path %+s", configPath)
29 | config := Config{
30 | LogLevel: "debug",
31 | KfsConfig: kfs.NewKfsConfig(),
32 | LiveHlsConfig: hls_server.NewLiveHlsConfig(),
33 | RtmpServerConfig: rtmp_server.NewRtmpServerConfig(),
34 | }
35 |
36 | configInterface, err := ktypes.ApiInst.ReadConfig(configPath, config)
37 |
38 | if err != nil {
39 | logrus.Panicf("Cannot init config %+v", err)
40 | }
41 |
42 | config = configInterface.(Config)
43 | if config.RtmpDebug {
44 | rtmp.Debug = true
45 | }
46 |
47 | switch config.LogLevel {
48 | case "info":
49 | logrus.SetLevel(logrus.InfoLevel)
50 | case "debug":
51 | logrus.SetLevel(logrus.DebugLevel)
52 | default:
53 | logrus.Panicf("Bad log level: %s:", config.LogLevel)
54 | }
55 | logrus.Infof("Final config: %+v ", config)
56 |
57 | return config
58 | }
59 |
--------------------------------------------------------------------------------
/worker/kive_test.go:
--------------------------------------------------------------------------------
1 | package worker
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "os/exec"
7 | "path/filepath"
8 | "runtime"
9 | "testing"
10 | "time"
11 |
12 | "github.com/sirupsen/logrus"
13 | "github.com/stretchr/testify/assert"
14 | )
15 |
16 | func TestKiveDvr(t *testing.T) {
17 | logrus.SetLevel(logrus.DebugLevel)
18 | c := NewConfig(TESTING_CONFIG)
19 | worker, err := NewWorker(c)
20 | if err != nil {
21 | panic(err)
22 | }
23 |
24 | err = worker.Listen()
25 | if err != nil {
26 | panic(err)
27 | }
28 |
29 | err = worker.Serve()
30 | if err != nil {
31 | panic(err)
32 | }
33 |
34 | defer worker.Stop()
35 |
36 | inFile := "t4.mp4"
37 | outFile := inFile
38 |
39 | streamName := "test123"
40 | pubUrl := fmt.Sprintf("%s%d%s%s", "rtmp://127.0.0.1:", worker.Config.RtmpServerConfig.RtmpPort, "/live?publishsign=aWQ9SUJwYXVTdGRrTlY0NmtRRCZzaWduPTZiQ0hHMG9wa1U1S0dhMFE5bkhrcFE9PQ==/", streamName)
41 |
42 | from := time.Now().Unix()
43 | duration := 35
44 | playUrl := fmt.Sprintf("%s%d/live/%s/playlist_dvr_range-%d-%d.m3u8", "http://127.0.0.1:", worker.Config.LiveHlsConfig.HttpPort, streamName, from, duration)
45 |
46 | equal, err := streamAndCheck(t, inFile, outFile, pubUrl, playUrl)
47 |
48 | if err != nil {
49 | logrus.Errorf("%+v", err)
50 | }
51 |
52 | assert.NoError(t, err)
53 | assert.True(t, equal)
54 |
55 | time.Sleep(time.Second)
56 | }
57 |
58 | func TestKiveDvrTranscode(t *testing.T) {
59 | logrus.SetLevel(logrus.DebugLevel)
60 | c := NewConfig(TESTING_CONFIG)
61 | worker, err := NewWorker(c)
62 | if err != nil {
63 | panic(err)
64 | }
65 |
66 | err = worker.Listen()
67 | if err != nil {
68 | panic(err)
69 | }
70 |
71 | err = worker.Serve()
72 | if err != nil {
73 | panic(err)
74 | }
75 |
76 | defer worker.Stop()
77 |
78 | inFile := "t4.mp4"
79 | outFile := inFile
80 |
81 | streamName := "test123"
82 | from := time.Now().Unix()
83 | duration := 35
84 |
85 | pubUrl := fmt.Sprintf("%s%d%s%s", "rtmp://127.0.0.1:", worker.Config.RtmpServerConfig.RtmpPort, "/abr?publishsign=aWQ9SUJwYXVTdGRrTlY0NmtRRCZzaWduPTZiQ0hHMG9wa1U1S0dhMFE5bkhrcFE9PQ==/", streamName)
86 |
87 | err = stream(t, inFile, pubUrl)
88 | assert.NoError(t, err)
89 |
90 | testSizes := []string{"1080p", "720p", "480p", "360p", "source"}
91 |
92 | for _, testSize := range testSizes {
93 | playUrl := fmt.Sprintf("%s%d/liveabr/%s/abr/%s_%s/playlist_dvr_range-%d-%d.m3u8", "http://127.0.0.1:", worker.Config.LiveHlsConfig.HttpPort, streamName, streamName, testSize, from, duration)
94 |
95 | equal, err := check(t, inFile, outFile, playUrl)
96 | if err != nil {
97 | logrus.Errorf("%+v", err)
98 | }
99 |
100 | assert.NoError(t, err)
101 | assert.True(t, equal)
102 | }
103 | time.Sleep(time.Second)
104 | }
105 |
106 | func TestKiveLive(t *testing.T) {
107 | logrus.SetLevel(logrus.DebugLevel)
108 | c := NewConfig(TESTING_CONFIG)
109 | worker, err := NewWorker(c)
110 | if err != nil {
111 | panic(err)
112 | }
113 |
114 | err = worker.Listen()
115 | if err != nil {
116 | panic(err)
117 | }
118 |
119 | err = worker.Serve()
120 | if err != nil {
121 | panic(err)
122 | }
123 |
124 | defer worker.Stop()
125 |
126 | inFile := "t4.mp4"
127 | outFile := inFile
128 |
129 | streamName := "test123"
130 | pubUrl := fmt.Sprintf("%s%d%s%s", "rtmp://127.0.0.1:", worker.Config.RtmpServerConfig.RtmpPort, "/live?publishsign=aWQ9SUJwYXVTdGRrTlY0NmtRRCZzaWduPTZiQ0hHMG9wa1U1S0dhMFE5bkhrcFE9PQ==/", streamName)
131 |
132 | playUrl := fmt.Sprintf("%s%d/live/%s/playlist.m3u8", "http://127.0.0.1:", worker.Config.LiveHlsConfig.HttpPort, streamName)
133 |
134 | equal, err := streamAndCheck(t, inFile, outFile, pubUrl, playUrl)
135 |
136 | if err != nil {
137 | logrus.Errorf("%+v", err)
138 | }
139 |
140 | assert.NoError(t, err)
141 | assert.True(t, equal)
142 |
143 | time.Sleep(time.Second)
144 | }
145 |
146 | func stream(t *testing.T, inFile, pubUrl string) error {
147 | inFile = composeInFilename(inFile)
148 | cmd, err := exec.Command("ffmpeg", "-re", "-i", inFile, "-c", "copy", "-f", "flv", pubUrl).CombinedOutput()
149 | if err != nil {
150 | logrus.Fatalf("stream: %s %+v", cmd, err)
151 | return err
152 | }
153 | assert.NoError(t, err)
154 | return nil
155 | }
156 |
157 | func check(t *testing.T, inFile, outFile, playUrl string) (bool, error) {
158 | inFile = composeInFilename(inFile)
159 | inMd5, err := exec.Command("ffmpeg", "-y", "-i", inFile, "-c", "copy", "-map", "0:a", "-f", "md5", "-").Output()
160 |
161 | if err != nil {
162 | logrus.Errorf("%+v", err)
163 | }
164 | outFile = composeOutFilename(outFile)
165 |
166 | logrus.Info(playUrl, " : ", outFile)
167 | out, err := exec.Command("ffmpeg", "-y", "-i", playUrl, "-c", "copy", "-f", "mp4", outFile).CombinedOutput()
168 | 
169 | if err != nil {
170 | logrus.Errorf("check download: %s %+v", out, err)
171 | return false, err
172 | }
173 | 
174 | outMd5, err := exec.Command("ffmpeg", "-y", "-i", outFile, "-c", "copy", "-map", "0:a", "-f", "md5", "-").Output()
175 | 
176 | if err != nil {
177 | logrus.Errorf("check md5: %+v", err)
178 | return false, err
179 | }
180 | return bytes.Equal(inMd5, outMd5), err
181 | }
182 |
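    | // streamAndCheck publishes inFile over RTMP and then verifies that playUrl
    | // serves back a stream with matching audio.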
183 | func streamAndCheck(t *testing.T, inFile, outFile, pubUrl, playUrl string) (bool, error) {
184 | if err := stream(t, inFile, pubUrl); err != nil {
185 | return false, err
186 | }
187 |
188 | return check(t, inFile, outFile, playUrl) // TODO: should compare the full streams here, not just the audio track
189 | }
190 |
191 | func composeInFilename(inFile string) string {
192 | return composeFilename(inFile, "/../test_data/")
193 | }
194 |
195 | func composeOutFilename(outFile string) string {
196 | return composeFilename(outFile, "/../test_result/")
197 | }
198 |
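    | // composeFilename resolves a path relative to this source file, so the tests
    | // do not depend on the working directory.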
199 | func composeFilename(name, infix string) string {
200 | _, filename, _, _ := runtime.Caller(0)
201 | 
202 | return filepath.Dir(filename) + infix + name
203 | }
204 |
--------------------------------------------------------------------------------
/worker/kive_worker.go:
--------------------------------------------------------------------------------
1 | package worker
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "fmt"
7 | "io"
8 | "io/ioutil"
9 | "math/rand"
10 | "net/http"
11 | "strings"
12 | "time"
13 |
14 | "github.com/VKCOM/joy4/av"
15 | "github.com/VKCOM/joy4/av/avutil"
16 | "github.com/mihail812/m3u8"
17 | "github.com/pkg/errors"
18 | "github.com/sirupsen/logrus"
19 |
20 | "github.com/VKCOM/kive/hls_server"
21 | "github.com/VKCOM/kive/kfs"
22 | "github.com/VKCOM/kive/ktypes"
23 | "github.com/VKCOM/kive/media"
24 | "github.com/VKCOM/kive/rtmp_server"
25 | "math"
26 | "sort"
27 | )
28 |
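    | // Worker ties together the chunk filesystem, the HLS playback server and the
    | // RTMP ingest server.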
29 | type Worker struct {
30 | storage *kfs.Filesystem
31 | hlsServer *hls_server.LiveHls
32 | rtmpServer *rtmp_server.RtmpServer
33 | Config Config
34 |
35 | desiredOutputSizes []int
36 | streamTypeToSize map[ktypes.StreamType]int
37 | sizeToStreamType map[int]ktypes.StreamType
38 | }
39 |
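    | // NewWorker builds the storage, HLS and RTMP components from config and wires
    | // their request handlers to the worker's methods.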
40 | func NewWorker(config Config) (*Worker, error) {
41 | worker := Worker{
42 | Config: config,
43 | }
44 | storage, err := kfs.NewFilesystem(config.KfsConfig)
45 | if err != nil {
46 | return nil, errors.Wrap(err, "cannot create file storage")
47 | }
48 | worker.storage = storage
49 |
50 | hlsServer, err := hls_server.NewLiveHls(config.LiveHlsConfig)
51 | if err != nil {
52 | return nil, errors.Wrap(err, "cannot create hls server")
53 | }
54 |
55 | hlsServer.HandleVodChunk = worker.handleVodChunk
56 | hlsServer.HandleVodManifest = worker.handleVodManifest
57 | hlsServer.HandleLiveChunk = worker.handleLiveChunk
58 | hlsServer.HandleLivePlaylist = worker.handleLivePlayList
59 | hlsServer.HandleDvrChunk = worker.handleDvrChunk
60 | hlsServer.HandleDvrPlayList = worker.handleDvrPlayList
61 |
62 | worker.hlsServer = hlsServer
63 |
64 | if err != nil {
65 | return nil, errors.Wrap(err, "cannot create playlist storage")
66 | }
67 |
68 | rtmpServer, err := rtmp_server.NewRtmpServer(config.RtmpServerConfig)
69 | if err != nil {
70 | return nil, errors.Wrap(err, "cannot create rtmp server")
71 | }
72 | rtmpServer.HandlePublish = worker.handlePublish
73 | worker.rtmpServer = rtmpServer
74 | hlsServer.HandleRtmpHealth = rtmpServer.HealthCheck
75 |
76 | worker.desiredOutputSizes = worker.composeDesiredSizes()
77 |
78 | return &worker, nil
79 | }
80 |
81 | func (w *Worker) Listen() error {
82 | err := w.rtmpServer.Listen()
83 | if err != nil {
84 | return errors.Wrap(err, "cannot listen rtmp")
85 | }
86 |
87 | err = w.hlsServer.Listen()
88 | if err != nil {
89 | return errors.Wrap(err, "cannot listen hls")
90 | }
91 |
92 | return nil
93 | }
94 |
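    | // Serve starts the RTMP and HLS servers in background goroutines and returns
    | // immediately; a failing server panics the process.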
95 | func (w *Worker) Serve() error {
96 | go func() {
97 | err := w.rtmpServer.Serve()
98 | if err != nil {
99 | logrus.Panicf("cannot serve %+v", err)
100 | }
101 | time.Sleep(30 * time.Second)
102 | panic("serve failed")
103 | }()
104 |
105 | go func() {
106 | err := w.hlsServer.Serve()
107 | if err != nil {
108 | logrus.Panicf("cannot serve %+v", err)
109 | }
110 | }()
111 |
112 | return nil
113 | }
114 |
115 | func (w *Worker) Stop() error {
116 | err := w.rtmpServer.Stop()
117 | if err != nil {
118 | logrus.Errorf("cannot stop %+v", err)
119 | }
120 | w.storage.Finalize()
121 | err = w.hlsServer.Stop()
122 | if err != nil {
123 | logrus.Errorf("cannot stop %+v", err)
124 | }
125 | return nil
126 | }
127 |
128 | func (w *Worker) handleLiveMasterPlayList(r *hls_server.LivePlaylistRequest) (hls_server.HttpResponse, error) {
129 | pl := m3u8.NewMasterPlaylist()
130 |
131 | return hls_server.HttpResponse{
132 | HttpStatus: http.StatusOK,
133 | Reader: ioutil.NopCloser(strings.NewReader(pl.String())),
134 | }, nil
135 | }
136 |
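    | // handleLivePlayList serves the live playlist: ABR requests get a master
    | // playlist when transcoded renditions are available, otherwise (and for
    | // non-ABR applications) the SOURCE media playlist of recent chunks is returned.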
137 | func (w *Worker) handleLivePlayList(r *hls_server.LivePlaylistRequest) (hls_server.HttpResponse, error) {
138 | livePlaylistLen := 4
139 | liveTimeoutLimit := ktypes.UnixMs(50 * 1000)
140 |
141 | if r.StreamType == "" && (r.Application == "kiveabr") {
142 | res, err := w.storage.Md.GetLast(r.StreamName, string(w.sizeToStreamType[w.getLowestTranscoderSize()]), livePlaylistLen, liveTimeoutLimit) // fallback for source playback
143 | if err != nil || len(res) == 0 {
144 | r.StreamType = string(ktypes.SOURCE)
145 | } else {
146 | return w.handleMasterPlaylist(r.StreamName, "chunks.m3u8")
147 | }
148 | } else if r.Application != "kiveabr" {
149 | r.StreamType = string(ktypes.SOURCE)
150 | }
151 |
152 | if !ktypes.ApiInst.AllowView(r.StreamName, r.ViewSalt) {
153 | return hls_server.HttpResponse{HttpStatus: http.StatusForbidden}, errors.New("forbidden")
154 | }
155 |
156 | pl, err := m3u8.NewMediaPlaylist(0, uint(livePlaylistLen))
157 | if err != nil {
158 | return hls_server.HttpResponse{HttpStatus: http.StatusNotFound}, errors.Wrap(err, "cannot create playlist")
159 | }
160 |
161 | res, err := w.storage.Md.GetLast(r.StreamName, string(r.StreamType), livePlaylistLen, liveTimeoutLimit)
162 | if err != nil {
163 | logrus.Errorf("%+v", err)
164 | }
165 |
166 | logrus.Infof("livePlaylist: %+v %+v %+v", res, r.StreamName, r.StreamType)
167 | lcr := hls_server.LiveChunkRequest{
168 | StreamName: r.StreamName,
169 | Application: r.Application,
170 | StreamNameChunk: r.StreamName,
171 | }
172 |
173 | for _, i := range res {
174 | lcr.ChunkName = i.BuildChunkName()
175 | pl.Append(w.hlsServer.BuildLiveChunkName(&lcr), float64(i.Duration)/float64(time.Second), "")
176 | if i.Discontinuity != 0 {
177 | pl.SetDiscontinuity()
178 | }
179 | }
180 |
181 | if len(res) != 0 {
182 | pl.SeqNo = uint64(res[len(res)-1].SeqId - int64(len(res)))
183 | } else {
184 | return hls_server.HttpResponse{HttpStatus: http.StatusNotFound}, errors.New("empty_playlist")
185 | }
186 |
187 | return hls_server.HttpResponse{
188 | HttpStatus: http.StatusOK,
189 | Reader: ioutil.NopCloser(strings.NewReader(pl.String())),
190 | }, nil
191 | }
192 |
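    | // handleMasterPlaylist builds an HLS master playlist with one variant per
    | // desired output size; bandwidth and a 16:9 resolution are derived from the size.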
193 | func (w *Worker) handleMasterPlaylist(streamName, playlistSuffix string) (hls_server.HttpResponse, error) {
194 | pl := m3u8.NewMasterPlaylist()
195 | sizes := append([]int{}, w.desiredOutputSizes...)
196 | sort.Ints(sizes)
197 | for it, desiredSize := range sizes {
198 | pl.Append(w.ComposeFullMediaPlaylistName(streamName, playlistSuffix, desiredSize), nil, m3u8.VariantParams{
199 | ProgramId: uint32(it),
200 | Bandwidth: uint32(4 * 1024 * desiredSize),
201 | Resolution: fmt.Sprintf("%dx%d", int(math.Round(1.7777777*float64(desiredSize))), desiredSize)})
202 | }
203 | return hls_server.HttpResponse{
204 | HttpStatus: http.StatusOK,
205 | Reader: ioutil.NopCloser(strings.NewReader(pl.String())),
206 | }, nil
207 | }
208 |
209 | func (w *Worker) handleLiveChunk(r *hls_server.LiveChunkRequest) (hls_server.HttpResponse, error) {
210 | key, err := ktypes.ParseChunkName(r.ChunkName)
211 | if err != nil {
212 | return hls_server.HttpResponse{HttpStatus: http.StatusBadRequest}, errors.Wrap(err, "bad params")
213 | }
214 |
215 | reader, err := w.storage.Reader(key)
216 | if err != nil {
217 | return hls_server.HttpResponse{HttpStatus: http.StatusNotFound}, errors.Wrap(err, "no such stream")
218 | }
219 |
220 | return hls_server.HttpResponse{
221 | HttpStatus: http.StatusOK,
222 | Reader: reader,
223 | }, nil
224 | }
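    | 
    | // handleDvrPlayList serves a VOD-type playlist covering From..From+Duration
    | // seconds; ABR requests are answered with a master playlist when a transcoded
    | // rendition has chunks in that range.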
225 | func (w *Worker) handleDvrPlayList(r *hls_server.DvrPlaylistRequest) (hls_server.HttpResponse, error) {
226 | fromUnixMs := ktypes.UnixMs(r.From * 1000)
227 | toUnixMs := ktypes.UnixMs(r.From*1000 + r.Duration*1000)
228 | if r.StreamType == "" && (r.Application == "kiveabr") {
229 | res, err := w.storage.Md.Walk(r.StreamName, w.sizeToStreamType[w.getLowestTranscoderSize()], fromUnixMs, toUnixMs)
230 | if err != nil || len(res) == 0 {
231 | r.StreamType = string(ktypes.SOURCE)
232 | } else {
233 | return w.handleMasterPlaylist(r.StreamName, w.hlsServer.BuildDvrChunksPlaylist(r))
234 | }
235 | } else if r.Application != "kiveabr" {
236 | r.StreamType = string(ktypes.SOURCE)
237 | }
238 |
239 | res, err := w.storage.Md.Walk(r.StreamName, ktypes.StreamType(r.StreamType), fromUnixMs, toUnixMs)
240 | if err != nil {
241 | return hls_server.HttpResponse{HttpStatus: http.StatusNotFound}, errors.Wrap(err, "bad params")
242 | }
243 |
244 | if len(res) == 0 {
245 | return hls_server.HttpResponse{HttpStatus: http.StatusNotFound}, errors.New("empty_playlist")
246 | }
247 |
248 | pl, err := m3u8.NewMediaPlaylist(0, uint(len(res)))
249 | if err != nil {
250 | return hls_server.HttpResponse{HttpStatus: http.StatusNotFound}, errors.Wrap(err, "cannot create playlist")
251 | }
252 |
253 | pl.MediaType = m3u8.VOD
254 |
255 | dcr := hls_server.DvrChunkRequest{
256 | StreamName: r.StreamName,
257 | Application: r.Application,
258 | StreamNameChunk: r.StreamName,
259 | }
260 | for _, i := range res {
261 | dcr.ChunkName = i.BuildChunkName()
262 | pl.Append(w.hlsServer.BuildDvrChunkName(&dcr), float64(i.Duration)/float64(time.Second), "")
263 | if i.Discontinuity != 0 {
264 | pl.SetDiscontinuity()
265 | }
266 | }
267 | pl.Close()
268 |
269 | return hls_server.HttpResponse{
270 | HttpStatus: http.StatusOK,
271 | Reader: ioutil.NopCloser(strings.NewReader(pl.String())),
272 | }, nil
273 | }
274 |
275 | func (w *Worker) handleDvrChunk(r *hls_server.DvrChunkRequest) (hls_server.HttpResponse, error) {
276 | key, err := ktypes.ParseChunkName(r.ChunkName)
277 | if err != nil {
278 | return hls_server.HttpResponse{HttpStatus: http.StatusBadRequest}, errors.Wrap(err, "bad params")
279 | }
280 |
281 | reader, err := w.storage.Reader(key)
282 | if err != nil {
283 | return hls_server.HttpResponse{HttpStatus: http.StatusNotFound}, errors.Wrap(err, "no such stream")
284 | }
285 |
286 | return hls_server.HttpResponse{
287 | HttpStatus: http.StatusOK,
288 | Reader: reader,
289 | }, nil
290 | }
291 |
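    | // handleVodManifest returns a JSON VodManifest for the requested time range:
    | // an m3u8 playlist plus per-chunk metadata and download URLs for the SOURCE rendition.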
292 | func (w *Worker) handleVodManifest(r *hls_server.VodManifestRequest) (hls_server.HttpResponse, error) {
293 | res, err := w.storage.Md.Walk(r.StreamName, ktypes.SOURCE, ktypes.UnixMs(r.From*1000), ktypes.UnixMs(r.From*1000+r.Duration*1000))
294 | if err != nil {
295 | return hls_server.HttpResponse{HttpStatus: http.StatusNotFound}, errors.Wrap(err, "bad params")
296 | }
297 | if len(res) == 0 {
298 | return hls_server.HttpResponse{HttpStatus: http.StatusNotFound}, errors.New("Zero len")
299 | }
300 | logrus.Infof("Vod manifest len %d", len(res))
301 |
302 | pl, err := m3u8.NewMediaPlaylist(0, uint(len(res)))
303 | if err != nil {
304 | return hls_server.HttpResponse{HttpStatus: http.StatusNotFound}, errors.Wrap(err, "bad params")
305 | }
306 |
307 | pl.MediaType = m3u8.VOD
308 |
309 | vodManifest := VodManifest{}
310 | vodChunks := make([]ManifestEntry, 0)
311 |
312 | dcr := hls_server.DvrChunkRequest{
313 | StreamName: r.StreamName,
314 | Application: "live",
315 | StreamNameChunk: r.StreamName,
316 | }
317 |
318 | vodManifest.DeletionChunks = make([]ktypes.ChunkInfo, 0)
319 | duration := 0.
320 | for _, i := range res {
321 | playlistPathName := fmt.Sprintf("%s/%s", i.HourString(), i.BuildVodChunkName())
322 | vodManifestEntry := ManifestEntry{
323 | Size: i.Size,
324 | Fullname: playlistPathName,
325 | Directory: i.HourString(),
326 | Duration: i.Duration,
327 | }
328 | chunkDuration := float64(i.Duration) / float64(time.Second)
329 | duration += chunkDuration
330 |
331 | dcr.ChunkName = i.BuildChunkName()
332 | vodManifestEntry.DownloadUrl = w.hlsServer.BuildDvrChunkName(&dcr)
333 | vodManifestEntry.DvrChunkName = vodManifestEntry.DownloadUrl
334 | pl.Append(
335 | playlistPathName,
336 | chunkDuration,
337 | "",
338 | )
339 | if i.Discontinuity != 0 {
340 | pl.SetDiscontinuity()
341 | }
342 | vodManifestEntry.ChunkInfo = i
343 | vodManifestEntry.DvrChunkRequest = dcr
344 | vodChunks = append(vodChunks, vodManifestEntry)
345 | vodManifest.DeletionChunks = append(vodManifest.DeletionChunks, i)
346 | }
347 | pl.Close()
348 | vodManifest.Playlist = pl.String()
349 | vodManifest.Chunks = vodChunks
350 | vodManifest.Duration = int64(duration)
351 | vodManifest.StreamName = r.StreamName
352 | vodManifest.From = r.From
353 |
354 | b, err := json.Marshal(vodManifest)
355 | if err != nil {
356 | return hls_server.HttpResponse{HttpStatus: http.StatusInternalServerError}, errors.Wrapf(err, "cannot encode %+v", vodManifest)
357 | }
358 |
359 | return hls_server.HttpResponse{
360 | HttpStatus: http.StatusOK,
361 | Reader: ioutil.NopCloser(bytes.NewReader(b)),
362 | }, nil
363 | }
364 |
365 | func (w *Worker) handleVodChunk(r *hls_server.VodChunkRequest) (hls_server.HttpResponse, error) {
366 | key, err := ktypes.ParseChunkName(r.ChunkName)
367 | if err != nil {
368 | return hls_server.HttpResponse{HttpStatus: http.StatusBadRequest}, errors.Wrap(err, "bad params")
369 | }
370 |
371 | reader, err := w.storage.Reader(key)
372 | if err != nil {
373 | return hls_server.HttpResponse{HttpStatus: http.StatusNotFound}, errors.Wrap(err, "no such stream")
374 | }
375 |
376 | return hls_server.HttpResponse{
377 | HttpStatus: http.StatusOK,
378 | Reader: reader,
379 | }, nil
380 | }
381 |
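    | // handlePublish accepts an incoming RTMP publish: "live" streams are chunked
    | // as-is, "abr" streams are fed through the transcoder and every produced
    | // rendition is chunked in its own goroutine.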
382 | func (w *Worker) handlePublish(request *rtmp_server.PublishRequest) error {
383 | switch request.Application {
384 | case "live":
385 | return w.handlePublishInner(request.Data, ktypes.SOURCE, request, nil)
386 | case "abr":
387 |
388 | transcoder, err := ktypes.ApiInst.GetTranscoder()
389 | if err != nil {
390 | return errors.Wrap(err, "failed to initialize transcoder")
391 | }
392 |
393 | defer transcoder.Close()
394 |
395 | demuxers, actualSizes, err := transcoder.Init(w.desiredOutputSizes, request.Data, request.StreamName)
396 | if err != nil {
397 | return errors.Wrap(err, "failed to initialize transcoder")
398 | }
399 |
400 | demux := request.Data
401 |
402 | for idx := range demuxers {
403 | request.Application = "live"
404 | demuxer := demuxers[idx]
405 | go func() {
406 | err := w.handlePublishInner(demuxer, ktypes.StreamType(demuxer.Desc()), request, actualSizes)
407 | if err != nil {
408 | logrus.WithField("stream_name", request.StreamName).Errorf("cannot write chunk in demuxer %s: %+v", demuxer.Desc(), err)
409 | }
410 | }()
411 | }
412 | return avutil.CopyPackets(transcoder, demux)
413 | default:
414 | return errors.New("not supported")
415 | }
416 | }
417 |
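    | // handlePublishInner splits one demuxed stream into ~3.1s segments, writes them
    | // to storage and mirrors each segment's metadata into the virtual renditions
    | // computed by composeVirtual.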
418 | func (w *Worker) handlePublishInner(demux av.DemuxCloser, streamType ktypes.StreamType, request *rtmp_server.PublishRequest, actualSizes []int) error {
419 | if !request.StreamHandler.AllowStreaming() {
420 | return errors.Errorf("Streaming is forbidden %+v ", request)
421 | }
422 |
423 | chunker := media.NewSegmentDemuxer(demux, 3100*time.Millisecond)
424 |
425 | streamToSeqId := make(map[int]int64)
426 | virtualStreams, _ := composeVirtual(w.streamTypeToSize[streamType], w.desiredOutputSizes, actualSizes)
427 | for _, vStream := range virtualStreams {
428 | streamToSeqId[vStream] = w.getLastSeqId(request.StreamName, w.sizeToStreamType[vStream])
429 | }
430 |
431 | key := ktypes.NewChunkInfo(request.StreamName, streamType)
432 | key.SeqId = w.getLastSeqId(request.StreamName, streamType)
433 | keyOut := &key
434 |
435 | for {
436 | keyOut, err := w.writeChunk(keyOut, request, chunker)
437 | if err == nil && keyOut != nil {
438 | w.writeVirtualChunk(*keyOut, streamToSeqId, virtualStreams)
439 | }
440 | key.Discontinuity = 0
441 | if err == io.EOF {
442 | break
443 | } else if err != nil {
444 | return errors.Wrap(err, "cannot write chunk")
445 | }
446 | }
447 |
448 | return nil
449 | }
450 |
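    | // writeChunk writes the next segment to storage under a fresh key, records its
    | // duration and, for SOURCE streams, reports the current video size to the
    | // stream handler.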
451 | func (w *Worker) writeChunk(key *ktypes.ChunkInfo, request *rtmp_server.PublishRequest, chunker *media.SegmentDemuxer) (*ktypes.ChunkInfo, error) {
452 | key.SeqId = key.SeqId + 1
453 | key.Ts = ktypes.TimeToMillis(time.Now())
454 | key.Rand = rand.Int()
455 | key.StreamDesc = "nodesc"
456 | key.Virtual = false
457 | key.VirtualStreamType = ktypes.SOURCE
458 | writer, err := w.storage.Writer(*key)
459 | if err != nil {
460 | return nil, errors.Wrap(err, "cannot get storage")
461 | }
462 | defer writer.Close()
463 |
464 | si, err := chunker.WriteNext(writer)
465 | writer.SetChunkDuration(si.Duration)
466 | key.Duration = si.Duration
467 |
468 | if err != nil && err != io.EOF {
469 | return nil, errors.Wrap(err, "cannot chunk")
470 | }
471 |
472 | if key.StreamType == ktypes.SOURCE {
473 | request.StreamHandler.NotifyStreaming(ktypes.StreamInfo{
474 | Width: si.Width,
475 | Height: si.Height,
476 | ChunkStartTime: key.Ts,
477 | })
478 | }
479 | return key, err
480 | }
481 |
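    | // writeVirtualChunk registers the chunk's metadata under each virtual rendition,
    | // keeping a separate sequence counter per rendition.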
482 | func (w *Worker) writeVirtualChunk(key ktypes.ChunkInfo, streamToSeqId map[int]int64, virtualStreams []int) {
483 | for _, vStream := range virtualStreams {
484 | key.Virtual = true
485 | key.SeqId = streamToSeqId[vStream] + 1
486 | streamToSeqId[vStream] = key.SeqId
487 | key.VirtualStreamType = w.sizeToStreamType[vStream]
488 | w.storage.WriteMeta(key)
489 | }
490 | }
491 |
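    | // getLastSeqId returns the sequence id of the newest chunk written for the
    | // stream and type within the last hour, or 0 if there is none.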
492 | func (w *Worker) getLastSeqId(streamName string, streamType ktypes.StreamType) (seqId int64) {
493 | chunks, err := w.storage.Md.GetLast(streamName, string(streamType), 1, 3600*1000)
494 | seqId = 0
495 | if err == nil && len(chunks) != 0 {
496 | seqId = chunks[0].SeqId
497 | }
498 | return seqId
499 | }
500 |
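    | // GetChunk loads the first chunk found in [from, to] for the given stream type
    | // fully into memory and returns its bytes.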
501 | func (w *Worker) GetChunk(streamName string, streamType ktypes.StreamType, from ktypes.UnixMs, to ktypes.UnixMs) ([]byte, error) {
502 | res, err := w.storage.Md.Walk(streamName, streamType, from, to)
503 | if err != nil {
504 | return nil, errors.Wrap(err, "cannot get chunk list")
505 | }
506 |
507 | if len(res) == 0 {
508 | return nil, errors.New("no chunks")
509 | }
510 | chunkReader, err := w.storage.Reader(res[0])
511 |
512 | if err != nil {
513 | return nil, errors.Wrap(err, "cannot get chunk")
514 | }
515 | defer chunkReader.Close()
516 |
517 | tsData := bytes.NewBuffer(make([]byte, 0))
518 | n, err := io.Copy(tsData, chunkReader)
519 |
520 | logrus.Debugf("Copied %d bytes for image", n)
521 | if err != nil {
522 | return nil, errors.Wrap(err, "cannot get chunk")
523 | }
524 | return tsData.Bytes(), nil
525 | }
526 |
527 | func (w *Worker) DeleteChunk(ci ktypes.ChunkInfo) error {
528 | return w.storage.Delete(ci)
529 | }
530 |
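    | // composeDesiredSizes fills the size/stream-type lookup maps for the fixed
    | // 360p-1080p ladder and returns the sizes in ascending order.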
531 | func (w *Worker) composeDesiredSizes() []int {
532 | sizes := make([]int, 0)
533 | w.streamTypeToSize = make(map[ktypes.StreamType]int)
534 | w.sizeToStreamType = make(map[int]ktypes.StreamType)
535 | w.streamTypeToSize["1080p"] = 1080
536 | w.streamTypeToSize["720p"] = 720
537 | w.streamTypeToSize["480p"] = 480
538 | w.streamTypeToSize["360p"] = 360
539 | for key, value := range w.streamTypeToSize {
540 | w.sizeToStreamType[value] = key
541 | sizes = append(sizes, value)
542 | }
543 | sort.Ints(sizes)
544 | return sizes
545 | }
546 |
547 | func (w *Worker) getLowestTranscoderSize() int {
548 | min := w.desiredOutputSizes[0]
549 | for _, size := range w.desiredOutputSizes {
550 | if size < min {
551 | min = size
552 | }
553 | }
554 | return min
555 | }
556 |
557 | func (w *Worker) ComposeFullMediaPlaylistName(streamName, playlistSuffix string, size int) string {
558 | return fmt.Sprintf("%s/%s", w.sizeToStreamType[size], playlistSuffix)
559 | }
560 |
561 | func hasVideoSize(desiredSize int, sizes []int) bool {
562 | for _, size := range sizes {
563 | if size == desiredSize {
564 | return true
565 | }
566 | }
567 | return false
568 | }
569 |
570 | func findSizePosition(desiredSize int, sizes []int) (int, error) {
571 | for el, size := range sizes {
572 | if size == desiredSize {
573 | return el, nil
574 | }
575 | }
576 | return 0, errors.New("size_not_found")
577 | }
578 |
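    | // composeVirtual returns the desired sizes above size that have no real
    | // transcoder output, stopping at the first size present in actualSizes.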
579 | func composeVirtual(size int, desiredSizes, actualSizes []int) ([]int, error) {
580 | desiredSizePos, err := findSizePosition(size, desiredSizes)
581 | if err != nil {
582 | return nil, err
583 | }
584 | result := make([]int, 0)
585 | for i := desiredSizePos + 1; i < len(desiredSizes); i++ {
586 | if hasVideoSize(desiredSizes[i], actualSizes) {
587 | return result, nil
588 | }
589 | result = append(result, desiredSizes[i])
590 | }
591 | return result, nil
592 | }
593 |
--------------------------------------------------------------------------------
/worker/kive_worker_test.go:
--------------------------------------------------------------------------------
1 | package worker
2 |
3 | import (
4 | "reflect"
5 | "testing"
6 | )
7 |
8 | func Test_composeVirtual(t *testing.T) {
9 | result, _ := composeVirtual(240, []int{240, 480, 720, 1080}, []int{240})
10 | if !reflect.DeepEqual([]int{480, 720, 1080}, result) {
11 | t.Errorf("%+v", result)
12 | }
13 |
14 | result, _ = composeVirtual(240, []int{240, 480, 720, 1080}, []int{240, 720})
15 | if !reflect.DeepEqual([]int{480}, result) {
16 | t.Errorf("%+v", result)
17 | }
18 |
19 | result, _ = composeVirtual(240, []int{240, 480, 720, 1080}, []int{240, 480})
20 | if !reflect.DeepEqual([]int{}, result) {
21 | t.Errorf("%+v", result)
22 | }
23 |
24 | result, _ = composeVirtual(1080, []int{240, 480, 720, 1080}, []int{240, 480, 720, 1080})
25 | if !reflect.DeepEqual([]int{}, result) {
26 | t.Errorf("%+v", result)
27 | }
28 |
29 | result, _ = composeVirtual(480, []int{240, 480, 720, 1080}, []int{240, 480, 1080})
30 | if !reflect.DeepEqual([]int{720}, result) {
31 | t.Errorf("%+v", result)
32 | }
33 |
34 | }
35 |
--------------------------------------------------------------------------------
/worker/manifest.go:
--------------------------------------------------------------------------------
1 | package worker
2 |
3 | import (
4 | "github.com/VKCOM/kive/hls_server"
5 | "github.com/VKCOM/kive/ktypes"
6 | "time"
7 | )
8 |
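    | // ManifestEntry describes a single DVR chunk referenced by a VOD manifest.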
9 | type ManifestEntry struct {
10 | Fullname string `json:"fullname"`
11 | DvrChunkName string `json:"dvr_chunk_name"`
12 | Directory string `json:"dir"`
13 | Duration time.Duration `json:"duration"`
14 | Size int `json:"size"`
15 | DownloadUrl string `json:"download_url"` //postfix after internal
16 | StorageUrl string `json:"storage_url"`
17 | ChunkInfo ktypes.ChunkInfo `json:"chunk_info"`
18 | DvrChunkRequest hls_server.DvrChunkRequest `json:"dvr_request"`
19 | }
20 |
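    | // VodManifest is the JSON body returned for a VOD manifest request: the
    | // generated playlist, its chunk entries, the total duration in seconds and the
    | // chunks listed for later deletion.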
21 | type VodManifest struct {
22 | Playlist string `json:"playlist"`
23 | Chunks []ManifestEntry `json:"chunks"`
24 | StreamName string `json:"stream_name"`
25 | Duration int64 `json:"duration"`
26 | From int64 `json:"from"`
27 | DeletionChunks []ktypes.ChunkInfo `json:"deletion_chunks"`
28 | }
29 |
--------------------------------------------------------------------------------